ngram
listlengths
0
82k
[ "user to set this property. For more information, please refer", "core.VarDesc.VarType.FP16 or dtype == core.VarDesc.VarType.BF16) return is_fp_dtype def is_integer(x): \"\"\"Return", "\"\"\" Returns a new tensor containing imaginary values of input", "return core.VarDesc.VarType.FP32 elif dtype == core.VarDesc.VarType.COMPLEX128: return core.VarDesc.VarType.FP64 else: return", "dtype == core.VarDesc.VarType.FP64: return core.VarDesc.VarType.COMPLEX128 else: return dtype def is_complex(x):", "== core.VarDesc.VarType.INT32 or dtype == core.VarDesc.VarType.INT64) return is_int_dtype def real(x,", "tensor containing imaginary values of the input tensor. Examples: ..", "== core.VarDesc.VarType.COMPLEX128: return core.VarDesc.VarType.FP64 else: return dtype def _real_to_complex_dtype(dtype): if", "2.0 (the \"License\"); # you may not use this file", "from __future__ import print_function from ..framework import core from ..fluid.layer_helper", "of input tensor. Args: x (Tensor): the input tensor, its", "3 + 4j], [4 + 3j, 5 + 2j, 6", "please refer to :ref:`api_guide_Name` . Returns: Tensor: a tensor containing", "= [] def _complex_to_real_dtype(dtype): if dtype == core.VarDesc.VarType.COMPLEX64: return core.VarDesc.VarType.FP32", "print_function from ..framework import core from ..fluid.layer_helper import LayerHelper from", "def _complex_to_real_dtype(dtype): if dtype == core.VarDesc.VarType.COMPLEX64: return core.VarDesc.VarType.FP32 elif dtype", ". Returns: Tensor: a tensor containing real values of the", "is_complex_dtype = (dtype == core.VarDesc.VarType.COMPLEX64 or dtype == core.VarDesc.VarType.COMPLEX128) return", "dtype='float32') y = paddle.arange(1, 5, dtype='int32') print(paddle.is_floating_point(x)) # True print(paddle.is_floating_point(y))", "complex64 or complex128. 
name (str, optional): The default value is", "'imag') helper = LayerHelper('imag', **locals()) out = helper.create_variable_for_type_inference( dtype=_complex_to_real_dtype(helper.input_dtype())) helper.append_op(type='imag',", "optional): The default value is None. Normally there is no", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "tensor containing real values of the input tensor. Args: x", "could be complex64 or complex128. name (str, optional): The default", "there is no need for user to set this property.", "[4., 5., 6.]]) real_t = x.real() # Tensor(shape=[2, 3], dtype=float32,", "# [[1., 2., 3.], # [4., 5., 6.]]) \"\"\" if", "is_fp_dtype def is_integer(x): \"\"\"Return whether x is a tensor of", "paddle.real(x) # Tensor(shape=[2, 3], dtype=float32, place=CUDAPlace(0), stop_gradient=True, # [[1., 2.,", "core.VarDesc.VarType.COMPLEX64 elif dtype == core.VarDesc.VarType.FP64: return core.VarDesc.VarType.COMPLEX128 else: return dtype", "return is_complex_dtype def is_floating_point(x): \"\"\" Returns whether the dtype of", "[[(1+6j), (2+5j), (3+4j)], # [(4+3j), (5+2j), (6+1j)]]) real_res = paddle.real(x)", "# noqa: F401 import paddle from paddle import _C_ops from", "LayerHelper from ..fluid.data_feeder import check_variable_and_dtype # TODO: define functions to", "Returns: Tensor: a tensor containing imaginary values of the input", "2., 1.]]) \"\"\" if in_dygraph_mode(): return _C_ops.final_state_imag(x) if _in_legacy_dygraph(): return", "place=CUDAPlace(0), stop_gradient=True, # [[6., 5., 4.], # [3., 2., 1.]])", "1.]]) \"\"\" if in_dygraph_mode(): return _C_ops.final_state_imag(x) if _in_legacy_dygraph(): return _C_ops.imag(x)", "None. 
Normally there is no need for user to set", "_complex_to_real_dtype(dtype): if dtype == core.VarDesc.VarType.COMPLEX64: return core.VarDesc.VarType.FP32 elif dtype ==", "Variable from ..fluid.framework import _in_legacy_dygraph, in_dygraph_mode __all__ = [] def", "language governing permissions and # limitations under the License. from", "tensor. Examples: .. code-block:: python import paddle x = paddle.to_tensor(", "dtype=complex64, place=CUDAPlace(0), stop_gradient=True, # [[(1+6j), (2+5j), (3+4j)], # [(4+3j), (5+2j),", "3]) print(paddle.is_integer(x)) # True \"\"\" if not isinstance(x, (paddle.Tensor, paddle.static.Variable)):", "use this file except in compliance with the License. #", "place=CUDAPlace(0), stop_gradient=True, # [[(1+6j), (2+5j), (3+4j)], # [(4+3j), (5+2j), (6+1j)]])", "LayerHelper('imag', **locals()) out = helper.create_variable_for_type_inference( dtype=_complex_to_real_dtype(helper.input_dtype())) helper.append_op(type='imag', inputs={'X': x}, outputs={'Out':", "set this property. For more information, please refer to :ref:`api_guide_Name`", "# [[6., 5., 4.], # [3., 2., 1.]]) imag_t =", "dtype == core.VarDesc.VarType.BF16) return is_fp_dtype def is_integer(x): \"\"\"Return whether x", "noqa: F401 import paddle from paddle import _C_ops from paddle.static", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "helper = LayerHelper('imag', **locals()) out = helper.create_variable_for_type_inference( dtype=_complex_to_real_dtype(helper.input_dtype())) helper.append_op(type='imag', inputs={'X':", "dtype=_complex_to_real_dtype(helper.input_dtype())) helper.append_op(type='real', inputs={'X': x}, outputs={'Out': out}) return out def imag(x,", "License. # You may obtain a copy of the License", "paddle.to_tensor([1 + 2j, 3 + 4j]) print(paddle.is_integer(x)) # False x", "under the License is distributed on an \"AS IS\" BASIS,", "# limitations under the License. 
from __future__ import print_function from", "of the input is complex data type, otherwise false. Examples:", "License for the specific language governing permissions and # limitations", "print(paddle.is_complex(x)) # False x = paddle.to_tensor([1, 2, 3]) print(paddle.is_complex(x)) #", ".. code-block:: python import paddle x = paddle.arange(1., 5., dtype='float32')", "..framework import core from ..fluid.layer_helper import LayerHelper from ..fluid.data_feeder import", "a tensor of complex data type(complex64 or complex128). Args: x", "Reserved. # # Licensed under the Apache License, Version 2.0", "return _C_ops.final_state_imag(x) if _in_legacy_dygraph(): return _C_ops.imag(x) check_variable_and_dtype(x, 'x', ['complex64', 'complex128'],", "__all__ = [] def _complex_to_real_dtype(dtype): if dtype == core.VarDesc.VarType.COMPLEX64: return", "paddle.float32, paddle.float16, and paddle.bfloat16. Args: x (Tensor): The input tensor.", "'complex128'], 'imag') helper = LayerHelper('imag', **locals()) out = helper.create_variable_for_type_inference( dtype=_complex_to_real_dtype(helper.input_dtype()))", "dtype == core.VarDesc.VarType.COMPLEX128: return core.VarDesc.VarType.FP64 else: return dtype def _real_to_complex_dtype(dtype):", "floating type, otherwise false. Examples: .. code-block:: python import paddle", "x = paddle.to_tensor( [[1 + 6j, 2 + 5j, 3", "(3+4j)], # [(4+3j), (5+2j), (6+1j)]]) real_res = paddle.real(x) # Tensor(shape=[2,", "+ 3j, 5 + 2j, 6 + 1j]]) # Tensor(shape=[2,", "helper = LayerHelper('real', **locals()) out = helper.create_variable_for_type_inference( dtype=_complex_to_real_dtype(helper.input_dtype())) helper.append_op(type='real', inputs={'X':", "3.], # [4., 5., 6.]]) real_t = x.real() # Tensor(shape=[2,", "in compliance with the License. # You may obtain a", "refer to :ref:`api_guide_Name` . 
Returns: Tensor: a tensor containing imaginary", "== core.VarDesc.VarType.UINT8 or dtype == core.VarDesc.VarType.INT8 or dtype == core.VarDesc.VarType.INT16", "software # distributed under the License is distributed on an", "x.imag() # Tensor(shape=[2, 3], dtype=float32, place=CUDAPlace(0), stop_gradient=True, # [[6., 5.,", "return core.VarDesc.VarType.FP64 else: return dtype def _real_to_complex_dtype(dtype): if dtype ==", "if _in_legacy_dygraph(): return _C_ops.imag(x) check_variable_and_dtype(x, 'x', ['complex64', 'complex128'], 'imag') helper", "core.VarDesc.VarType.FP32 or dtype == core.VarDesc.VarType.FP64 or dtype == core.VarDesc.VarType.FP16 or", "imaginary values of the input tensor. Examples: .. code-block:: python", "'x', ['complex64', 'complex128'], 'imag') helper = LayerHelper('imag', **locals()) out =", "paddle.float16, and paddle.bfloat16. Args: x (Tensor): The input tensor. Returns:", "code-block:: python import paddle x = paddle.to_tensor( [[1 + 6j,", "stop_gradient=True, # [[6., 5., 4.], # [3., 2., 1.]]) imag_t", "Tensor: a tensor containing imaginary values of the input tensor.", "core from ..fluid.layer_helper import LayerHelper from ..fluid.data_feeder import check_variable_and_dtype #", "# [4., 5., 6.]]) \"\"\" if in_dygraph_mode(): return _C_ops.final_state_real(x) if", "tensor containing imaginary values of input tensor. Args: x (Tensor):", "core.VarDesc.VarType.FP32 elif dtype == core.VarDesc.VarType.COMPLEX128: return core.VarDesc.VarType.FP64 else: return dtype", "# [[(1+6j), (2+5j), (3+4j)], # [(4+3j), (5+2j), (6+1j)]]) real_res =", "= paddle.to_tensor([1.1, 1.2]) print(paddle.is_complex(x)) # False x = paddle.to_tensor([1, 2,", "received type of x: {}\".format( type(x))) dtype = x.dtype is_complex_dtype", "2, 3]) print(paddle.is_integer(x)) # True \"\"\" if not isinstance(x, (paddle.Tensor,", "5., dtype='float32') y = paddle.arange(1, 5, dtype='int32') print(paddle.is_floating_point(x)) # True", "of the input tensor. 
Args: x (Tensor): the input tensor,", "# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. #", "# [(4+3j), (5+2j), (6+1j)]]) imag_res = paddle.imag(x) # Tensor(shape=[2, 3],", "property. For more information, please refer to :ref:`api_guide_Name` . Returns:", "# [[6., 5., 4.], # [3., 2., 1.]]) \"\"\" if", "and # limitations under the License. from __future__ import print_function", "= paddle.to_tensor([1.1, 1.2]) print(paddle.is_integer(x)) # False x = paddle.to_tensor([1, 2,", "the input tensor. Args: x (Tensor): the input tensor, its", "\"\"\" if in_dygraph_mode(): return _C_ops.final_state_real(x) if _in_legacy_dygraph(): return _C_ops.real(x) check_variable_and_dtype(x,", "if not isinstance(x, (paddle.Tensor, paddle.static.Variable)): raise TypeError(\"Expected Tensor, but received", "one of paddle.float64, paddle.float32, paddle.float16, and paddle.bfloat16. Args: x (Tensor):", "or dtype == core.VarDesc.VarType.FP64 or dtype == core.VarDesc.VarType.FP16 or dtype", "'complex128'], 'real') helper = LayerHelper('real', **locals()) out = helper.create_variable_for_type_inference( dtype=_complex_to_real_dtype(helper.input_dtype()))", "= paddle.imag(x) # Tensor(shape=[2, 3], dtype=float32, place=CUDAPlace(0), stop_gradient=True, # [[6.,", "\"\"\"Return whether x is a tensor of complex data type(complex64", "TODO: define functions to get tensor attributes from ..fluid.layers import", "print(paddle.is_complex(x)) # False \"\"\" if not isinstance(x, (paddle.Tensor, paddle.static.Variable)): raise", "# [(4+3j), (5+2j), (6+1j)]]) real_res = paddle.real(x) # Tensor(shape=[2, 3],", "1.2]) print(paddle.is_complex(x)) # False x = paddle.to_tensor([1, 2, 3]) print(paddle.is_complex(x))", "import shape # noqa: F401 import paddle from paddle import", "OF ANY KIND, either express or implied. 
# See the", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "== core.VarDesc.VarType.FP16 or dtype == core.VarDesc.VarType.BF16) return is_fp_dtype def is_integer(x):", "code-block:: python import paddle x = paddle.arange(1., 5., dtype='float32') y", "is complex data type, otherwise false. Examples: .. code-block:: python", "input tensor. Examples: .. code-block:: python import paddle x =", "ANY KIND, either express or implied. # See the License", "See the License for the specific language governing permissions and", "[3., 2., 1.]]) \"\"\" if in_dygraph_mode(): return _C_ops.final_state_imag(x) if _in_legacy_dygraph():", "else: return dtype def _real_to_complex_dtype(dtype): if dtype == core.VarDesc.VarType.FP32: return", "data type(complex64 or complex128). Args: x (Tensor): The input tensor.", "isinstance(x, (paddle.Tensor, paddle.static.Variable)): raise TypeError(\"Expected Tensor, but received type of", "return _C_ops.imag(x) check_variable_and_dtype(x, 'x', ['complex64', 'complex128'], 'imag') helper = LayerHelper('imag',", "the License. # You may obtain a copy of the", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "for the specific language governing permissions and # limitations under", "[(4+3j), (5+2j), (6+1j)]]) imag_res = paddle.imag(x) # Tensor(shape=[2, 3], dtype=float32,", "import paddle x = paddle.to_tensor( [[1 + 6j, 2 +", "to in writing, software # distributed under the License is", "import check_variable_and_dtype # TODO: define functions to get tensor attributes", "_in_legacy_dygraph(): return _C_ops.real(x) check_variable_and_dtype(x, 'x', ['complex64', 'complex128'], 'real') helper =", "# See the License for the specific language governing permissions", "(6+1j)]]) real_res = paddle.real(x) # Tensor(shape=[2, 3], dtype=float32, place=CUDAPlace(0), stop_gradient=True,", "of paddle.float64, paddle.float32, paddle.float16, and paddle.bfloat16. 
Args: x (Tensor): The", "or agreed to in writing, software # distributed under the", "1j]]) # Tensor(shape=[2, 3], dtype=complex64, place=CUDAPlace(0), stop_gradient=True, # [[(1+6j), (2+5j),", "required by applicable law or agreed to in writing, software", "6j, 2 + 5j, 3 + 4j], [4 + 3j,", "functions to get tensor attributes from ..fluid.layers import rank #", "x = paddle.to_tensor([1 + 2j, 3 + 4j]) print(paddle.is_complex(x)) #", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "received type of x: {}\".format( type(x))) dtype = x.dtype is_fp_dtype", "with the License. # You may obtain a copy of", "paddle x = paddle.to_tensor( [[1 + 6j, 2 + 5j,", "# False x = paddle.to_tensor([1.1, 1.2]) print(paddle.is_integer(x)) # False x", "tensor containing real values of the input tensor. Examples: ..", "Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. # #", "import Variable from ..fluid.framework import _in_legacy_dygraph, in_dygraph_mode __all__ = []", "[4., 5., 6.]]) \"\"\" if in_dygraph_mode(): return _C_ops.final_state_real(x) if _in_legacy_dygraph():", "x (Tensor): The input tensor. Returns: bool: True if the", "whether x is a tensor of complex data type(complex64 or", "if the data type of the input is complex data", "and paddle.bfloat16. Args: x (Tensor): The input tensor. Returns: bool:", "x: {}\".format( type(x))) dtype = x.dtype is_fp_dtype = (dtype ==", "data type. Args: x (Tensor): The input tensor. Returns: bool:", "paddle x = paddle.arange(1., 5., dtype='float32') y = paddle.arange(1, 5,", "compliance with the License. # You may obtain a copy", "All Rights Reserved. # # Licensed under the Apache License,", "agreed to in writing, software # distributed under the License", "== core.VarDesc.VarType.FP32: return core.VarDesc.VarType.COMPLEX64 elif dtype == core.VarDesc.VarType.FP64: return core.VarDesc.VarType.COMPLEX128", "containing real values of the input tensor. Examples: .. 
code-block::", "1.]]) imag_t = x.imag() # Tensor(shape=[2, 3], dtype=float32, place=CUDAPlace(0), stop_gradient=True,", "distributed under the License is distributed on an \"AS IS\"", "imag_res = paddle.imag(x) # Tensor(shape=[2, 3], dtype=float32, place=CUDAPlace(0), stop_gradient=True, #", "but received type of x: {}\".format( type(x))) dtype = x.dtype", "false. Examples: .. code-block:: python import paddle x = paddle.arange(1.,", "2020 PaddlePaddle Authors. All Rights Reserved. # # Licensed under", "of the input tensor. Examples: .. code-block:: python import paddle", "or dtype == core.VarDesc.VarType.INT64) return is_int_dtype def real(x, name=None): \"\"\"", "express or implied. # See the License for the specific", "except in compliance with the License. # You may obtain", "# [3., 2., 1.]]) imag_t = x.imag() # Tensor(shape=[2, 3],", "core.VarDesc.VarType.COMPLEX64: return core.VarDesc.VarType.FP32 elif dtype == core.VarDesc.VarType.COMPLEX128: return core.VarDesc.VarType.FP64 else:", "_in_legacy_dygraph, in_dygraph_mode __all__ = [] def _complex_to_real_dtype(dtype): if dtype ==", "Tensor, but received type of x: {}\".format( type(x))) dtype =", "is floating type, otherwise false. Examples: .. code-block:: python import", "Licensed under the Apache License, Version 2.0 (the \"License\"); #", "paddle.to_tensor([1, 2, 3]) print(paddle.is_integer(x)) # True \"\"\" if not isinstance(x,", "be complex64 or complex128. name (str, optional): The default value", "== core.VarDesc.VarType.COMPLEX128) return is_complex_dtype def is_floating_point(x): \"\"\" Returns whether the", "5j, 3 + 4j], [4 + 3j, 5 + 2j,", "not use this file except in compliance with the License.", "paddle.imag(x) # Tensor(shape=[2, 3], dtype=float32, place=CUDAPlace(0), stop_gradient=True, # [[6., 5.,", "\"\"\" if in_dygraph_mode(): return _C_ops.final_state_imag(x) if _in_legacy_dygraph(): return _C_ops.imag(x) check_variable_and_dtype(x,", "refer to :ref:`api_guide_Name` . 
Returns: Tensor: a tensor containing real", "(paddle.Tensor, paddle.static.Variable)): raise TypeError(\"Expected Tensor, but received type of x:", "paddle x = paddle.to_tensor([1 + 2j, 3 + 4j]) print(paddle.is_complex(x))", "under the License. from __future__ import print_function from ..framework import", "writing, software # distributed under the License is distributed on", "you may not use this file except in compliance with", "import core from ..fluid.layer_helper import LayerHelper from ..fluid.data_feeder import check_variable_and_dtype", "outputs={'Out': out}) return out def imag(x, name=None): \"\"\" Returns a", "# Licensed under the Apache License, Version 2.0 (the \"License\");", "import rank # noqa: F401 from ..fluid.layers import shape #", "real(x, name=None): \"\"\" Returns a new tensor containing real values", "if dtype == core.VarDesc.VarType.COMPLEX64: return core.VarDesc.VarType.FP32 elif dtype == core.VarDesc.VarType.COMPLEX128:", "input is integer data type, otherwise false. Examples: .. code-block::", "(dtype == core.VarDesc.VarType.UINT8 or dtype == core.VarDesc.VarType.INT8 or dtype ==", "\"\"\" Returns a new tensor containing real values of the", "a new tensor containing real values of the input tensor.", "or dtype == core.VarDesc.VarType.INT16 or dtype == core.VarDesc.VarType.INT32 or dtype", "# False x = paddle.to_tensor([1, 2, 3]) print(paddle.is_integer(x)) # True", "to set this property. For more information, please refer to", "x = paddle.to_tensor([1, 2, 3]) print(paddle.is_integer(x)) # True \"\"\" if", "of x: {}\".format( type(x))) dtype = x.dtype is_fp_dtype = (dtype", "Returns a new tensor containing imaginary values of input tensor.", "CONDITIONS OF ANY KIND, either express or implied. # See", "import paddle x = paddle.to_tensor([1 + 2j, 3 + 4j])", ":ref:`api_guide_Name` . 
Returns: Tensor: a tensor containing real values of", "3], dtype=float32, place=CUDAPlace(0), stop_gradient=True, # [[1., 2., 3.], # [4.,", "is no need for user to set this property. For", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "== core.VarDesc.VarType.FP64: return core.VarDesc.VarType.COMPLEX128 else: return dtype def is_complex(x): \"\"\"Return", "otherwise false. Examples: .. code-block:: python import paddle x =", "x = paddle.to_tensor([1.1, 1.2]) print(paddle.is_complex(x)) # False x = paddle.to_tensor([1,", "(Tensor): the input tensor, its data type could be complex64", "value is None. Normally there is no need for user", "code-block:: python import paddle x = paddle.to_tensor([1 + 2j, 3", "3j, 5 + 2j, 6 + 1j]]) # Tensor(shape=[2, 3],", "or dtype == core.VarDesc.VarType.COMPLEX128) return is_complex_dtype def is_floating_point(x): \"\"\" Returns", "dtype='int32') print(paddle.is_floating_point(x)) # True print(paddle.is_floating_point(y)) # False \"\"\" if not", "== core.VarDesc.VarType.INT8 or dtype == core.VarDesc.VarType.INT16 or dtype == core.VarDesc.VarType.INT32", "type could be complex64 or complex128. name (str, optional): The", "complex data type(complex64 or complex128). 
Args: x (Tensor): The input", "paddle.to_tensor([1.1, 1.2]) print(paddle.is_complex(x)) # False x = paddle.to_tensor([1, 2, 3])", "4j]) print(paddle.is_integer(x)) # False x = paddle.to_tensor([1.1, 1.2]) print(paddle.is_integer(x)) #", "True if the data type of the input is integer", "import paddle from paddle import _C_ops from paddle.static import Variable", "_real_to_complex_dtype(dtype): if dtype == core.VarDesc.VarType.FP32: return core.VarDesc.VarType.COMPLEX64 elif dtype ==", "TypeError(\"Expected Tensor, but received type of x: {}\".format( type(x))) dtype", "elif dtype == core.VarDesc.VarType.COMPLEX128: return core.VarDesc.VarType.FP64 else: return dtype def", "True if the dtype of `x` is floating type, otherwise", "type of the input is integer data type, otherwise false.", "out}) return out def imag(x, name=None): \"\"\" Returns a new", "out def imag(x, name=None): \"\"\" Returns a new tensor containing", "paddle.to_tensor( [[1 + 6j, 2 + 5j, 3 + 4j],", "helper.append_op(type='real', inputs={'X': x}, outputs={'Out': out}) return out def imag(x, name=None):", "data type could be complex64 or complex128. name (str, optional):", "containing real values of the input tensor. Args: x (Tensor):", "`x` is one of paddle.float64, paddle.float32, paddle.float16, and paddle.bfloat16. Args:", "check_variable_and_dtype(x, 'x', ['complex64', 'complex128'], 'real') helper = LayerHelper('real', **locals()) out", "or dtype == core.VarDesc.VarType.INT32 or dtype == core.VarDesc.VarType.INT64) return is_int_dtype", "# [4., 5., 6.]]) real_t = x.real() # Tensor(shape=[2, 3],", "if in_dygraph_mode(): return _C_ops.final_state_real(x) if _in_legacy_dygraph(): return _C_ops.real(x) check_variable_and_dtype(x, 'x',", "[[6., 5., 4.], # [3., 2., 1.]]) \"\"\" if in_dygraph_mode():", "OR CONDITIONS OF ANY KIND, either express or implied. #", "dtype = x.dtype is_fp_dtype = (dtype == core.VarDesc.VarType.FP32 or dtype", "tensor of integeral data type. 
Args: x (Tensor): The input", "need for user to set this property. For more information,", "core.VarDesc.VarType.INT64) return is_int_dtype def real(x, name=None): \"\"\" Returns a new", "the License is distributed on an \"AS IS\" BASIS, #", "is integer data type, otherwise false. Examples: .. code-block:: python", "Returns: Tensor: a tensor containing real values of the input", "False x = paddle.to_tensor([1, 2, 3]) print(paddle.is_complex(x)) # False \"\"\"", "if dtype == core.VarDesc.VarType.FP32: return core.VarDesc.VarType.COMPLEX64 elif dtype == core.VarDesc.VarType.FP64:", "Returns whether the dtype of `x` is one of paddle.float64,", "type of x: {}\".format( type(x))) dtype = x.dtype is_int_dtype =", "The input tensor. Returns: bool: True if the dtype of", "+ 2j, 6 + 1j]]) # Tensor(shape=[2, 3], dtype=complex64, place=CUDAPlace(0),", "is_integer(x): \"\"\"Return whether x is a tensor of integeral data", "a tensor containing real values of the input tensor. Examples:", "paddle.float64, paddle.float32, paddle.float16, and paddle.bfloat16. Args: x (Tensor): The input", "get tensor attributes from ..fluid.layers import rank # noqa: F401", "this property. For more information, please refer to :ref:`api_guide_Name` .", "input tensor. Args: x (Tensor): the input tensor, its data", "for user to set this property. For more information, please", "not isinstance(x, (paddle.Tensor, paddle.static.Variable)): raise TypeError(\"Expected Tensor, but received type", "\"\"\"Return whether x is a tensor of integeral data type.", "# [[(1+6j), (2+5j), (3+4j)], # [(4+3j), (5+2j), (6+1j)]]) imag_res =", "4.], # [3., 2., 1.]]) \"\"\" if in_dygraph_mode(): return _C_ops.final_state_imag(x)", "type, otherwise false. Examples: .. 
code-block:: python import paddle x", "check_variable_and_dtype # TODO: define functions to get tensor attributes from", "dtype == core.VarDesc.VarType.INT8 or dtype == core.VarDesc.VarType.INT16 or dtype ==", "type of x: {}\".format( type(x))) dtype = x.dtype is_fp_dtype =", "law or agreed to in writing, software # distributed under", "= (dtype == core.VarDesc.VarType.COMPLEX64 or dtype == core.VarDesc.VarType.COMPLEX128) return is_complex_dtype", "2j, 3 + 4j]) print(paddle.is_integer(x)) # False x = paddle.to_tensor([1.1,", "of the input is integer data type, otherwise false. Examples:", "dtype = x.dtype is_int_dtype = (dtype == core.VarDesc.VarType.UINT8 or dtype", "to :ref:`api_guide_Name` . Returns: Tensor: a tensor containing real values", "..fluid.layers import rank # noqa: F401 from ..fluid.layers import shape", "the input tensor. Examples: .. code-block:: python import paddle x", "(dtype == core.VarDesc.VarType.COMPLEX64 or dtype == core.VarDesc.VarType.COMPLEX128) return is_complex_dtype def", "6 + 1j]]) # Tensor(shape=[2, 3], dtype=complex64, place=CUDAPlace(0), stop_gradient=True, #", "to :ref:`api_guide_Name` . 
Returns: Tensor: a tensor containing imaginary values", "Returns a new tensor containing real values of the input", "6.]]) \"\"\" if in_dygraph_mode(): return _C_ops.final_state_real(x) if _in_legacy_dygraph(): return _C_ops.real(x)", "(dtype == core.VarDesc.VarType.FP32 or dtype == core.VarDesc.VarType.FP64 or dtype ==", "_C_ops.real(x) check_variable_and_dtype(x, 'x', ['complex64', 'complex128'], 'real') helper = LayerHelper('real', **locals())", "5., 6.]]) real_t = x.real() # Tensor(shape=[2, 3], dtype=float32, place=CUDAPlace(0),", "# [[1., 2., 3.], # [4., 5., 6.]]) real_t =", "from ..fluid.layers import rank # noqa: F401 from ..fluid.layers import", "False \"\"\" if not isinstance(x, (paddle.Tensor, paddle.static.Variable)): raise TypeError(\"Expected Tensor,", "# True \"\"\" if not isinstance(x, (paddle.Tensor, paddle.static.Variable)): raise TypeError(\"Expected", "2 + 5j, 3 + 4j], [4 + 3j, 5", "True print(paddle.is_floating_point(y)) # False \"\"\" if not isinstance(x, (paddle.Tensor, paddle.static.Variable)):", "3.], # [4., 5., 6.]]) \"\"\" if in_dygraph_mode(): return _C_ops.final_state_real(x)", "new tensor containing real values of the input tensor. Args:", "tensor, its data type could be complex64 or complex128. name", "1.2]) print(paddle.is_integer(x)) # False x = paddle.to_tensor([1, 2, 3]) print(paddle.is_integer(x))", "6.]]) real_t = x.real() # Tensor(shape=[2, 3], dtype=float32, place=CUDAPlace(0), stop_gradient=True,", "may obtain a copy of the License at # #", "(3+4j)], # [(4+3j), (5+2j), (6+1j)]]) imag_res = paddle.imag(x) # Tensor(shape=[2,", "dtype of `x` is one of paddle.float64, paddle.float32, paddle.float16, and", "core.VarDesc.VarType.FP64 else: return dtype def _real_to_complex_dtype(dtype): if dtype == core.VarDesc.VarType.FP32:", "complex128). Args: x (Tensor): The input tensor. 
Returns: bool: True", "# Tensor(shape=[2, 3], dtype=float32, place=CUDAPlace(0), stop_gradient=True, # [[1., 2., 3.],", "is a tensor of complex data type(complex64 or complex128). Args:", "type. Args: x (Tensor): The input tensor. Returns: bool: True", "a tensor of integeral data type. Args: x (Tensor): The", "check_variable_and_dtype(x, 'x', ['complex64', 'complex128'], 'imag') helper = LayerHelper('imag', **locals()) out", "dtype == core.VarDesc.VarType.COMPLEX64: return core.VarDesc.VarType.FP32 elif dtype == core.VarDesc.VarType.COMPLEX128: return", "attributes from ..fluid.layers import rank # noqa: F401 from ..fluid.layers", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "\"\"\" Returns whether the dtype of `x` is one of", "type(x))) dtype = x.dtype is_fp_dtype = (dtype == core.VarDesc.VarType.FP32 or", "input tensor, its data type could be complex64 or complex128.", "if _in_legacy_dygraph(): return _C_ops.real(x) check_variable_and_dtype(x, 'x', ['complex64', 'complex128'], 'real') helper", "dtype == core.VarDesc.VarType.COMPLEX128) return is_complex_dtype def is_floating_point(x): \"\"\" Returns whether", "stop_gradient=True, # [[6., 5., 4.], # [3., 2., 1.]]) \"\"\"", "_C_ops.final_state_real(x) if _in_legacy_dygraph(): return _C_ops.real(x) check_variable_and_dtype(x, 'x', ['complex64', 'complex128'], 'real')", "x = paddle.to_tensor([1, 2, 3]) print(paddle.is_complex(x)) # False \"\"\" if", "is one of paddle.float64, paddle.float32, paddle.float16, and paddle.bfloat16. Args: x", "may not use this file except in compliance with the", "paddle.to_tensor([1 + 2j, 3 + 4j]) print(paddle.is_complex(x)) # True x", "= paddle.to_tensor([1 + 2j, 3 + 4j]) print(paddle.is_integer(x)) # False", "== core.VarDesc.VarType.INT64) return is_int_dtype def real(x, name=None): \"\"\" Returns a", ":ref:`api_guide_Name` . 
Returns: Tensor: a tensor containing imaginary values of", "_in_legacy_dygraph(): return _C_ops.imag(x) check_variable_and_dtype(x, 'x', ['complex64', 'complex128'], 'imag') helper =", "else: return dtype def is_complex(x): \"\"\"Return whether x is a", "# Tensor(shape=[2, 3], dtype=float32, place=CUDAPlace(0), stop_gradient=True, # [[6., 5., 4.],", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "input is complex data type, otherwise false. Examples: .. code-block::", "the data type of the input is integer data type,", "of integeral data type. Args: x (Tensor): The input tensor.", "is_complex_dtype def is_floating_point(x): \"\"\" Returns whether the dtype of `x`", "this file except in compliance with the License. # You", "def _real_to_complex_dtype(dtype): if dtype == core.VarDesc.VarType.FP32: return core.VarDesc.VarType.COMPLEX64 elif dtype", "return _C_ops.final_state_real(x) if _in_legacy_dygraph(): return _C_ops.real(x) check_variable_and_dtype(x, 'x', ['complex64', 'complex128'],", "dtype def _real_to_complex_dtype(dtype): if dtype == core.VarDesc.VarType.FP32: return core.VarDesc.VarType.COMPLEX64 elif", "a tensor containing imaginary values of the input tensor. Examples:", "the dtype of `x` is one of paddle.float64, paddle.float32, paddle.float16,", "(Tensor): The input tensor. Returns: bool: True if the data", "dtype of `x` is floating type, otherwise false. Examples: ..", "type(x))) dtype = x.dtype is_int_dtype = (dtype == core.VarDesc.VarType.UINT8 or", "dtype == core.VarDesc.VarType.INT16 or dtype == core.VarDesc.VarType.INT32 or dtype ==", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "or dtype == core.VarDesc.VarType.FP16 or dtype == core.VarDesc.VarType.BF16) return is_fp_dtype", "F401 from ..fluid.layers import shape # noqa: F401 import paddle", "a new tensor containing imaginary values of input tensor. 
Args:", "# # Licensed under the Apache License, Version 2.0 (the", "= (dtype == core.VarDesc.VarType.UINT8 or dtype == core.VarDesc.VarType.INT8 or dtype", "return out def imag(x, name=None): \"\"\" Returns a new tensor", "file except in compliance with the License. # You may", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "or dtype == core.VarDesc.VarType.BF16) return is_fp_dtype def is_integer(x): \"\"\"Return whether", "type(x))) dtype = x.dtype is_complex_dtype = (dtype == core.VarDesc.VarType.COMPLEX64 or", "of `x` is one of paddle.float64, paddle.float32, paddle.float16, and paddle.bfloat16.", "== core.VarDesc.VarType.INT16 or dtype == core.VarDesc.VarType.INT32 or dtype == core.VarDesc.VarType.INT64)", "Normally there is no need for user to set this", "rank # noqa: F401 from ..fluid.layers import shape # noqa:", "_C_ops from paddle.static import Variable from ..fluid.framework import _in_legacy_dygraph, in_dygraph_mode", "[[1., 2., 3.], # [4., 5., 6.]]) real_t = x.real()", "5., 4.], # [3., 2., 1.]]) \"\"\" if in_dygraph_mode(): return", "tensor of complex data type(complex64 or complex128). Args: x (Tensor):", "imaginary values of input tensor. 
Args: x (Tensor): the input", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "'real') helper = LayerHelper('real', **locals()) out = helper.create_variable_for_type_inference( dtype=_complex_to_real_dtype(helper.input_dtype())) helper.append_op(type='real',", "= x.dtype is_fp_dtype = (dtype == core.VarDesc.VarType.FP32 or dtype ==", "4.], # [3., 2., 1.]]) imag_t = x.imag() # Tensor(shape=[2,", "{}\".format( type(x))) dtype = x.dtype is_fp_dtype = (dtype == core.VarDesc.VarType.FP32", "+ 2j, 3 + 4j]) print(paddle.is_complex(x)) # True x =", "return core.VarDesc.VarType.COMPLEX64 elif dtype == core.VarDesc.VarType.FP64: return core.VarDesc.VarType.COMPLEX128 else: return", "**locals()) out = helper.create_variable_for_type_inference( dtype=_complex_to_real_dtype(helper.input_dtype())) helper.append_op(type='imag', inputs={'X': x}, outputs={'Out': out})", "python import paddle x = paddle.arange(1., 5., dtype='float32') y =", "# False x = paddle.to_tensor([1, 2, 3]) print(paddle.is_complex(x)) # False", "is a tensor of integeral data type. Args: x (Tensor):", "integer data type, otherwise false. Examples: .. 
code-block:: python import", "[3., 2., 1.]]) imag_t = x.imag() # Tensor(shape=[2, 3], dtype=float32,", "out = helper.create_variable_for_type_inference( dtype=_complex_to_real_dtype(helper.input_dtype())) helper.append_op(type='real', inputs={'X': x}, outputs={'Out': out}) return", "def is_integer(x): \"\"\"Return whether x is a tensor of integeral", "= x.real() # Tensor(shape=[2, 3], dtype=float32, place=CUDAPlace(0), stop_gradient=True, # [[1.,", "== core.VarDesc.VarType.COMPLEX64 or dtype == core.VarDesc.VarType.COMPLEX128) return is_complex_dtype def is_floating_point(x):", "True \"\"\" if not isinstance(x, (paddle.Tensor, paddle.static.Variable)): raise TypeError(\"Expected Tensor,", "+ 4j], [4 + 3j, 5 + 2j, 6 +", "return dtype def is_complex(x): \"\"\"Return whether x is a tensor", "5, dtype='int32') print(paddle.is_floating_point(x)) # True print(paddle.is_floating_point(y)) # False \"\"\" if", ".. code-block:: python import paddle x = paddle.to_tensor([1 + 2j,", "4j], [4 + 3j, 5 + 2j, 6 + 1j]])", "imag(x, name=None): \"\"\" Returns a new tensor containing imaginary values", "_C_ops.final_state_imag(x) if _in_legacy_dygraph(): return _C_ops.imag(x) check_variable_and_dtype(x, 'x', ['complex64', 'complex128'], 'imag')", "x is a tensor of complex data type(complex64 or complex128).", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "complex data type, otherwise false. Examples: .. code-block:: python import", "core.VarDesc.VarType.FP64 or dtype == core.VarDesc.VarType.FP16 or dtype == core.VarDesc.VarType.BF16) return", "of complex data type(complex64 or complex128). Args: x (Tensor): The", "= paddle.to_tensor([1 + 2j, 3 + 4j]) print(paddle.is_complex(x)) # True", "# False \"\"\" if not isinstance(x, (paddle.Tensor, paddle.static.Variable)): raise TypeError(\"Expected", "or implied. # See the License for the specific language", "governing permissions and # limitations under the License. 
from __future__", "Rights Reserved. # # Licensed under the Apache License, Version", "elif dtype == core.VarDesc.VarType.FP64: return core.VarDesc.VarType.COMPLEX128 else: return dtype def", "x: {}\".format( type(x))) dtype = x.dtype is_complex_dtype = (dtype ==", "KIND, either express or implied. # See the License for", "specific language governing permissions and # limitations under the License.", "+ 5j, 3 + 4j], [4 + 3j, 5 +", "\"\"\" if not isinstance(x, (paddle.Tensor, paddle.static.Variable)): raise TypeError(\"Expected Tensor, but", "4j]) print(paddle.is_complex(x)) # True x = paddle.to_tensor([1.1, 1.2]) print(paddle.is_complex(x)) #", "dtype == core.VarDesc.VarType.FP64 or dtype == core.VarDesc.VarType.FP16 or dtype ==", "is_int_dtype = (dtype == core.VarDesc.VarType.UINT8 or dtype == core.VarDesc.VarType.INT8 or", "default value is None. Normally there is no need for", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "tensor. Returns: bool: True if the data type of the", "noqa: F401 from ..fluid.layers import shape # noqa: F401 import", "the data type of the input is complex data type,", "paddle x = paddle.to_tensor([1 + 2j, 3 + 4j]) print(paddle.is_integer(x))", "from ..fluid.layer_helper import LayerHelper from ..fluid.data_feeder import check_variable_and_dtype # TODO:", "# True x = paddle.to_tensor([1.1, 1.2]) print(paddle.is_complex(x)) # False x", "== core.VarDesc.VarType.BF16) return is_fp_dtype def is_integer(x): \"\"\"Return whether x is", "x.dtype is_complex_dtype = (dtype == core.VarDesc.VarType.COMPLEX64 or dtype == core.VarDesc.VarType.COMPLEX128)", "(the \"License\"); # you may not use this file except", "of x: {}\".format( type(x))) dtype = x.dtype is_complex_dtype = (dtype", "Examples: .. code-block:: python import paddle x = paddle.to_tensor([1 +", "limitations under the License. from __future__ import print_function from ..framework", "# you may not use this file except in compliance", "or complex128. 
name (str, optional): The default value is None.", "containing imaginary values of input tensor. Args: x (Tensor): the", "paddle.to_tensor([1.1, 1.2]) print(paddle.is_integer(x)) # False x = paddle.to_tensor([1, 2, 3])", "or dtype == core.VarDesc.VarType.INT8 or dtype == core.VarDesc.VarType.INT16 or dtype", "core.VarDesc.VarType.INT32 or dtype == core.VarDesc.VarType.INT64) return is_int_dtype def real(x, name=None):", "real values of the input tensor. Examples: .. code-block:: python", "dtype == core.VarDesc.VarType.FP16 or dtype == core.VarDesc.VarType.BF16) return is_fp_dtype def", "inputs={'X': x}, outputs={'Out': out}) return out def imag(x, name=None): \"\"\"", "F401 import paddle from paddle import _C_ops from paddle.static import", "real_res = paddle.real(x) # Tensor(shape=[2, 3], dtype=float32, place=CUDAPlace(0), stop_gradient=True, #", "permissions and # limitations under the License. from __future__ import", "bool: True if the data type of the input is", "import print_function from ..framework import core from ..fluid.layer_helper import LayerHelper", "x.dtype is_int_dtype = (dtype == core.VarDesc.VarType.UINT8 or dtype == core.VarDesc.VarType.INT8", "the dtype of `x` is floating type, otherwise false. Examples:", "paddle.arange(1, 5, dtype='int32') print(paddle.is_floating_point(x)) # True print(paddle.is_floating_point(y)) # False \"\"\"", "License. from __future__ import print_function from ..framework import core from", "core.VarDesc.VarType.BF16) return is_fp_dtype def is_integer(x): \"\"\"Return whether x is a", "# # Unless required by applicable law or agreed to", "dtype=float32, place=CUDAPlace(0), stop_gradient=True, # [[6., 5., 4.], # [3., 2.,", "dtype = x.dtype is_complex_dtype = (dtype == core.VarDesc.VarType.COMPLEX64 or dtype", "bool: True if the dtype of `x` is floating type,", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "complex128. name (str, optional): The default value is None. 
Normally", "print(paddle.is_integer(x)) # False x = paddle.to_tensor([1, 2, 3]) print(paddle.is_integer(x)) #", "# True print(paddle.is_floating_point(y)) # False \"\"\" if not isinstance(x, (paddle.Tensor,", "Version 2.0 (the \"License\"); # you may not use this", "import _in_legacy_dygraph, in_dygraph_mode __all__ = [] def _complex_to_real_dtype(dtype): if dtype", "data type, otherwise false. Examples: .. code-block:: python import paddle", "(Tensor): The input tensor. Returns: bool: True if the dtype", "print(paddle.is_floating_point(y)) # False \"\"\" if not isinstance(x, (paddle.Tensor, paddle.static.Variable)): raise", "place=CUDAPlace(0), stop_gradient=True, # [[1., 2., 3.], # [4., 5., 6.]])", "+ 4j]) print(paddle.is_integer(x)) # False x = paddle.to_tensor([1.1, 1.2]) print(paddle.is_integer(x))", "its data type could be complex64 or complex128. name (str,", "[[(1+6j), (2+5j), (3+4j)], # [(4+3j), (5+2j), (6+1j)]]) imag_res = paddle.imag(x)", "implied. # See the License for the specific language governing", "whether x is a tensor of integeral data type. Args:", "== core.VarDesc.VarType.FP32 or dtype == core.VarDesc.VarType.FP64 or dtype == core.VarDesc.VarType.FP16", "under the Apache License, Version 2.0 (the \"License\"); # you", "name (str, optional): The default value is None. Normally there", "return dtype def _real_to_complex_dtype(dtype): if dtype == core.VarDesc.VarType.FP32: return core.VarDesc.VarType.COMPLEX64", "def is_complex(x): \"\"\"Return whether x is a tensor of complex", "False x = paddle.to_tensor([1, 2, 3]) print(paddle.is_integer(x)) # True \"\"\"", "[[1 + 6j, 2 + 5j, 3 + 4j], [4", "# Tensor(shape=[2, 3], dtype=complex64, place=CUDAPlace(0), stop_gradient=True, # [[(1+6j), (2+5j), (3+4j)],", ".. 
code-block:: python import paddle x = paddle.to_tensor( [[1 +", "is_floating_point(x): \"\"\" Returns whether the dtype of `x` is one", "is_int_dtype def real(x, name=None): \"\"\" Returns a new tensor containing", "from ..fluid.data_feeder import check_variable_and_dtype # TODO: define functions to get", "by applicable law or agreed to in writing, software #", "Tensor(shape=[2, 3], dtype=float32, place=CUDAPlace(0), stop_gradient=True, # [[1., 2., 3.], #", "if in_dygraph_mode(): return _C_ops.final_state_imag(x) if _in_legacy_dygraph(): return _C_ops.imag(x) check_variable_and_dtype(x, 'x',", "(2+5j), (3+4j)], # [(4+3j), (5+2j), (6+1j)]]) real_res = paddle.real(x) #", "print(paddle.is_floating_point(x)) # True print(paddle.is_floating_point(y)) # False \"\"\" if not isinstance(x,", "= (dtype == core.VarDesc.VarType.FP32 or dtype == core.VarDesc.VarType.FP64 or dtype", "# [3., 2., 1.]]) \"\"\" if in_dygraph_mode(): return _C_ops.final_state_imag(x) if", "= LayerHelper('real', **locals()) out = helper.create_variable_for_type_inference( dtype=_complex_to_real_dtype(helper.input_dtype())) helper.append_op(type='real', inputs={'X': x},", "(2+5j), (3+4j)], # [(4+3j), (5+2j), (6+1j)]]) imag_res = paddle.imag(x) #", "(6+1j)]]) imag_res = paddle.imag(x) # Tensor(shape=[2, 3], dtype=float32, place=CUDAPlace(0), stop_gradient=True,", "of `x` is floating type, otherwise false. Examples: .. code-block::", "Examples: .. code-block:: python import paddle x = paddle.arange(1., 5.,", "no need for user to set this property. For more", "Args: x (Tensor): The input tensor. Returns: bool: True if", "import _C_ops from paddle.static import Variable from ..fluid.framework import _in_legacy_dygraph,", "stop_gradient=True, # [[1., 2., 3.], # [4., 5., 6.]]) real_t", "tensor. 
Args: x (Tensor): the input tensor, its data type", "tensor attributes from ..fluid.layers import rank # noqa: F401 from", "data type of the input is complex data type, otherwise", "..fluid.framework import _in_legacy_dygraph, in_dygraph_mode __all__ = [] def _complex_to_real_dtype(dtype): if", "The default value is None. Normally there is no need", "x = paddle.arange(1., 5., dtype='float32') y = paddle.arange(1, 5, dtype='int32')", "the input is integer data type, otherwise false. Examples: ..", "[[1., 2., 3.], # [4., 5., 6.]]) \"\"\" if in_dygraph_mode():", "2., 3.], # [4., 5., 6.]]) \"\"\" if in_dygraph_mode(): return", "= paddle.arange(1, 5, dtype='int32') print(paddle.is_floating_point(x)) # True print(paddle.is_floating_point(y)) # False", "= x.dtype is_int_dtype = (dtype == core.VarDesc.VarType.UINT8 or dtype ==", "paddle.to_tensor([1, 2, 3]) print(paddle.is_complex(x)) # False \"\"\" if not isinstance(x,", "[[6., 5., 4.], # [3., 2., 1.]]) imag_t = x.imag()", "= LayerHelper('imag', **locals()) out = helper.create_variable_for_type_inference( dtype=_complex_to_real_dtype(helper.input_dtype())) helper.append_op(type='imag', inputs={'X': x},", "imag_t = x.imag() # Tensor(shape=[2, 3], dtype=float32, place=CUDAPlace(0), stop_gradient=True, #", "of x: {}\".format( type(x))) dtype = x.dtype is_int_dtype = (dtype", "type of x: {}\".format( type(x))) dtype = x.dtype is_complex_dtype =", "from paddle.static import Variable from ..fluid.framework import _in_legacy_dygraph, in_dygraph_mode __all__", "core.VarDesc.VarType.COMPLEX128: return core.VarDesc.VarType.FP64 else: return dtype def _real_to_complex_dtype(dtype): if dtype", "[(4+3j), (5+2j), (6+1j)]]) real_res = paddle.real(x) # Tensor(shape=[2, 3], dtype=float32,", "['complex64', 'complex128'], 'real') helper = LayerHelper('real', **locals()) out = helper.create_variable_for_type_inference(", "helper.create_variable_for_type_inference( dtype=_complex_to_real_dtype(helper.input_dtype())) 
helper.append_op(type='real', inputs={'X': x}, outputs={'Out': out}) return out def", "3], dtype=complex64, place=CUDAPlace(0), stop_gradient=True, # [[(1+6j), (2+5j), (3+4j)], # [(4+3j),", "x: {}\".format( type(x))) dtype = x.dtype is_int_dtype = (dtype ==", "in_dygraph_mode(): return _C_ops.final_state_imag(x) if _in_legacy_dygraph(): return _C_ops.imag(x) check_variable_and_dtype(x, 'x', ['complex64',", "the License. from __future__ import print_function from ..framework import core", "dtype == core.VarDesc.VarType.FP32: return core.VarDesc.VarType.COMPLEX64 elif dtype == core.VarDesc.VarType.FP64: return", "x.dtype is_fp_dtype = (dtype == core.VarDesc.VarType.FP32 or dtype == core.VarDesc.VarType.FP64", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "from ..framework import core from ..fluid.layer_helper import LayerHelper from ..fluid.data_feeder", "Unless required by applicable law or agreed to in writing,", "information, please refer to :ref:`api_guide_Name` . Returns: Tensor: a tensor", "{}\".format( type(x))) dtype = x.dtype is_complex_dtype = (dtype == core.VarDesc.VarType.COMPLEX64", "(5+2j), (6+1j)]]) imag_res = paddle.imag(x) # Tensor(shape=[2, 3], dtype=float32, place=CUDAPlace(0),", "python import paddle x = paddle.to_tensor( [[1 + 6j, 2", "= paddle.to_tensor([1, 2, 3]) print(paddle.is_integer(x)) # True \"\"\" if not", "the input is complex data type, otherwise false. Examples: ..", "core.VarDesc.VarType.FP32: return core.VarDesc.VarType.COMPLEX64 elif dtype == core.VarDesc.VarType.FP64: return core.VarDesc.VarType.COMPLEX128 else:", "type(complex64 or complex128). Args: x (Tensor): The input tensor. 
Returns:", "..fluid.data_feeder import check_variable_and_dtype # TODO: define functions to get tensor", "3]) print(paddle.is_complex(x)) # False \"\"\" if not isinstance(x, (paddle.Tensor, paddle.static.Variable)):", "the specific language governing permissions and # limitations under the", "__future__ import print_function from ..framework import core from ..fluid.layer_helper import", "print(paddle.is_integer(x)) # False x = paddle.to_tensor([1.1, 1.2]) print(paddle.is_integer(x)) # False", "{}\".format( type(x))) dtype = x.dtype is_int_dtype = (dtype == core.VarDesc.VarType.UINT8", "applicable law or agreed to in writing, software # distributed", "whether the dtype of `x` is one of paddle.float64, paddle.float32,", "= paddle.to_tensor( [[1 + 6j, 2 + 5j, 3 +", "PaddlePaddle Authors. All Rights Reserved. # # Licensed under the", "containing imaginary values of the input tensor. Examples: .. code-block::", "x is a tensor of integeral data type. Args: x", "new tensor containing imaginary values of input tensor. Args: x", "stop_gradient=True, # [[1., 2., 3.], # [4., 5., 6.]]) \"\"\"", "in writing, software # distributed under the License is distributed", "import LayerHelper from ..fluid.data_feeder import check_variable_and_dtype # TODO: define functions", "# TODO: define functions to get tensor attributes from ..fluid.layers", "`x` is floating type, otherwise false. Examples: .. code-block:: python", "Examples: .. 
code-block:: python import paddle x = paddle.to_tensor( [[1", "== core.VarDesc.VarType.COMPLEX64: return core.VarDesc.VarType.FP32 elif dtype == core.VarDesc.VarType.COMPLEX128: return core.VarDesc.VarType.FP64", "+ 2j, 3 + 4j]) print(paddle.is_integer(x)) # False x =", "= paddle.arange(1., 5., dtype='float32') y = paddle.arange(1, 5, dtype='int32') print(paddle.is_floating_point(x))", "(5+2j), (6+1j)]]) real_res = paddle.real(x) # Tensor(shape=[2, 3], dtype=float32, place=CUDAPlace(0),", "= helper.create_variable_for_type_inference( dtype=_complex_to_real_dtype(helper.input_dtype())) helper.append_op(type='imag', inputs={'X': x}, outputs={'Out': out}) return out", "core.VarDesc.VarType.COMPLEX128 else: return dtype def is_complex(x): \"\"\"Return whether x is", "= helper.create_variable_for_type_inference( dtype=_complex_to_real_dtype(helper.input_dtype())) helper.append_op(type='real', inputs={'X': x}, outputs={'Out': out}) return out", "5 + 2j, 6 + 1j]]) # Tensor(shape=[2, 3], dtype=complex64,", "y = paddle.arange(1, 5, dtype='int32') print(paddle.is_floating_point(x)) # True print(paddle.is_floating_point(y)) #", "dtype == core.VarDesc.VarType.INT32 or dtype == core.VarDesc.VarType.INT64) return is_int_dtype def", "paddle.static.Variable)): raise TypeError(\"Expected Tensor, but received type of x: {}\".format(", "2j, 6 + 1j]]) # Tensor(shape=[2, 3], dtype=complex64, place=CUDAPlace(0), stop_gradient=True,", "5., 4.], # [3., 2., 1.]]) imag_t = x.imag() #", "paddle import _C_ops from paddle.static import Variable from ..fluid.framework import", "python import paddle x = paddle.to_tensor([1 + 2j, 3 +", "define functions to get tensor attributes from ..fluid.layers import rank", "in_dygraph_mode __all__ = [] def _complex_to_real_dtype(dtype): if dtype == core.VarDesc.VarType.COMPLEX64:", "import paddle x = paddle.arange(1., 5., dtype='float32') y = paddle.arange(1,", "shape # noqa: F401 import paddle from paddle import _C_ops", "name=None): \"\"\" Returns a new 
tensor containing real values of", "= paddle.to_tensor([1, 2, 3]) print(paddle.is_complex(x)) # False \"\"\" if not", "real_t = x.real() # Tensor(shape=[2, 3], dtype=float32, place=CUDAPlace(0), stop_gradient=True, #", "License is distributed on an \"AS IS\" BASIS, # WITHOUT", "def is_floating_point(x): \"\"\" Returns whether the dtype of `x` is", "License, Version 2.0 (the \"License\"); # you may not use", "input tensor. Returns: bool: True if the dtype of `x`", "out = helper.create_variable_for_type_inference( dtype=_complex_to_real_dtype(helper.input_dtype())) helper.append_op(type='imag', inputs={'X': x}, outputs={'Out': out}) return", "if the dtype of `x` is floating type, otherwise false.", "# You may obtain a copy of the License at", "stop_gradient=True, # [[(1+6j), (2+5j), (3+4j)], # [(4+3j), (5+2j), (6+1j)]]) real_res", "dtype def is_complex(x): \"\"\"Return whether x is a tensor of", "[4 + 3j, 5 + 2j, 6 + 1j]]) #", "core.VarDesc.VarType.UINT8 or dtype == core.VarDesc.VarType.INT8 or dtype == core.VarDesc.VarType.INT16 or", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "'x', ['complex64', 'complex128'], 'real') helper = LayerHelper('real', **locals()) out =", "3 + 4j]) print(paddle.is_integer(x)) # False x = paddle.to_tensor([1.1, 1.2])", "stop_gradient=True, # [[(1+6j), (2+5j), (3+4j)], # [(4+3j), (5+2j), (6+1j)]]) imag_res", "Authors. All Rights Reserved. # # Licensed under the Apache", "2j, 3 + 4j]) print(paddle.is_complex(x)) # True x = paddle.to_tensor([1.1,", "print(paddle.is_complex(x)) # True x = paddle.to_tensor([1.1, 1.2]) print(paddle.is_complex(x)) # False", "def real(x, name=None): \"\"\" Returns a new tensor containing real", "x = paddle.to_tensor([1.1, 1.2]) print(paddle.is_integer(x)) # False x = paddle.to_tensor([1,", "For more information, please refer to :ref:`api_guide_Name` . 
Returns: Tensor:", "core.VarDesc.VarType.COMPLEX128) return is_complex_dtype def is_floating_point(x): \"\"\" Returns whether the dtype", "return is_fp_dtype def is_integer(x): \"\"\"Return whether x is a tensor", "True if the data type of the input is complex", "(str, optional): The default value is None. Normally there is", "raise TypeError(\"Expected Tensor, but received type of x: {}\".format( type(x)))", "= x.imag() # Tensor(shape=[2, 3], dtype=float32, place=CUDAPlace(0), stop_gradient=True, # [[6.,", "the License for the specific language governing permissions and #", "values of the input tensor. Args: x (Tensor): the input", "in_dygraph_mode(): return _C_ops.final_state_real(x) if _in_legacy_dygraph(): return _C_ops.real(x) check_variable_and_dtype(x, 'x', ['complex64',", "Apache License, Version 2.0 (the \"License\"); # you may not", "is_complex(x): \"\"\"Return whether x is a tensor of complex data", "= paddle.real(x) # Tensor(shape=[2, 3], dtype=float32, place=CUDAPlace(0), stop_gradient=True, # [[1.,", "The input tensor. Returns: bool: True if the data type", "either express or implied. # See the License for the", ". 
Returns: Tensor: a tensor containing imaginary values of the", "if the data type of the input is integer data", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "core.VarDesc.VarType.COMPLEX64 or dtype == core.VarDesc.VarType.COMPLEX128) return is_complex_dtype def is_floating_point(x): \"\"\"", "== core.VarDesc.VarType.FP64 or dtype == core.VarDesc.VarType.FP16 or dtype == core.VarDesc.VarType.BF16)", "paddle from paddle import _C_ops from paddle.static import Variable from", "..fluid.layers import shape # noqa: F401 import paddle from paddle", "return core.VarDesc.VarType.COMPLEX128 else: return dtype def is_complex(x): \"\"\"Return whether x", "from paddle import _C_ops from paddle.static import Variable from ..fluid.framework", "data type of the input is integer data type, otherwise", "[] def _complex_to_real_dtype(dtype): if dtype == core.VarDesc.VarType.COMPLEX64: return core.VarDesc.VarType.FP32 elif", "..fluid.layer_helper import LayerHelper from ..fluid.data_feeder import check_variable_and_dtype # TODO: define", "= x.dtype is_complex_dtype = (dtype == core.VarDesc.VarType.COMPLEX64 or dtype ==", "type of the input is complex data type, otherwise false.", "Tensor(shape=[2, 3], dtype=float32, place=CUDAPlace(0), stop_gradient=True, # [[6., 5., 4.], #", "+ 1j]]) # Tensor(shape=[2, 3], dtype=complex64, place=CUDAPlace(0), stop_gradient=True, # [[(1+6j),", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "values of input tensor. Args: x (Tensor): the input tensor,", "+ 6j, 2 + 5j, 3 + 4j], [4 +", "return _C_ops.real(x) check_variable_and_dtype(x, 'x', ['complex64', 'complex128'], 'real') helper = LayerHelper('real',", "name=None): \"\"\" Returns a new tensor containing imaginary values of", "x = paddle.to_tensor([1 + 2j, 3 + 4j]) print(paddle.is_integer(x)) #", "(c) 2020 PaddlePaddle Authors. All Rights Reserved. # # Licensed", "input tensor. 
Returns: bool: True if the data type of", "3 + 4j]) print(paddle.is_complex(x)) # True x = paddle.to_tensor([1.1, 1.2])", "Returns: bool: True if the dtype of `x` is floating", "2., 1.]]) imag_t = x.imag() # Tensor(shape=[2, 3], dtype=float32, place=CUDAPlace(0),", "**locals()) out = helper.create_variable_for_type_inference( dtype=_complex_to_real_dtype(helper.input_dtype())) helper.append_op(type='real', inputs={'X': x}, outputs={'Out': out})", "print(paddle.is_integer(x)) # True \"\"\" if not isinstance(x, (paddle.Tensor, paddle.static.Variable)): raise", "2., 3.], # [4., 5., 6.]]) real_t = x.real() #", "x}, outputs={'Out': out}) return out def imag(x, name=None): \"\"\" Returns", "3], dtype=float32, place=CUDAPlace(0), stop_gradient=True, # [[6., 5., 4.], # [3.,", "['complex64', 'complex128'], 'imag') helper = LayerHelper('imag', **locals()) out = helper.create_variable_for_type_inference(", "LayerHelper('real', **locals()) out = helper.create_variable_for_type_inference( dtype=_complex_to_real_dtype(helper.input_dtype())) helper.append_op(type='real', inputs={'X': x}, outputs={'Out':", "paddle.bfloat16. Args: x (Tensor): The input tensor. Returns: bool: True", "\"License\"); # you may not use this file except in", "True x = paddle.to_tensor([1.1, 1.2]) print(paddle.is_complex(x)) # False x =", "def imag(x, name=None): \"\"\" Returns a new tensor containing imaginary", "dtype=float32, place=CUDAPlace(0), stop_gradient=True, # [[1., 2., 3.], # [4., 5.,", "Returns: bool: True if the data type of the input", "x.real() # Tensor(shape=[2, 3], dtype=float32, place=CUDAPlace(0), stop_gradient=True, # [[1., 2.,", "values of the input tensor. Examples: .. code-block:: python import", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "core.VarDesc.VarType.INT8 or dtype == core.VarDesc.VarType.INT16 or dtype == core.VarDesc.VarType.INT32 or", "from ..fluid.layers import shape # noqa: F401 import paddle from", "false. Examples: .. 
code-block:: python import paddle x = paddle.to_tensor([1", "paddle.static import Variable from ..fluid.framework import _in_legacy_dygraph, in_dygraph_mode __all__ =", "core.VarDesc.VarType.INT16 or dtype == core.VarDesc.VarType.INT32 or dtype == core.VarDesc.VarType.INT64) return", "# distributed under the License is distributed on an \"AS", "is_fp_dtype = (dtype == core.VarDesc.VarType.FP32 or dtype == core.VarDesc.VarType.FP64 or", "to get tensor attributes from ..fluid.layers import rank # noqa:", "integeral data type. Args: x (Tensor): The input tensor. Returns:", "# Unless required by applicable law or agreed to in", "# noqa: F401 from ..fluid.layers import shape # noqa: F401", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "+ 4j]) print(paddle.is_complex(x)) # True x = paddle.to_tensor([1.1, 1.2]) print(paddle.is_complex(x))", "Args: x (Tensor): the input tensor, its data type could", "paddle.arange(1., 5., dtype='float32') y = paddle.arange(1, 5, dtype='int32') print(paddle.is_floating_point(x)) #", "tensor. Returns: bool: True if the dtype of `x` is", "dtype == core.VarDesc.VarType.INT64) return is_int_dtype def real(x, name=None): \"\"\" Returns", "real values of the input tensor. Args: x (Tensor): the", "False x = paddle.to_tensor([1.1, 1.2]) print(paddle.is_integer(x)) # False x =", "You may obtain a copy of the License at #", "received type of x: {}\".format( type(x))) dtype = x.dtype is_int_dtype", "x (Tensor): the input tensor, its data type could be", "or complex128). Args: x (Tensor): The input tensor. Returns: bool:", "2, 3]) print(paddle.is_complex(x)) # False \"\"\" if not isinstance(x, (paddle.Tensor,", "core.VarDesc.VarType.FP64: return core.VarDesc.VarType.COMPLEX128 else: return dtype def is_complex(x): \"\"\"Return whether", "Tensor: a tensor containing real values of the input tensor.", "is None. Normally there is no need for user to", "more information, please refer to :ref:`api_guide_Name` . 
Returns: Tensor: a", "the Apache License, Version 2.0 (the \"License\"); # you may", "_C_ops.imag(x) check_variable_and_dtype(x, 'x', ['complex64', 'complex128'], 'imag') helper = LayerHelper('imag', **locals())", "return is_int_dtype def real(x, name=None): \"\"\" Returns a new tensor", "5., 6.]]) \"\"\" if in_dygraph_mode(): return _C_ops.final_state_real(x) if _in_legacy_dygraph(): return", "the input tensor, its data type could be complex64 or", "Tensor(shape=[2, 3], dtype=complex64, place=CUDAPlace(0), stop_gradient=True, # [[(1+6j), (2+5j), (3+4j)], #", "from ..fluid.framework import _in_legacy_dygraph, in_dygraph_mode __all__ = [] def _complex_to_real_dtype(dtype):" ]
[ "\"\"\"Raised when a release has a non-string 'date' value\"\"\" class", "non-string 'date' value\"\"\" class InconsistentTypeError(OCDSMergeError, TypeError): \"\"\"Raised when a path", "message): self.key = key self.message = message def __str__(self): return", "same array have the same value for the 'id' field\"\"\"", "def __init__(self, key, message): self.key = key self.message = message", "'date' key\"\"\" def __init__(self, key, message): self.key = key self.message", "when a release has a null 'date' value\"\"\" class NonStringDateValueError(OCDSMergeError,", "OCDSMergeWarning(UserWarning): \"\"\"Base class for warnings from within this package\"\"\" class", "self.key = key self.message = message def __str__(self): return str(self.message)", "id, message): self.path = path self.id = id self.message =", "KeyError): \"\"\"Raised when a release is missing a 'date' key\"\"\"", "\"\"\"Raised when a release has a null 'date' value\"\"\" class", "TypeError): \"\"\"Raised when a release is not an object\"\"\" class", "release is not an object\"\"\" class NullDateValueError(OCDSMergeError, TypeError): \"\"\"Raised when", "object\"\"\" class NullDateValueError(OCDSMergeError, TypeError): \"\"\"Raised when a release has a", "value for the 'id' field\"\"\" def __init__(self, path, id, message):", "in the same array have the same value for the", "message def __str__(self): return str(self.message) class NonObjectReleaseError(OCDSMergeError, TypeError): \"\"\"Raised when", "class InconsistentTypeError(OCDSMergeError, TypeError): \"\"\"Raised when a path is a literal", "from within this package\"\"\" class DuplicateIdValueWarning(OCDSMergeWarning): \"\"\"Used when at least", "'date' value\"\"\" class InconsistentTypeError(OCDSMergeError, TypeError): \"\"\"Raised when a path is", "class MissingDateKeyError(OCDSMergeError, KeyError): \"\"\"Raised when a release is missing a", "key, message): self.key = key self.message = message def __str__(self):", "missing a 'date' 
key\"\"\" def __init__(self, key, message): self.key =", "array have the same value for the 'id' field\"\"\" def", "\"\"\"Base class for warnings from within this package\"\"\" class DuplicateIdValueWarning(OCDSMergeWarning):", "release is missing a 'date' key\"\"\" def __init__(self, key, message):", "class NullDateValueError(OCDSMergeError, TypeError): \"\"\"Raised when a release has a null", "<reponame>open-contracting/ocds-merge class OCDSMergeError(Exception): \"\"\"Base class for exceptions from within this", "__str__(self): return str(self.message) class NonObjectReleaseError(OCDSMergeError, TypeError): \"\"\"Raised when a release", "TypeError): \"\"\"Raised when a release has a null 'date' value\"\"\"", "two objects in the same array have the same value", "self.id = id self.message = message def __str__(self): return str(self.message)", "__init__(self, path, id, message): self.path = path self.id = id", "when at least two objects in the same array have", "has a non-string 'date' value\"\"\" class InconsistentTypeError(OCDSMergeError, TypeError): \"\"\"Raised when", "field\"\"\" def __init__(self, path, id, message): self.path = path self.id", "when a release is missing a 'date' key\"\"\" def __init__(self,", "= message def __str__(self): return str(self.message) class NonObjectReleaseError(OCDSMergeError, TypeError): \"\"\"Raised", "class NonObjectReleaseError(OCDSMergeError, TypeError): \"\"\"Raised when a release is not an", "a release has a non-string 'date' value\"\"\" class InconsistentTypeError(OCDSMergeError, TypeError):", "for warnings from within this package\"\"\" class DuplicateIdValueWarning(OCDSMergeWarning): \"\"\"Used when", "when a release has a non-string 'date' value\"\"\" class InconsistentTypeError(OCDSMergeError,", "warnings from within this package\"\"\" class DuplicateIdValueWarning(OCDSMergeWarning): \"\"\"Used when at", "not an object\"\"\" class NullDateValueError(OCDSMergeError, TypeError): \"\"\"Raised when a release", "message): 
self.path = path self.id = id self.message = message", "__init__(self, key, message): self.key = key self.message = message def", "within this package\"\"\" class MissingDateKeyError(OCDSMergeError, KeyError): \"\"\"Raised when a release", "TypeError): \"\"\"Raised when a path is a literal and an", "release has a null 'date' value\"\"\" class NonStringDateValueError(OCDSMergeError, TypeError): \"\"\"Raised", "different releases\"\"\" class OCDSMergeWarning(UserWarning): \"\"\"Base class for warnings from within", "in different releases\"\"\" class OCDSMergeWarning(UserWarning): \"\"\"Base class for warnings from", "the same array have the same value for the 'id'", "a release is missing a 'date' key\"\"\" def __init__(self, key,", "object in different releases\"\"\" class OCDSMergeWarning(UserWarning): \"\"\"Base class for warnings", "str(self.message) class NonObjectReleaseError(OCDSMergeError, TypeError): \"\"\"Raised when a release is not", "objects in the same array have the same value for", "for exceptions from within this package\"\"\" class MissingDateKeyError(OCDSMergeError, KeyError): \"\"\"Raised", "\"\"\"Raised when a path is a literal and an object", "has a null 'date' value\"\"\" class NonStringDateValueError(OCDSMergeError, TypeError): \"\"\"Raised when", "releases\"\"\" class OCDSMergeWarning(UserWarning): \"\"\"Base class for warnings from within this", "class OCDSMergeError(Exception): \"\"\"Base class for exceptions from within this package\"\"\"", "\"\"\"Raised when a release is missing a 'date' key\"\"\" def", "have the same value for the 'id' field\"\"\" def __init__(self,", "def __str__(self): return str(self.message) class NonObjectReleaseError(OCDSMergeError, TypeError): \"\"\"Raised when a", "a literal and an object in different releases\"\"\" class OCDSMergeWarning(UserWarning):", "a 'date' key\"\"\" def __init__(self, key, message): self.key = key", "class DuplicateIdValueWarning(OCDSMergeWarning): \"\"\"Used when at least two objects in 
the", "exceptions from within this package\"\"\" class MissingDateKeyError(OCDSMergeError, KeyError): \"\"\"Raised when", "path self.id = id self.message = message def __str__(self): return", "release has a non-string 'date' value\"\"\" class InconsistentTypeError(OCDSMergeError, TypeError): \"\"\"Raised", "def __init__(self, path, id, message): self.path = path self.id =", "same value for the 'id' field\"\"\" def __init__(self, path, id,", "= key self.message = message def __str__(self): return str(self.message) class", "a release is not an object\"\"\" class NullDateValueError(OCDSMergeError, TypeError): \"\"\"Raised", "value\"\"\" class InconsistentTypeError(OCDSMergeError, TypeError): \"\"\"Raised when a path is a", "least two objects in the same array have the same", "path is a literal and an object in different releases\"\"\"", "NullDateValueError(OCDSMergeError, TypeError): \"\"\"Raised when a release has a null 'date'", "is not an object\"\"\" class NullDateValueError(OCDSMergeError, TypeError): \"\"\"Raised when a", "NonStringDateValueError(OCDSMergeError, TypeError): \"\"\"Raised when a release has a non-string 'date'", "package\"\"\" class DuplicateIdValueWarning(OCDSMergeWarning): \"\"\"Used when at least two objects in", "the 'id' field\"\"\" def __init__(self, path, id, message): self.path =", "\"\"\"Base class for exceptions from within this package\"\"\" class MissingDateKeyError(OCDSMergeError,", "and an object in different releases\"\"\" class OCDSMergeWarning(UserWarning): \"\"\"Base class", "TypeError): \"\"\"Raised when a release has a non-string 'date' value\"\"\"", "class OCDSMergeWarning(UserWarning): \"\"\"Base class for warnings from within this package\"\"\"", "for the 'id' field\"\"\" def __init__(self, path, id, message): self.path", "'date' value\"\"\" class NonStringDateValueError(OCDSMergeError, TypeError): \"\"\"Raised when a release has", "\"\"\"Used when at least two objects in the same array", "= path self.id = id self.message = 
message def __str__(self):", "NonObjectReleaseError(OCDSMergeError, TypeError): \"\"\"Raised when a release is not an object\"\"\"", "an object\"\"\" class NullDateValueError(OCDSMergeError, TypeError): \"\"\"Raised when a release has", "is missing a 'date' key\"\"\" def __init__(self, key, message): self.key", "'id' field\"\"\" def __init__(self, path, id, message): self.path = path", "self.path = path self.id = id self.message = message def", "at least two objects in the same array have the", "when a release is not an object\"\"\" class NullDateValueError(OCDSMergeError, TypeError):", "null 'date' value\"\"\" class NonStringDateValueError(OCDSMergeError, TypeError): \"\"\"Raised when a release", "path, id, message): self.path = path self.id = id self.message", "literal and an object in different releases\"\"\" class OCDSMergeWarning(UserWarning): \"\"\"Base", "this package\"\"\" class MissingDateKeyError(OCDSMergeError, KeyError): \"\"\"Raised when a release is", "class for warnings from within this package\"\"\" class DuplicateIdValueWarning(OCDSMergeWarning): \"\"\"Used", "value\"\"\" class NonStringDateValueError(OCDSMergeError, TypeError): \"\"\"Raised when a release has a", "from within this package\"\"\" class MissingDateKeyError(OCDSMergeError, KeyError): \"\"\"Raised when a", "self.message = message def __str__(self): return str(self.message) class NonObjectReleaseError(OCDSMergeError, TypeError):", "a release has a null 'date' value\"\"\" class NonStringDateValueError(OCDSMergeError, TypeError):", "when a path is a literal and an object in", "an object in different releases\"\"\" class OCDSMergeWarning(UserWarning): \"\"\"Base class for", "DuplicateIdValueWarning(OCDSMergeWarning): \"\"\"Used when at least two objects in the same", "a non-string 'date' value\"\"\" class InconsistentTypeError(OCDSMergeError, TypeError): \"\"\"Raised when a", "class for exceptions from within this package\"\"\" class MissingDateKeyError(OCDSMergeError, KeyError):", 
"key\"\"\" def __init__(self, key, message): self.key = key self.message =", "return str(self.message) class NonObjectReleaseError(OCDSMergeError, TypeError): \"\"\"Raised when a release is", "class NonStringDateValueError(OCDSMergeError, TypeError): \"\"\"Raised when a release has a non-string", "a null 'date' value\"\"\" class NonStringDateValueError(OCDSMergeError, TypeError): \"\"\"Raised when a", "package\"\"\" class MissingDateKeyError(OCDSMergeError, KeyError): \"\"\"Raised when a release is missing", "within this package\"\"\" class DuplicateIdValueWarning(OCDSMergeWarning): \"\"\"Used when at least two", "a path is a literal and an object in different", "is a literal and an object in different releases\"\"\" class", "InconsistentTypeError(OCDSMergeError, TypeError): \"\"\"Raised when a path is a literal and", "key self.message = message def __str__(self): return str(self.message) class NonObjectReleaseError(OCDSMergeError,", "this package\"\"\" class DuplicateIdValueWarning(OCDSMergeWarning): \"\"\"Used when at least two objects", "the same value for the 'id' field\"\"\" def __init__(self, path,", "OCDSMergeError(Exception): \"\"\"Base class for exceptions from within this package\"\"\" class", "\"\"\"Raised when a release is not an object\"\"\" class NullDateValueError(OCDSMergeError,", "MissingDateKeyError(OCDSMergeError, KeyError): \"\"\"Raised when a release is missing a 'date'" ]
[ "buttons1_state = buttons1_state | self.state[\"BTN_A\"] buttons1_state = buttons1_state | self.state[\"BTN_B\"]", "<< 3 buttons2_state = buttons2_state | self.state[\"BTN_TL\"] << 4 packet", "buttons2_state = buttons2_state | self.state[\"BTN_TL\"] << 4 packet = struct.pack('6h2c',", "'{0:06b}'.format(int.from_bytes(buttons1, byteorder=\"big\")) holder2 = '{0:05b}'.format(int.from_bytes(buttons2, byteorder=\"big\")) for i in holder1:", "= packet[14:30] state = struct.unpack('6h2B2c', state) buttons1 = state[8] buttons2", "buttons2_state.to_bytes(1, byteorder=\"big\")) return packet def decode(self, packet): buttons = []", "holder1 = '{0:06b}'.format(int.from_bytes(buttons1, byteorder=\"big\")) holder2 = '{0:05b}'.format(int.from_bytes(buttons2, byteorder=\"big\")) for i", "= self.device.capabilities(absinfo=False) self.config = {} self.state = {} def build(self):", "for event in self.device.read_loop(): if event.type == evdev.ecodes.EV_KEY or event.type", "4 packet = struct.pack('6h2c', self.state[\"ABS_X\"], self.state[\"ABS_Y\"], self.state[\"ABS_RX\"], self.state[\"ABS_RY\"], self.state[\"ABS_HAT0X\"], self.state[\"ABS_HAT0Y\"],", "packet): buttons = [] state = packet[14:30] state = struct.unpack('6h2B2c',", "self.state[\"BTN_NORTH\"] << 2 buttons1_state = buttons1_state | self.state[\"BTN_WEST\"] << 3", "in holder2: buttons.append(int(i)) state = list(state[ :7]) + buttons return", "state dictionary for controller\"\"\" #build config dictionary by code and", "#build config dictionary by code and name for key, value", "return(self.update_state(event)) def update_state(self, event): self.state[self.config[event.code]] = event.value buttons1_state = 0", "for code in self.capaRAW[1]: self.state[self.config[code]] = 0 for code in", "= '{0:05b}'.format(int.from_bytes(buttons2, byteorder=\"big\")) for i in holder1: buttons.append(int(i)) for i", "= '{0:06b}'.format(int.from_bytes(buttons1, byteorder=\"big\")) holder2 = '{0:05b}'.format(int.from_bytes(buttons2, 
byteorder=\"big\")) for i in", "1 buttons2_state = buttons2_state | self.state[\"BTN_SELECT\"] << 2 buttons2_state =", "return packet def decode(self, packet): buttons = [] state =", "for event\") for event in self.device.read_loop(): if event.type == evdev.ecodes.EV_KEY", "self.state[\"BTN_B\"] << 1 buttons1_state = buttons1_state | self.state[\"BTN_NORTH\"] << 2", "byteorder=\"big\")) return packet def decode(self, packet): buttons = [] state", "= struct.pack('6h2c', self.state[\"ABS_X\"], self.state[\"ABS_Y\"], self.state[\"ABS_RX\"], self.state[\"ABS_RY\"], self.state[\"ABS_HAT0X\"], self.state[\"ABS_HAT0Y\"], buttons1_state.to_bytes(1, byteorder=\"big\"),", "i in holder2: buttons.append(int(i)) state = list(state[ :7]) + buttons", "buttons2_state | self.state[\"BTN_TR\"] << 3 buttons2_state = buttons2_state | self.state[\"BTN_TL\"]", "self.config[element[1]] = element[0][0] elif (\"SYN\" in str(element[0])) or (\"FF\" in", "for code in self.capaRAW[3]: self.state[self.config[code]] = 0 print(\"waiting for event\")", "is list: self.config[element[1]] = element[0][0] elif (\"SYN\" in str(element[0])) or", "buttons = [] state = packet[14:30] state = struct.unpack('6h2B2c', state)", "= {} self.state = {} def build(self): \"\"\"build state dictionary", "def build(self): \"\"\"build state dictionary for controller\"\"\" #build config dictionary", "print(\"waiting for event\") for event in self.device.read_loop(): if event.type ==", "self.config[element[0][1]] = element[0][0] elif type(element[0]) is list: self.config[element[1]] = element[0][0]", "packet = struct.pack('6h2c', self.state[\"ABS_X\"], self.state[\"ABS_Y\"], self.state[\"ABS_RX\"], self.state[\"ABS_RY\"], self.state[\"ABS_HAT0X\"], self.state[\"ABS_HAT0Y\"], buttons1_state.to_bytes(1,", "element[0] #build state dictionary from raw codes for code in", "update_state(self, event): self.state[self.config[event.code]] = event.value buttons1_state = 0 buttons1_state =", "element in value: if type(element[0]) 
is tuple: self.config[element[0][1]] = element[0][0]", "code in self.capaRAW[1]: self.state[self.config[code]] = 0 for code in self.capaRAW[3]:", "== evdev.ecodes.EV_ABS: return(self.update_state(event)) def update_state(self, event): self.state[self.config[event.code]] = event.value buttons1_state", "evdev.InputDevice(\"/dev/input/event2\") self.capabilities = self.device.capabilities(verbose=True) self.capaRAW = self.device.capabilities(absinfo=False) self.config = {}", "code and name for key, value in self.capabilities.items(): for element", "self.capaRAW[1]: self.state[self.config[code]] = 0 for code in self.capaRAW[3]: self.state[self.config[code]] =", "buttons1_state = buttons1_state | self.state[\"BTN_WEST\"] << 3 buttons2_state = 0", "= buttons1_state | self.state[\"BTN_A\"] buttons1_state = buttons1_state | self.state[\"BTN_B\"] <<", "| self.state[\"BTN_WEST\"] << 3 buttons2_state = 0 buttons2_state = buttons2_state", "def decode(self, packet): buttons = [] state = packet[14:30] state", "from raw codes for code in self.capaRAW[1]: self.state[self.config[code]] = 0", "or (\"FF\" in str(element[0])): pass else: self.config[element[1]] = element[0] #build", "state[8] buttons2 = state[9] holder1 = '{0:06b}'.format(int.from_bytes(buttons1, byteorder=\"big\")) holder2 =", "| self.state[\"BTN_MODE\"] << 1 buttons2_state = buttons2_state | self.state[\"BTN_SELECT\"] <<", "| self.state[\"BTN_SELECT\"] << 2 buttons2_state = buttons2_state | self.state[\"BTN_TR\"] <<", "<< 1 buttons1_state = buttons1_state | self.state[\"BTN_NORTH\"] << 2 buttons1_state", "self.config = {} self.state = {} def build(self): \"\"\"build state", "in self.capaRAW[1]: self.state[self.config[code]] = 0 for code in self.capaRAW[3]: self.state[self.config[code]]", "= state[9] holder1 = '{0:06b}'.format(int.from_bytes(buttons1, byteorder=\"big\")) holder2 = '{0:05b}'.format(int.from_bytes(buttons2, byteorder=\"big\"))", "= 0 buttons1_state = buttons1_state | self.state[\"BTN_A\"] 
buttons1_state = buttons1_state", "self.state[\"BTN_TR\"] << 3 buttons2_state = buttons2_state | self.state[\"BTN_TL\"] << 4", "pass else: self.config[element[1]] = element[0] #build state dictionary from raw", "dictionary from raw codes for code in self.capaRAW[1]: self.state[self.config[code]] =", "buttons2 = state[9] holder1 = '{0:06b}'.format(int.from_bytes(buttons1, byteorder=\"big\")) holder2 = '{0:05b}'.format(int.from_bytes(buttons2,", "by code and name for key, value in self.capabilities.items(): for", "code in self.capaRAW[3]: self.state[self.config[code]] = 0 print(\"waiting for event\") for", "import struct class appcodec(): def __init__(self): self.device = evdev.InputDevice(\"/dev/input/event2\") self.capabilities", "= buttons1_state | self.state[\"BTN_WEST\"] << 3 buttons2_state = 0 buttons2_state", "in str(element[0])): pass else: self.config[element[1]] = element[0] #build state dictionary", "holder2 = '{0:05b}'.format(int.from_bytes(buttons2, byteorder=\"big\")) for i in holder1: buttons.append(int(i)) for", "def update_state(self, event): self.state[self.config[event.code]] = event.value buttons1_state = 0 buttons1_state", "self.state[\"ABS_RX\"], self.state[\"ABS_RY\"], self.state[\"ABS_HAT0X\"], self.state[\"ABS_HAT0Y\"], buttons1_state.to_bytes(1, byteorder=\"big\"), buttons2_state.to_bytes(1, byteorder=\"big\")) return packet", "dictionary by code and name for key, value in self.capabilities.items():", "i in holder1: buttons.append(int(i)) for i in holder2: buttons.append(int(i)) state", "holder1: buttons.append(int(i)) for i in holder2: buttons.append(int(i)) state = list(state[", "class appcodec(): def __init__(self): self.device = evdev.InputDevice(\"/dev/input/event2\") self.capabilities = self.device.capabilities(verbose=True)", "<filename>appcodec.py import evdev import time import struct class appcodec(): def", "self.state = {} def build(self): \"\"\"build state dictionary for controller\"\"\"", "self.state[\"BTN_A\"] buttons1_state = 
buttons1_state | self.state[\"BTN_B\"] << 1 buttons1_state =", "import time import struct class appcodec(): def __init__(self): self.device =", "decode(self, packet): buttons = [] state = packet[14:30] state =", "= buttons2_state | self.state[\"BTN_START\"] buttons2_state = buttons2_state | self.state[\"BTN_MODE\"] <<", "__init__(self): self.device = evdev.InputDevice(\"/dev/input/event2\") self.capabilities = self.device.capabilities(verbose=True) self.capaRAW = self.device.capabilities(absinfo=False)", "0 buttons1_state = buttons1_state | self.state[\"BTN_A\"] buttons1_state = buttons1_state |", "state dictionary from raw codes for code in self.capaRAW[1]: self.state[self.config[code]]", "{} self.state = {} def build(self): \"\"\"build state dictionary for", "buttons2_state | self.state[\"BTN_MODE\"] << 1 buttons2_state = buttons2_state | self.state[\"BTN_SELECT\"]", "= buttons1_state | self.state[\"BTN_B\"] << 1 buttons1_state = buttons1_state |", "self.config[element[1]] = element[0] #build state dictionary from raw codes for", "#build state dictionary from raw codes for code in self.capaRAW[1]:", "in str(element[0])) or (\"FF\" in str(element[0])): pass else: self.config[element[1]] =", "buttons2_state = buttons2_state | self.state[\"BTN_SELECT\"] << 2 buttons2_state = buttons2_state", "= 0 for code in self.capaRAW[3]: self.state[self.config[code]] = 0 print(\"waiting", "event in self.device.read_loop(): if event.type == evdev.ecodes.EV_KEY or event.type ==", "evdev.ecodes.EV_KEY or event.type == evdev.ecodes.EV_ABS: return(self.update_state(event)) def update_state(self, event): self.state[self.config[event.code]]", "event): self.state[self.config[event.code]] = event.value buttons1_state = 0 buttons1_state = buttons1_state", "event.value buttons1_state = 0 buttons1_state = buttons1_state | self.state[\"BTN_A\"] buttons1_state", "dictionary for controller\"\"\" #build config dictionary by code and name", "buttons2_state | self.state[\"BTN_TL\"] << 4 packet = 
struct.pack('6h2c', self.state[\"ABS_X\"], self.state[\"ABS_Y\"],", "value: if type(element[0]) is tuple: self.config[element[0][1]] = element[0][0] elif type(element[0])", "state) buttons1 = state[8] buttons2 = state[9] holder1 = '{0:06b}'.format(int.from_bytes(buttons1,", "for controller\"\"\" #build config dictionary by code and name for", "controller\"\"\" #build config dictionary by code and name for key,", "= {} def build(self): \"\"\"build state dictionary for controller\"\"\" #build", "self.state[\"ABS_Y\"], self.state[\"ABS_RX\"], self.state[\"ABS_RY\"], self.state[\"ABS_HAT0X\"], self.state[\"ABS_HAT0Y\"], buttons1_state.to_bytes(1, byteorder=\"big\"), buttons2_state.to_bytes(1, byteorder=\"big\")) return", "event.type == evdev.ecodes.EV_ABS: return(self.update_state(event)) def update_state(self, event): self.state[self.config[event.code]] = event.value", "in self.device.read_loop(): if event.type == evdev.ecodes.EV_KEY or event.type == evdev.ecodes.EV_ABS:", "list: self.config[element[1]] = element[0][0] elif (\"SYN\" in str(element[0])) or (\"FF\"", "config dictionary by code and name for key, value in", "and name for key, value in self.capabilities.items(): for element in", "buttons1 = state[8] buttons2 = state[9] holder1 = '{0:06b}'.format(int.from_bytes(buttons1, byteorder=\"big\"))", "\"\"\"build state dictionary for controller\"\"\" #build config dictionary by code", "struct.unpack('6h2B2c', state) buttons1 = state[8] buttons2 = state[9] holder1 =", "for key, value in self.capabilities.items(): for element in value: if", "is tuple: self.config[element[0][1]] = element[0][0] elif type(element[0]) is list: self.config[element[1]]", "struct class appcodec(): def __init__(self): self.device = evdev.InputDevice(\"/dev/input/event2\") self.capabilities =", "self.capaRAW = self.device.capabilities(absinfo=False) self.config = {} self.state = {} def", "value in self.capabilities.items(): for element in value: if type(element[0]) is", "= 0 buttons2_state = 
buttons2_state | self.state[\"BTN_START\"] buttons2_state = buttons2_state", "elif type(element[0]) is list: self.config[element[1]] = element[0][0] elif (\"SYN\" in", "self.device.capabilities(absinfo=False) self.config = {} self.state = {} def build(self): \"\"\"build", "3 buttons2_state = buttons2_state | self.state[\"BTN_TL\"] << 4 packet =", "buttons2_state = buttons2_state | self.state[\"BTN_TR\"] << 3 buttons2_state = buttons2_state", "= [] state = packet[14:30] state = struct.unpack('6h2B2c', state) buttons1", "self.state[\"ABS_HAT0Y\"], buttons1_state.to_bytes(1, byteorder=\"big\"), buttons2_state.to_bytes(1, byteorder=\"big\")) return packet def decode(self, packet):", "for i in holder1: buttons.append(int(i)) for i in holder2: buttons.append(int(i))", "self.state[\"BTN_WEST\"] << 3 buttons2_state = 0 buttons2_state = buttons2_state |", "state[9] holder1 = '{0:06b}'.format(int.from_bytes(buttons1, byteorder=\"big\")) holder2 = '{0:05b}'.format(int.from_bytes(buttons2, byteorder=\"big\")) for", "for i in holder2: buttons.append(int(i)) state = list(state[ :7]) +", "name for key, value in self.capabilities.items(): for element in value:", "= event.value buttons1_state = 0 buttons1_state = buttons1_state | self.state[\"BTN_A\"]", "<< 3 buttons2_state = 0 buttons2_state = buttons2_state | self.state[\"BTN_START\"]", "buttons2_state | self.state[\"BTN_SELECT\"] << 2 buttons2_state = buttons2_state | self.state[\"BTN_TR\"]", "self.capabilities.items(): for element in value: if type(element[0]) is tuple: self.config[element[0][1]]", "evdev.ecodes.EV_ABS: return(self.update_state(event)) def update_state(self, event): self.state[self.config[event.code]] = event.value buttons1_state =", "self.state[\"BTN_SELECT\"] << 2 buttons2_state = buttons2_state | self.state[\"BTN_TR\"] << 3", "<< 2 buttons1_state = buttons1_state | self.state[\"BTN_WEST\"] << 3 buttons2_state", "self.state[self.config[code]] = 0 print(\"waiting for event\") for event in 
self.device.read_loop():", "'{0:05b}'.format(int.from_bytes(buttons2, byteorder=\"big\")) for i in holder1: buttons.append(int(i)) for i in", "= element[0] #build state dictionary from raw codes for code", "<< 1 buttons2_state = buttons2_state | self.state[\"BTN_SELECT\"] << 2 buttons2_state", "self.state[self.config[event.code]] = event.value buttons1_state = 0 buttons1_state = buttons1_state |", "buttons2_state = 0 buttons2_state = buttons2_state | self.state[\"BTN_START\"] buttons2_state =", "0 print(\"waiting for event\") for event in self.device.read_loop(): if event.type", "import evdev import time import struct class appcodec(): def __init__(self):", "build(self): \"\"\"build state dictionary for controller\"\"\" #build config dictionary by", "buttons1_state = buttons1_state | self.state[\"BTN_B\"] << 1 buttons1_state = buttons1_state", "buttons1_state.to_bytes(1, byteorder=\"big\"), buttons2_state.to_bytes(1, byteorder=\"big\")) return packet def decode(self, packet): buttons", "self.capaRAW[3]: self.state[self.config[code]] = 0 print(\"waiting for event\") for event in", "= buttons2_state | self.state[\"BTN_TR\"] << 3 buttons2_state = buttons2_state |", "buttons1_state | self.state[\"BTN_B\"] << 1 buttons1_state = buttons1_state | self.state[\"BTN_NORTH\"]", "2 buttons2_state = buttons2_state | self.state[\"BTN_TR\"] << 3 buttons2_state =", "<< 4 packet = struct.pack('6h2c', self.state[\"ABS_X\"], self.state[\"ABS_Y\"], self.state[\"ABS_RX\"], self.state[\"ABS_RY\"], self.state[\"ABS_HAT0X\"],", "def __init__(self): self.device = evdev.InputDevice(\"/dev/input/event2\") self.capabilities = self.device.capabilities(verbose=True) self.capaRAW =", "tuple: self.config[element[0][1]] = element[0][0] elif type(element[0]) is list: self.config[element[1]] =", "packet def decode(self, packet): buttons = [] state = packet[14:30]", "buttons.append(int(i)) for i in holder2: buttons.append(int(i)) state = list(state[ :7])", "if event.type == evdev.ecodes.EV_KEY or 
event.type == evdev.ecodes.EV_ABS: return(self.update_state(event)) def", "= buttons2_state | self.state[\"BTN_TL\"] << 4 packet = struct.pack('6h2c', self.state[\"ABS_X\"],", "| self.state[\"BTN_TR\"] << 3 buttons2_state = buttons2_state | self.state[\"BTN_TL\"] <<", "self.state[\"BTN_TL\"] << 4 packet = struct.pack('6h2c', self.state[\"ABS_X\"], self.state[\"ABS_Y\"], self.state[\"ABS_RX\"], self.state[\"ABS_RY\"],", "1 buttons1_state = buttons1_state | self.state[\"BTN_NORTH\"] << 2 buttons1_state =", "type(element[0]) is tuple: self.config[element[0][1]] = element[0][0] elif type(element[0]) is list:", "<< 2 buttons2_state = buttons2_state | self.state[\"BTN_TR\"] << 3 buttons2_state", "type(element[0]) is list: self.config[element[1]] = element[0][0] elif (\"SYN\" in str(element[0]))", "state = struct.unpack('6h2B2c', state) buttons1 = state[8] buttons2 = state[9]", "[] state = packet[14:30] state = struct.unpack('6h2B2c', state) buttons1 =", "= element[0][0] elif type(element[0]) is list: self.config[element[1]] = element[0][0] elif", "element[0][0] elif type(element[0]) is list: self.config[element[1]] = element[0][0] elif (\"SYN\"", "= state[8] buttons2 = state[9] holder1 = '{0:06b}'.format(int.from_bytes(buttons1, byteorder=\"big\")) holder2", "(\"SYN\" in str(element[0])) or (\"FF\" in str(element[0])): pass else: self.config[element[1]]", "evdev import time import struct class appcodec(): def __init__(self): self.device", "key, value in self.capabilities.items(): for element in value: if type(element[0])", "| self.state[\"BTN_B\"] << 1 buttons1_state = buttons1_state | self.state[\"BTN_NORTH\"] <<", "for element in value: if type(element[0]) is tuple: self.config[element[0][1]] =", "== evdev.ecodes.EV_KEY or event.type == evdev.ecodes.EV_ABS: return(self.update_state(event)) def update_state(self, event):", "buttons1_state = buttons1_state | self.state[\"BTN_NORTH\"] << 2 buttons1_state = buttons1_state", "struct.pack('6h2c', self.state[\"ABS_X\"], 
self.state[\"ABS_Y\"], self.state[\"ABS_RX\"], self.state[\"ABS_RY\"], self.state[\"ABS_HAT0X\"], self.state[\"ABS_HAT0Y\"], buttons1_state.to_bytes(1, byteorder=\"big\"), buttons2_state.to_bytes(1,", "self.state[\"ABS_RY\"], self.state[\"ABS_HAT0X\"], self.state[\"ABS_HAT0Y\"], buttons1_state.to_bytes(1, byteorder=\"big\"), buttons2_state.to_bytes(1, byteorder=\"big\")) return packet def", "3 buttons2_state = 0 buttons2_state = buttons2_state | self.state[\"BTN_START\"] buttons2_state", "self.state[\"BTN_START\"] buttons2_state = buttons2_state | self.state[\"BTN_MODE\"] << 1 buttons2_state =", "buttons2_state = buttons2_state | self.state[\"BTN_START\"] buttons2_state = buttons2_state | self.state[\"BTN_MODE\"]", "byteorder=\"big\"), buttons2_state.to_bytes(1, byteorder=\"big\")) return packet def decode(self, packet): buttons =", "self.state[\"ABS_X\"], self.state[\"ABS_Y\"], self.state[\"ABS_RX\"], self.state[\"ABS_RY\"], self.state[\"ABS_HAT0X\"], self.state[\"ABS_HAT0Y\"], buttons1_state.to_bytes(1, byteorder=\"big\"), buttons2_state.to_bytes(1, byteorder=\"big\"))", "buttons2_state | self.state[\"BTN_START\"] buttons2_state = buttons2_state | self.state[\"BTN_MODE\"] << 1", "= buttons1_state | self.state[\"BTN_NORTH\"] << 2 buttons1_state = buttons1_state |", "| self.state[\"BTN_A\"] buttons1_state = buttons1_state | self.state[\"BTN_B\"] << 1 buttons1_state", "{} def build(self): \"\"\"build state dictionary for controller\"\"\" #build config", "= evdev.InputDevice(\"/dev/input/event2\") self.capabilities = self.device.capabilities(verbose=True) self.capaRAW = self.device.capabilities(absinfo=False) self.config =", "self.capabilities = self.device.capabilities(verbose=True) self.capaRAW = self.device.capabilities(absinfo=False) self.config = {} self.state", "buttons1_state | self.state[\"BTN_A\"] buttons1_state = buttons1_state | self.state[\"BTN_B\"] << 1", "= struct.unpack('6h2B2c', state) buttons1 = state[8] buttons2 = state[9] holder1", 
"byteorder=\"big\")) holder2 = '{0:05b}'.format(int.from_bytes(buttons2, byteorder=\"big\")) for i in holder1: buttons.append(int(i))", "= buttons2_state | self.state[\"BTN_MODE\"] << 1 buttons2_state = buttons2_state |", "buttons1_state = 0 buttons1_state = buttons1_state | self.state[\"BTN_A\"] buttons1_state =", "packet[14:30] state = struct.unpack('6h2B2c', state) buttons1 = state[8] buttons2 =", "byteorder=\"big\")) for i in holder1: buttons.append(int(i)) for i in holder2:", "self.device = evdev.InputDevice(\"/dev/input/event2\") self.capabilities = self.device.capabilities(verbose=True) self.capaRAW = self.device.capabilities(absinfo=False) self.config", "or event.type == evdev.ecodes.EV_ABS: return(self.update_state(event)) def update_state(self, event): self.state[self.config[event.code]] =", "else: self.config[element[1]] = element[0] #build state dictionary from raw codes", "in holder1: buttons.append(int(i)) for i in holder2: buttons.append(int(i)) state =", "buttons1_state | self.state[\"BTN_WEST\"] << 3 buttons2_state = 0 buttons2_state =", "= 0 print(\"waiting for event\") for event in self.device.read_loop(): if", "state = packet[14:30] state = struct.unpack('6h2B2c', state) buttons1 = state[8]", "= element[0][0] elif (\"SYN\" in str(element[0])) or (\"FF\" in str(element[0])):", "str(element[0])) or (\"FF\" in str(element[0])): pass else: self.config[element[1]] = element[0]", "| self.state[\"BTN_START\"] buttons2_state = buttons2_state | self.state[\"BTN_MODE\"] << 1 buttons2_state", "0 for code in self.capaRAW[3]: self.state[self.config[code]] = 0 print(\"waiting for", "raw codes for code in self.capaRAW[1]: self.state[self.config[code]] = 0 for", "elif (\"SYN\" in str(element[0])) or (\"FF\" in str(element[0])): pass else:", "(\"FF\" in str(element[0])): pass else: self.config[element[1]] = element[0] #build state", "str(element[0])): pass else: self.config[element[1]] = element[0] #build state dictionary from", "= buttons2_state | 
self.state[\"BTN_SELECT\"] << 2 buttons2_state = buttons2_state |", "codes for code in self.capaRAW[1]: self.state[self.config[code]] = 0 for code", "| self.state[\"BTN_TL\"] << 4 packet = struct.pack('6h2c', self.state[\"ABS_X\"], self.state[\"ABS_Y\"], self.state[\"ABS_RX\"],", "event.type == evdev.ecodes.EV_KEY or event.type == evdev.ecodes.EV_ABS: return(self.update_state(event)) def update_state(self,", "buttons2_state = buttons2_state | self.state[\"BTN_MODE\"] << 1 buttons2_state = buttons2_state", "self.state[\"ABS_HAT0X\"], self.state[\"ABS_HAT0Y\"], buttons1_state.to_bytes(1, byteorder=\"big\"), buttons2_state.to_bytes(1, byteorder=\"big\")) return packet def decode(self,", "in self.capabilities.items(): for element in value: if type(element[0]) is tuple:", "= self.device.capabilities(verbose=True) self.capaRAW = self.device.capabilities(absinfo=False) self.config = {} self.state =", "holder2: buttons.append(int(i)) state = list(state[ :7]) + buttons return state", "appcodec(): def __init__(self): self.device = evdev.InputDevice(\"/dev/input/event2\") self.capabilities = self.device.capabilities(verbose=True) self.capaRAW", "self.device.capabilities(verbose=True) self.capaRAW = self.device.capabilities(absinfo=False) self.config = {} self.state = {}", "2 buttons1_state = buttons1_state | self.state[\"BTN_WEST\"] << 3 buttons2_state =", "element[0][0] elif (\"SYN\" in str(element[0])) or (\"FF\" in str(element[0])): pass", "0 buttons2_state = buttons2_state | self.state[\"BTN_START\"] buttons2_state = buttons2_state |", "in value: if type(element[0]) is tuple: self.config[element[0][1]] = element[0][0] elif", "if type(element[0]) is tuple: self.config[element[0][1]] = element[0][0] elif type(element[0]) is", "event\") for event in self.device.read_loop(): if event.type == evdev.ecodes.EV_KEY or", "self.device.read_loop(): if event.type == evdev.ecodes.EV_KEY or event.type == evdev.ecodes.EV_ABS: return(self.update_state(event))", "| 
self.state[\"BTN_NORTH\"] << 2 buttons1_state = buttons1_state | self.state[\"BTN_WEST\"] <<", "time import struct class appcodec(): def __init__(self): self.device = evdev.InputDevice(\"/dev/input/event2\")", "buttons1_state | self.state[\"BTN_NORTH\"] << 2 buttons1_state = buttons1_state | self.state[\"BTN_WEST\"]", "self.state[self.config[code]] = 0 for code in self.capaRAW[3]: self.state[self.config[code]] = 0", "in self.capaRAW[3]: self.state[self.config[code]] = 0 print(\"waiting for event\") for event", "self.state[\"BTN_MODE\"] << 1 buttons2_state = buttons2_state | self.state[\"BTN_SELECT\"] << 2" ]
[ "run at 640x480 in grayscale... def barcode_name(code): if(code.type() == image.EAN2):", "# must turn this off to prevent image washout... sensor.set_auto_whitebal(False)", "a higher resolution # to work well so it should", "higher resolution # to work well so it should always", "# V Res of 80 == less work (40 for", "in grayscale... def barcode_name(code): if(code.type() == image.EAN2): return \"EAN2\" if(code.type()", "= 2000) sensor.set_auto_gain(False) # must turn this off to prevent", "image washout... sensor.set_auto_whitebal(False) # must turn this off to prevent", "== image.I25): return \"I25\" if(code.type() == image.DATABAR): return \"DATABAR\" if(code.type()", "return \"PDF417\" if(code.type() == image.CODE93): return \"CODE93\" if(code.type() == image.CODE128):", "clock = time.clock() # Barcode detection can run at the", "== image.CODE39): return \"CODE39\" if(code.type() == image.PDF417): return \"PDF417\" if(code.type()", "Cam M7. Barcode detection does not work on the M4", "your OpenMV Cam's # OV7725 camera module. Barcode detection will", "return \"CODE128\" while(True): clock.tick() img = sensor.snapshot() codes = img.find_barcodes()", "run at the full 640x480 resolution of your OpenMV Cam's", "return \"ISBN13\" if(code.type() == image.I25): return \"I25\" if(code.type() == image.DATABAR):", "return \"EAN5\" if(code.type() == image.EAN8): return \"EAN8\" if(code.type() == image.UPCE):", "the # OpenMV Cam M7. Barcode detection does not work", "image.EAN13): return \"EAN13\" if(code.type() == image.ISBN13): return \"ISBN13\" if(code.type() ==", "%d, FPS %f\" % print_args) if not codes: print(\"FPS %f\"", "# Barcode Example # # This example shows off how", "is to detect bar codes using the # OpenMV Cam", "a lower resolution. That said, barcode detection requires a higher", "code in codes: img.draw_rectangle(code.rect()) print_args = (barcode_name(code), code.payload(), (180 *", "High Res! 
sensor.set_windowing((640, 80)) # V Res of 80 ==", "Cam's # OV7725 camera module. Barcode detection will also work", "return \"UPCE\" if(code.type() == image.ISBN10): return \"ISBN10\" if(code.type() == image.UPCA):", "\"PDF417\" if(code.type() == image.CODE93): return \"CODE93\" if(code.type() == image.CODE128): return", "rotation %f (degrees), quality %d, FPS %f\" % print_args) if", "sensor.skip_frames(time = 2000) sensor.set_auto_gain(False) # must turn this off to", "code.payload(), (180 * code.rotation()) / math.pi, code.quality(), clock.fps()) print(\"Barcode %s,", "Barcode detection will also work in RGB565 mode but at", "\"EAN8\" if(code.type() == image.UPCE): return \"UPCE\" if(code.type() == image.ISBN10): return", "work in RGB565 mode but at # a lower resolution.", "== image.UPCE): return \"UPCE\" if(code.type() == image.ISBN10): return \"ISBN10\" if(code.type()", "image.CODE39): return \"CODE39\" if(code.type() == image.PDF417): return \"PDF417\" if(code.type() ==", "work on the M4 Camera. import sensor, image, time, math", "image, time, math sensor.reset() sensor.set_pixformat(sensor.GRAYSCALE) sensor.set_framesize(sensor.VGA) # High Res! sensor.set_windowing((640,", "codes = img.find_barcodes() for code in codes: img.draw_rectangle(code.rect()) print_args =", "does not work on the M4 Camera. import sensor, image,", "== image.EAN2): return \"EAN2\" if(code.type() == image.EAN5): return \"EAN5\" if(code.type()", "sensor.reset() sensor.set_pixformat(sensor.GRAYSCALE) sensor.set_framesize(sensor.VGA) # High Res! sensor.set_windowing((640, 80)) # V", "# High Res! sensor.set_windowing((640, 80)) # V Res of 80", "Barcode detection does not work on the M4 Camera. import", "turn this off to prevent image washout... 
sensor.set_auto_whitebal(False) # must", "image.CODE93): return \"CODE93\" if(code.type() == image.CODE128): return \"CODE128\" while(True): clock.tick()", "print(\"Barcode %s, Payload \\\"%s\\\", rotation %f (degrees), quality %d, FPS", "barcode detection requires a higher resolution # to work well", "80 == less work (40 for 2X the speed). sensor.skip_frames(time", "camera module. Barcode detection will also work in RGB565 mode", "full 640x480 resolution of your OpenMV Cam's # OV7725 camera", "time, math sensor.reset() sensor.set_pixformat(sensor.GRAYSCALE) sensor.set_framesize(sensor.VGA) # High Res! sensor.set_windowing((640, 80))", "not work on the M4 Camera. import sensor, image, time,", "image.I25): return \"I25\" if(code.type() == image.DATABAR): return \"DATABAR\" if(code.type() ==", "== image.CODABAR): return \"CODABAR\" if(code.type() == image.CODE39): return \"CODE39\" if(code.type()", "work well so it should always be run at 640x480", "OpenMV Cam M7. Barcode detection does not work on the", "OV7725 camera module. Barcode detection will also work in RGB565", "module. Barcode detection will also work in RGB565 mode but", "to prevent image washout... sensor.set_auto_whitebal(False) # must turn this off", "the M4 Camera. import sensor, image, time, math sensor.reset() sensor.set_pixformat(sensor.GRAYSCALE)", "# This example shows off how easy it is to", "this off to prevent image washout... clock = time.clock() #", "at # a lower resolution. That said, barcode detection requires", "image.PDF417): return \"PDF417\" if(code.type() == image.CODE93): return \"CODE93\" if(code.type() ==", "to work well so it should always be run at", "# to work well so it should always be run", "if(code.type() == image.EAN13): return \"EAN13\" if(code.type() == image.ISBN13): return \"ISBN13\"", "return \"ISBN10\" if(code.type() == image.UPCA): return \"UPCA\" if(code.type() == image.EAN13):", "# OpenMV Cam M7. 
Barcode detection does not work on", "%f\" % print_args) if not codes: print(\"FPS %f\" % clock.fps())", "if(code.type() == image.EAN8): return \"EAN8\" if(code.type() == image.UPCE): return \"UPCE\"", "Camera. import sensor, image, time, math sensor.reset() sensor.set_pixformat(sensor.GRAYSCALE) sensor.set_framesize(sensor.VGA) #", "less work (40 for 2X the speed). sensor.skip_frames(time = 2000)", "at 640x480 in grayscale... def barcode_name(code): if(code.type() == image.EAN2): return", "image.EAN2): return \"EAN2\" if(code.type() == image.EAN5): return \"EAN5\" if(code.type() ==", "turn this off to prevent image washout... clock = time.clock()", "return \"I25\" if(code.type() == image.DATABAR): return \"DATABAR\" if(code.type() == image.DATABAR_EXP):", "Payload \\\"%s\\\", rotation %f (degrees), quality %d, FPS %f\" %", "always be run at 640x480 in grayscale... def barcode_name(code): if(code.type()", "this off to prevent image washout... sensor.set_auto_whitebal(False) # must turn", "V Res of 80 == less work (40 for 2X", "\"CODE93\" if(code.type() == image.CODE128): return \"CODE128\" while(True): clock.tick() img =", "said, barcode detection requires a higher resolution # to work", "OpenMV Cam's # OV7725 camera module. Barcode detection will also", "\"CODABAR\" if(code.type() == image.CODE39): return \"CODE39\" if(code.type() == image.PDF417): return", "img.draw_rectangle(code.rect()) print_args = (barcode_name(code), code.payload(), (180 * code.rotation()) / math.pi,", "washout... clock = time.clock() # Barcode detection can run at", "image.ISBN10): return \"ISBN10\" if(code.type() == image.UPCA): return \"UPCA\" if(code.type() ==", "in RGB565 mode but at # a lower resolution. That", "speed). sensor.skip_frames(time = 2000) sensor.set_auto_gain(False) # must turn this off", "if(code.type() == image.ISBN13): return \"ISBN13\" if(code.type() == image.I25): return \"I25\"", "should always be run at 640x480 in grayscale... 
def barcode_name(code):", "it is to detect bar codes using the # OpenMV", "640x480 in grayscale... def barcode_name(code): if(code.type() == image.EAN2): return \"EAN2\"", "== image.EAN8): return \"EAN8\" if(code.type() == image.UPCE): return \"UPCE\" if(code.type()", "sensor.set_auto_gain(False) # must turn this off to prevent image washout...", "if(code.type() == image.CODE93): return \"CODE93\" if(code.type() == image.CODE128): return \"CODE128\"", "\"EAN2\" if(code.type() == image.EAN5): return \"EAN5\" if(code.type() == image.EAN8): return", "prevent image washout... sensor.set_auto_whitebal(False) # must turn this off to", "Barcode Example # # This example shows off how easy", "must turn this off to prevent image washout... clock =", "\"DATABAR\" if(code.type() == image.DATABAR_EXP): return \"DATABAR_EXP\" if(code.type() == image.CODABAR): return", "image.CODE128): return \"CODE128\" while(True): clock.tick() img = sensor.snapshot() codes =", "== image.CODE128): return \"CODE128\" while(True): clock.tick() img = sensor.snapshot() codes", "detection can run at the full 640x480 resolution of your", "grayscale... def barcode_name(code): if(code.type() == image.EAN2): return \"EAN2\" if(code.type() ==", "(40 for 2X the speed). sensor.skip_frames(time = 2000) sensor.set_auto_gain(False) #", "off to prevent image washout... sensor.set_auto_whitebal(False) # must turn this", "how easy it is to detect bar codes using the", "detection requires a higher resolution # to work well so", "if(code.type() == image.UPCE): return \"UPCE\" if(code.type() == image.ISBN10): return \"ISBN10\"", "if(code.type() == image.ISBN10): return \"ISBN10\" if(code.type() == image.UPCA): return \"UPCA\"", "detect bar codes using the # OpenMV Cam M7. Barcode", "return \"CODE93\" if(code.type() == image.CODE128): return \"CODE128\" while(True): clock.tick() img", "/ math.pi, code.quality(), clock.fps()) print(\"Barcode %s, Payload \\\"%s\\\", rotation %f", "mode but at # a lower resolution. 
That said, barcode", "resolution. That said, barcode detection requires a higher resolution #", "if(code.type() == image.CODE39): return \"CODE39\" if(code.type() == image.PDF417): return \"PDF417\"", "clock.fps()) print(\"Barcode %s, Payload \\\"%s\\\", rotation %f (degrees), quality %d,", "image.UPCA): return \"UPCA\" if(code.type() == image.EAN13): return \"EAN13\" if(code.type() ==", "it should always be run at 640x480 in grayscale... def", "80)) # V Res of 80 == less work (40", "bar codes using the # OpenMV Cam M7. Barcode detection", "time.clock() # Barcode detection can run at the full 640x480", "\"ISBN13\" if(code.type() == image.I25): return \"I25\" if(code.type() == image.DATABAR): return", "return \"CODE39\" if(code.type() == image.PDF417): return \"PDF417\" if(code.type() == image.CODE93):", "if(code.type() == image.DATABAR_EXP): return \"DATABAR_EXP\" if(code.type() == image.CODABAR): return \"CODABAR\"", "resolution # to work well so it should always be", "lower resolution. That said, barcode detection requires a higher resolution", "= img.find_barcodes() for code in codes: img.draw_rectangle(code.rect()) print_args = (barcode_name(code),", "well so it should always be run at 640x480 in", "img.find_barcodes() for code in codes: img.draw_rectangle(code.rect()) print_args = (barcode_name(code), code.payload(),", "M7. Barcode detection does not work on the M4 Camera.", "\"CODE128\" while(True): clock.tick() img = sensor.snapshot() codes = img.find_barcodes() for", "example shows off how easy it is to detect bar", "sensor.set_framesize(sensor.VGA) # High Res! 
sensor.set_windowing((640, 80)) # V Res of", "\"I25\" if(code.type() == image.DATABAR): return \"DATABAR\" if(code.type() == image.DATABAR_EXP): return", "== image.DATABAR): return \"DATABAR\" if(code.type() == image.DATABAR_EXP): return \"DATABAR_EXP\" if(code.type()", "== image.DATABAR_EXP): return \"DATABAR_EXP\" if(code.type() == image.CODABAR): return \"CODABAR\" if(code.type()", "= time.clock() # Barcode detection can run at the full", "%f (degrees), quality %d, FPS %f\" % print_args) if not", "if(code.type() == image.EAN2): return \"EAN2\" if(code.type() == image.EAN5): return \"EAN5\"", "sensor, image, time, math sensor.reset() sensor.set_pixformat(sensor.GRAYSCALE) sensor.set_framesize(sensor.VGA) # High Res!", "codes using the # OpenMV Cam M7. Barcode detection does", "work (40 for 2X the speed). sensor.skip_frames(time = 2000) sensor.set_auto_gain(False)", "== image.CODE93): return \"CODE93\" if(code.type() == image.CODE128): return \"CODE128\" while(True):", "\"UPCA\" if(code.type() == image.EAN13): return \"EAN13\" if(code.type() == image.ISBN13): return", "image.EAN8): return \"EAN8\" if(code.type() == image.UPCE): return \"UPCE\" if(code.type() ==", "img = sensor.snapshot() codes = img.find_barcodes() for code in codes:", "(180 * code.rotation()) / math.pi, code.quality(), clock.fps()) print(\"Barcode %s, Payload", "detection will also work in RGB565 mode but at #", "\"EAN13\" if(code.type() == image.ISBN13): return \"ISBN13\" if(code.type() == image.I25): return", "return \"EAN2\" if(code.type() == image.EAN5): return \"EAN5\" if(code.type() == image.EAN8):", "== less work (40 for 2X the speed). 
sensor.skip_frames(time =", "if(code.type() == image.DATABAR): return \"DATABAR\" if(code.type() == image.DATABAR_EXP): return \"DATABAR_EXP\"", "return \"DATABAR\" if(code.type() == image.DATABAR_EXP): return \"DATABAR_EXP\" if(code.type() == image.CODABAR):", "also work in RGB565 mode but at # a lower", "return \"CODABAR\" if(code.type() == image.CODE39): return \"CODE39\" if(code.type() == image.PDF417):", "shows off how easy it is to detect bar codes", "off to prevent image washout... clock = time.clock() # Barcode", "2X the speed). sensor.skip_frames(time = 2000) sensor.set_auto_gain(False) # must turn", "if(code.type() == image.CODABAR): return \"CODABAR\" if(code.type() == image.CODE39): return \"CODE39\"", "on the M4 Camera. import sensor, image, time, math sensor.reset()", "if(code.type() == image.UPCA): return \"UPCA\" if(code.type() == image.EAN13): return \"EAN13\"", "math sensor.reset() sensor.set_pixformat(sensor.GRAYSCALE) sensor.set_framesize(sensor.VGA) # High Res! sensor.set_windowing((640, 80)) #", "the speed). sensor.skip_frames(time = 2000) sensor.set_auto_gain(False) # must turn this", "sensor.set_windowing((640, 80)) # V Res of 80 == less work", "will also work in RGB565 mode but at # a", "return \"DATABAR_EXP\" if(code.type() == image.CODABAR): return \"CODABAR\" if(code.type() == image.CODE39):", "sensor.snapshot() codes = img.find_barcodes() for code in codes: img.draw_rectangle(code.rect()) print_args", "if(code.type() == image.PDF417): return \"PDF417\" if(code.type() == image.CODE93): return \"CODE93\"", "# Barcode detection can run at the full 640x480 resolution", "to prevent image washout... clock = time.clock() # Barcode detection", "\\\"%s\\\", rotation %f (degrees), quality %d, FPS %f\" % print_args)", "2000) sensor.set_auto_gain(False) # must turn this off to prevent image", "FPS %f\" % print_args) if not codes: print(\"FPS %f\" %", "but at # a lower resolution. 
That said, barcode detection", "if(code.type() == image.CODE128): return \"CODE128\" while(True): clock.tick() img = sensor.snapshot()", "to detect bar codes using the # OpenMV Cam M7.", "image.EAN5): return \"EAN5\" if(code.type() == image.EAN8): return \"EAN8\" if(code.type() ==", "image washout... clock = time.clock() # Barcode detection can run", "def barcode_name(code): if(code.type() == image.EAN2): return \"EAN2\" if(code.type() == image.EAN5):", "return \"EAN8\" if(code.type() == image.UPCE): return \"UPCE\" if(code.type() == image.ISBN10):", "using the # OpenMV Cam M7. Barcode detection does not", "at the full 640x480 resolution of your OpenMV Cam's #", "== image.EAN5): return \"EAN5\" if(code.type() == image.EAN8): return \"EAN8\" if(code.type()", "code.rotation()) / math.pi, code.quality(), clock.fps()) print(\"Barcode %s, Payload \\\"%s\\\", rotation", "image.CODABAR): return \"CODABAR\" if(code.type() == image.CODE39): return \"CODE39\" if(code.type() ==", "quality %d, FPS %f\" % print_args) if not codes: print(\"FPS", "This example shows off how easy it is to detect", "easy it is to detect bar codes using the #", "must turn this off to prevent image washout... sensor.set_auto_whitebal(False) #", "# a lower resolution. That said, barcode detection requires a", "# OV7725 camera module. Barcode detection will also work in", "so it should always be run at 640x480 in grayscale...", "\"ISBN10\" if(code.type() == image.UPCA): return \"UPCA\" if(code.type() == image.EAN13): return", "== image.PDF417): return \"PDF417\" if(code.type() == image.CODE93): return \"CODE93\" if(code.type()", "requires a higher resolution # to work well so it", "the full 640x480 resolution of your OpenMV Cam's # OV7725", "Res! sensor.set_windowing((640, 80)) # V Res of 80 == less", "\"CODE39\" if(code.type() == image.PDF417): return \"PDF417\" if(code.type() == image.CODE93): return", "M4 Camera. 
import sensor, image, time, math sensor.reset() sensor.set_pixformat(sensor.GRAYSCALE) sensor.set_framesize(sensor.VGA)", "== image.UPCA): return \"UPCA\" if(code.type() == image.EAN13): return \"EAN13\" if(code.type()", "* code.rotation()) / math.pi, code.quality(), clock.fps()) print(\"Barcode %s, Payload \\\"%s\\\",", "= sensor.snapshot() codes = img.find_barcodes() for code in codes: img.draw_rectangle(code.rect())", "in codes: img.draw_rectangle(code.rect()) print_args = (barcode_name(code), code.payload(), (180 * code.rotation())", "code.quality(), clock.fps()) print(\"Barcode %s, Payload \\\"%s\\\", rotation %f (degrees), quality", "off how easy it is to detect bar codes using", "be run at 640x480 in grayscale... def barcode_name(code): if(code.type() ==", "\"DATABAR_EXP\" if(code.type() == image.CODABAR): return \"CODABAR\" if(code.type() == image.CODE39): return", "washout... sensor.set_auto_whitebal(False) # must turn this off to prevent image", "while(True): clock.tick() img = sensor.snapshot() codes = img.find_barcodes() for code", "sensor.set_pixformat(sensor.GRAYSCALE) sensor.set_framesize(sensor.VGA) # High Res! sensor.set_windowing((640, 80)) # V Res", "if(code.type() == image.EAN5): return \"EAN5\" if(code.type() == image.EAN8): return \"EAN8\"", "math.pi, code.quality(), clock.fps()) print(\"Barcode %s, Payload \\\"%s\\\", rotation %f (degrees),", "print_args = (barcode_name(code), code.payload(), (180 * code.rotation()) / math.pi, code.quality(),", "return \"UPCA\" if(code.type() == image.EAN13): return \"EAN13\" if(code.type() == image.ISBN13):", "(degrees), quality %d, FPS %f\" % print_args) if not codes:", "of your OpenMV Cam's # OV7725 camera module. Barcode detection", "barcode_name(code): if(code.type() == image.EAN2): return \"EAN2\" if(code.type() == image.EAN5): return", "for code in codes: img.draw_rectangle(code.rect()) print_args = (barcode_name(code), code.payload(), (180", "detection does not work on the M4 Camera. 
import sensor,", "can run at the full 640x480 resolution of your OpenMV", "clock.tick() img = sensor.snapshot() codes = img.find_barcodes() for code in", "codes: img.draw_rectangle(code.rect()) print_args = (barcode_name(code), code.payload(), (180 * code.rotation()) /", "for 2X the speed). sensor.skip_frames(time = 2000) sensor.set_auto_gain(False) # must", "image.ISBN13): return \"ISBN13\" if(code.type() == image.I25): return \"I25\" if(code.type() ==", "RGB565 mode but at # a lower resolution. That said,", "\"UPCE\" if(code.type() == image.ISBN10): return \"ISBN10\" if(code.type() == image.UPCA): return", "of 80 == less work (40 for 2X the speed).", "== image.ISBN13): return \"ISBN13\" if(code.type() == image.I25): return \"I25\" if(code.type()", "(barcode_name(code), code.payload(), (180 * code.rotation()) / math.pi, code.quality(), clock.fps()) print(\"Barcode", "Barcode detection can run at the full 640x480 resolution of", "%s, Payload \\\"%s\\\", rotation %f (degrees), quality %d, FPS %f\"", "image.DATABAR_EXP): return \"DATABAR_EXP\" if(code.type() == image.CODABAR): return \"CODABAR\" if(code.type() ==", "Example # # This example shows off how easy it", "# must turn this off to prevent image washout... clock", "That said, barcode detection requires a higher resolution # to", "\"EAN5\" if(code.type() == image.EAN8): return \"EAN8\" if(code.type() == image.UPCE): return", "== image.ISBN10): return \"ISBN10\" if(code.type() == image.UPCA): return \"UPCA\" if(code.type()", "image.UPCE): return \"UPCE\" if(code.type() == image.ISBN10): return \"ISBN10\" if(code.type() ==", "# # This example shows off how easy it is", "return \"EAN13\" if(code.type() == image.ISBN13): return \"ISBN13\" if(code.type() == image.I25):", "sensor.set_auto_whitebal(False) # must turn this off to prevent image washout...", "prevent image washout... 
clock = time.clock() # Barcode detection can", "if(code.type() == image.I25): return \"I25\" if(code.type() == image.DATABAR): return \"DATABAR\"", "image.DATABAR): return \"DATABAR\" if(code.type() == image.DATABAR_EXP): return \"DATABAR_EXP\" if(code.type() ==", "= (barcode_name(code), code.payload(), (180 * code.rotation()) / math.pi, code.quality(), clock.fps())", "resolution of your OpenMV Cam's # OV7725 camera module. Barcode", "== image.EAN13): return \"EAN13\" if(code.type() == image.ISBN13): return \"ISBN13\" if(code.type()", "640x480 resolution of your OpenMV Cam's # OV7725 camera module.", "import sensor, image, time, math sensor.reset() sensor.set_pixformat(sensor.GRAYSCALE) sensor.set_framesize(sensor.VGA) # High", "Res of 80 == less work (40 for 2X the" ]
[ "<reponame>devaslooper/Code-Overflow n=int(input(\"Enter number \")) fact=1 for i in range(1,n+1): fact=fact*i", "n=int(input(\"Enter number \")) fact=1 for i in range(1,n+1): fact=fact*i print(\"Factorial", "\")) fact=1 for i in range(1,n+1): fact=fact*i print(\"Factorial is \",fact)", "number \")) fact=1 for i in range(1,n+1): fact=fact*i print(\"Factorial is" ]
[ "o paramêtro. Permite atribuir inumeros parametros. def maior(* num): contador", "= contador + 1 print(f'{v} ', end='', flush=True) time.sleep(0.3) if", "1: maior = v else: if v > maior: maior", "import time # O * é para desempacotar o paramêtro.", "if contador == 1: maior = v else: if v", "7) maior(5, 4, 7, 9, 2) maior(1, 4, 7, 20,", "if v > maior: maior = v print(f'Foram informado o", "maior = v else: if v > maior: maior =", "> maior: maior = v print(f'Foram informado o total de", "informado foi {max(num)}') print(30 * '-') maior(2, 1, 7) maior(5,", "inumeros parametros. def maior(* num): contador = maior = 0", "in num: contador = contador + 1 print(f'{v} ', end='',", "informado o total de {len(num)}') print(f'O maior valor informado foi", "total de {len(num)}') print(f'O maior valor informado foi {max(num)}') print(30", "maior(5, 4, 7, 9, 2) maior(1, 4, 7, 20, 2)", "para desempacotar o paramêtro. Permite atribuir inumeros parametros. def maior(*", "flush=True) time.sleep(0.3) if contador == 1: maior = v else:", "* '-') maior(2, 1, 7) maior(5, 4, 7, 9, 2)", "maior = 0 print('Analisando os valores passados...') for v in", "# O * é para desempacotar o paramêtro. Permite atribuir", "paramêtro. Permite atribuir inumeros parametros. def maior(* num): contador =", "Permite atribuir inumeros parametros. def maior(* num): contador = maior", "else: if v > maior: maior = v print(f'Foram informado", "valor informado foi {max(num)}') print(30 * '-') maior(2, 1, 7)", "', end='', flush=True) time.sleep(0.3) if contador == 1: maior =", "maior: maior = v print(f'Foram informado o total de {len(num)}')", "print(30 * '-') maior(2, 1, 7) maior(5, 4, 7, 9,", "num: contador = contador + 1 print(f'{v} ', end='', flush=True)", "print(f'O maior valor informado foi {max(num)}') print(30 * '-') maior(2,", "de {len(num)}') print(f'O maior valor informado foi {max(num)}') print(30 *", "O * é para desempacotar o paramêtro. 
Permite atribuir inumeros", "contador + 1 print(f'{v} ', end='', flush=True) time.sleep(0.3) if contador", "{len(num)}') print(f'O maior valor informado foi {max(num)}') print(30 * '-')", "1 print(f'{v} ', end='', flush=True) time.sleep(0.3) if contador == 1:", "contador = contador + 1 print(f'{v} ', end='', flush=True) time.sleep(0.3)", "== 1: maior = v else: if v > maior:", "= v print(f'Foram informado o total de {len(num)}') print(f'O maior", "o total de {len(num)}') print(f'O maior valor informado foi {max(num)}')", "desempacotar o paramêtro. Permite atribuir inumeros parametros. def maior(* num):", "passados...') for v in num: contador = contador + 1", "v print(f'Foram informado o total de {len(num)}') print(f'O maior valor", "print(f'Foram informado o total de {len(num)}') print(f'O maior valor informado", "maior(2, 1, 7) maior(5, 4, 7, 9, 2) maior(1, 4,", "<reponame>thiagofreitascarneiro/Curso-de-Python---Curso-em-Video import time # O * é para desempacotar o", "time # O * é para desempacotar o paramêtro. Permite", "maior valor informado foi {max(num)}') print(30 * '-') maior(2, 1,", "num): contador = maior = 0 print('Analisando os valores passados...')", "4, 7, 9, 2) maior(1, 4, 7, 20, 2) maior(0)", "print('Analisando os valores passados...') for v in num: contador =", "é para desempacotar o paramêtro. Permite atribuir inumeros parametros. 
def", "os valores passados...') for v in num: contador = contador", "{max(num)}') print(30 * '-') maior(2, 1, 7) maior(5, 4, 7,", "foi {max(num)}') print(30 * '-') maior(2, 1, 7) maior(5, 4,", "= 0 print('Analisando os valores passados...') for v in num:", "contador = maior = 0 print('Analisando os valores passados...') for", "maior(* num): contador = maior = 0 print('Analisando os valores", "0 print('Analisando os valores passados...') for v in num: contador", "+ 1 print(f'{v} ', end='', flush=True) time.sleep(0.3) if contador ==", "v > maior: maior = v print(f'Foram informado o total", "time.sleep(0.3) if contador == 1: maior = v else: if", "valores passados...') for v in num: contador = contador +", "maior = v print(f'Foram informado o total de {len(num)}') print(f'O", "def maior(* num): contador = maior = 0 print('Analisando os", "v in num: contador = contador + 1 print(f'{v} ',", "'-') maior(2, 1, 7) maior(5, 4, 7, 9, 2) maior(1,", "1, 7) maior(5, 4, 7, 9, 2) maior(1, 4, 7,", "print(f'{v} ', end='', flush=True) time.sleep(0.3) if contador == 1: maior", "parametros. def maior(* num): contador = maior = 0 print('Analisando", "= maior = 0 print('Analisando os valores passados...') for v", "for v in num: contador = contador + 1 print(f'{v}", "end='', flush=True) time.sleep(0.3) if contador == 1: maior = v", "contador == 1: maior = v else: if v >", "= v else: if v > maior: maior = v", "atribuir inumeros parametros. def maior(* num): contador = maior =", "* é para desempacotar o paramêtro. Permite atribuir inumeros parametros.", "v else: if v > maior: maior = v print(f'Foram" ]
[ "can only be 2-dimensional.\"): values = torch.rand((100, 50, 2), device=device)", "80], dtype=torch.int64, device=device) padded_to_packed(values, first_idxs, 20) with self.assertRaisesRegex(ValueError, \"input can", "device=device) first_idxs = torch.tensor([0, 80], dtype=torch.int64, device=device) packed_to_padded(values, first_idxs, 100)", "100) with self.assertRaisesRegex(ValueError, \"input can only be 3-dimensional.\"): values =", "import unittest import torch from common_testing import TestCaseMixin, get_random_cuda_device from", "dtype=torch.int64, device=device ) verts_list.append(verts) faces_list.append(faces) meshes = Meshes(verts_list, faces_list) return", "device, ) # check forward self.assertClose(values_packed, values_packed_torch) # check backward", "= num_faces_per_mesh.max().item() if D == 0: values = torch.rand((len(meshes), max_faces),", "meshes = self.init_meshes(16, 100, 300, device=device) faces = meshes.faces_packed() mesh_to_faces_packed_first_idx", "PyTorch implementations are the same. 
\"\"\" meshes = self.init_meshes(16, 100,", "values_packed_torch.backward(grad_inputs) grad_outputs_torch1 = values_torch.grad grad_outputs_torch2 = TestPackedToPadded.packed_to_padded_python( grad_inputs, mesh_to_faces_packed_first_idx, values.size(1),", "faces_list) return meshes @staticmethod def packed_to_padded_python(inputs, first_idxs, max_size, device): \"\"\"", "= values.detach().clone() values_torch.requires_grad = True values_packed = padded_to_packed( values, mesh_to_faces_packed_first_idx,", "\"cpu\" ): meshes = TestPackedToPadded.init_meshes( num_meshes, num_verts, num_faces, device )", "if D == 0: inputs_packed = torch.zeros((num_inputs,), device=device) else: inputs_packed", "@staticmethod def padded_to_packed_python(inputs, first_idxs, num_inputs, device): \"\"\" PyTorch implementation of", "num_meshes: int, num_verts: int, num_faces: int, num_d: int, device: str", "LICENSE file in the root directory of this source tree.", "self.assertRaisesRegex(ValueError, \"input can only be 2-dimensional.\"): values = torch.rand((100, 50,", "return inputs_padded @staticmethod def padded_to_packed_python(inputs, first_idxs, num_inputs, device): \"\"\" PyTorch", "= torch.device(device) verts_list = [] faces_list = [] for _", "else: f = first_idxs[m + 1] inputs_packed[s:f] = inputs[m, :f]", "\"\"\" meshes = self.init_meshes(16, 100, 300, device=device) mesh_to_faces_packed_first_idx = meshes.mesh_to_faces_packed_first_idx()", "its affiliates. # All rights reserved. 
# # This source", "torch.rand((100, 50, 2), device=device) first_idxs = torch.tensor([0, 80], dtype=torch.int64, device=device)", "= values.grad values_padded_torch.backward(grad_inputs) grad_outputs_torch1 = values_torch.grad grad_outputs_torch2 = TestPackedToPadded.padded_to_packed_python( grad_inputs,", "meshes = self.init_meshes(16, 100, 300, device=device) mesh_to_faces_packed_first_idx = meshes.mesh_to_faces_packed_first_idx() num_faces_per_mesh", "if D == 0: grad_inputs = torch.rand((num_faces_per_mesh.sum().item()), device=device) else: grad_inputs", "device=device, requires_grad=True) else: values = torch.rand((faces.shape[0], D), device=device, requires_grad=True) values_torch", "= torch.rand((len(meshes), max_faces, D), device=device) values_padded.backward(grad_inputs) grad_outputs = values.grad values_padded_torch.backward(grad_inputs)", "= TestPackedToPadded.padded_to_packed_python( grad_inputs, mesh_to_faces_packed_first_idx, values.size(0), device=device ) self.assertClose(grad_outputs, grad_outputs_torch1) self.assertClose(grad_outputs,", "in enumerate(num_faces_per_mesh): values[i, num:] = 0 values.requires_grad = True values_torch", "== 0: inputs_packed = torch.zeros((num_inputs,), device=device) else: inputs_packed = torch.zeros((num_inputs,", "device) def test_padded_to_packed_D1_cuda(self): device = get_random_cuda_device() self._test_padded_to_packed_helper(1, device) def test_padded_to_packed_D16_cuda(self):", "num_faces, device ) faces = meshes.faces_packed() mesh_to_faces_packed_first_idx = meshes.mesh_to_faces_packed_first_idx() max_faces", "mesh_to_faces_packed_first_idx, values.size(1), device=device ) self.assertClose(grad_outputs, grad_outputs_torch1) self.assertClose(grad_outputs, grad_outputs_torch2) def test_padded_to_packed_flat_cpu(self):", "# All rights reserved. 
# # This source code is", "values = torch.rand((faces.shape[0],), device=device, requires_grad=True) else: values = torch.rand((faces.shape[0], D),", "self.assertClose(values_padded, values_padded_torch) # check backward if D == 0: grad_inputs", "0: values = torch.rand((faces.shape[0],), device=device, requires_grad=True) else: values = torch.rand((faces.shape[0],", "check backward if D == 0: grad_inputs = torch.rand((num_faces_per_mesh.sum().item()), device=device)", "test_invalid_inputs_shapes(self, device=\"cuda:0\"): with self.assertRaisesRegex(ValueError, \"input can only be 2-dimensional.\"): values", "values_padded_torch.backward(grad_inputs) grad_outputs_torch1 = values_torch.grad grad_outputs_torch2 = TestPackedToPadded.padded_to_packed_python( grad_inputs, mesh_to_faces_packed_first_idx, values.size(0),", "= first_idxs[m + 1] inputs_packed[s:f] = inputs[m, :f] return inputs_packed", "pytorch3d.structures.meshes import Meshes class TestPackedToPadded(TestCaseMixin, unittest.TestCase): def setUp(self) -> None:", "device=device) else: grad_inputs = torch.rand((len(meshes), max_faces, D), device=device) values_padded.backward(grad_inputs) grad_outputs", "values.detach().clone() values_torch.requires_grad = True values_padded = packed_to_padded( values, mesh_to_faces_packed_first_idx, max_faces", "grad_outputs = values.grad values_padded_torch.backward(grad_inputs) grad_outputs_torch1 = values_torch.grad grad_outputs_torch2 = TestPackedToPadded.padded_to_packed_python(", "license found in the # LICENSE file in the root", "- 1: f = inputs.shape[0] else: f = first_idxs[m +", "inputs.dim() == 3 else 0 if D == 0: inputs_packed", "check forward self.assertClose(values_packed, values_packed_torch) # check backward if D ==", "int, num_faces: int, num_d: int, device: str = \"cpu\" ):", "== 0: values = torch.rand((len(meshes), max_faces), device=device) else: values =", ") values_packed_torch = TestPackedToPadded.padded_to_packed_python( values_torch, 
mesh_to_faces_packed_first_idx, num_faces_per_mesh.sum().item(), device, ) #", "device=device) packed_to_padded(values, first_idxs, 100) with self.assertRaisesRegex(ValueError, \"input can only be", "D == 0: grad_inputs = torch.rand((num_faces_per_mesh.sum().item()), device=device) else: grad_inputs =", "= \"cpu\", ): device = torch.device(device) verts_list = [] faces_list", "= first_idxs[m] if m == num_meshes - 1: f =", "== num_meshes - 1: f = num_inputs else: f =", "= torch.rand((faces.shape[0], num_d), device=meshes.device) torch.cuda.synchronize() def out(): packed_to_padded(values, mesh_to_faces_packed_first_idx, max_faces)", "True values_packed = padded_to_packed( values, mesh_to_faces_packed_first_idx, num_faces_per_mesh.sum().item() ) values_packed_torch =", "= torch.randint( num_verts, size=(num_faces, 3), dtype=torch.int64, device=device ) verts_list.append(verts) faces_list.append(faces)", "def _test_padded_to_packed_helper(self, D, device): \"\"\" Check the results from packed_to_padded", "device=device) for m in range(num_meshes): s = first_idxs[m] if m", "class TestPackedToPadded(TestCaseMixin, unittest.TestCase): def setUp(self) -> None: super().setUp() torch.manual_seed(1) @staticmethod", "_test_padded_to_packed_helper(self, D, device): \"\"\" Check the results from packed_to_padded and", "TestCaseMixin, get_random_cuda_device from pytorch3d.ops import packed_to_padded, padded_to_packed from pytorch3d.structures.meshes import", "self.init_meshes(16, 100, 300, device=device) faces = meshes.faces_packed() mesh_to_faces_packed_first_idx = meshes.mesh_to_faces_packed_first_idx()", "inputs_padded = torch.zeros((num_meshes, max_size), device=device) else: inputs_padded = torch.zeros((num_meshes, max_size,", "backward if D == 0: grad_inputs = torch.rand((num_faces_per_mesh.sum().item()), device=device) else:", "if D == 0: values = torch.rand((faces.shape[0],), device=device, requires_grad=True) else:", "device ) faces = meshes.faces_packed() 
mesh_to_faces_packed_first_idx = meshes.mesh_to_faces_packed_first_idx() max_faces =", "first_idxs.size(0) D = inputs.shape[1] if inputs.dim() == 2 else 0", "values_packed_torch) # check backward if D == 0: grad_inputs =", "values.size(1), device=device ) self.assertClose(grad_outputs, grad_outputs_torch1) self.assertClose(grad_outputs, grad_outputs_torch2) def test_padded_to_packed_flat_cpu(self): self._test_padded_to_packed_helper(0,", "= TestPackedToPadded.padded_to_packed_python( values_torch, mesh_to_faces_packed_first_idx, num_faces_per_mesh.sum().item(), device, ) # check forward", "self._test_packed_to_padded_helper(1, device) def test_packed_to_padded_D16_cuda(self): device = get_random_cuda_device() self._test_packed_to_padded_helper(16, device) def", "= torch.zeros((num_meshes, max_size, D), device=device) for m in range(num_meshes): s", "20) with self.assertRaisesRegex(ValueError, \"input can only be 3-dimensional.\"): values =", "num:] = 0 values.requires_grad = True values_torch = values.detach().clone() values_torch.requires_grad", "1: f = inputs.shape[0] else: f = first_idxs[m + 1]", "values_padded = packed_to_padded( values, mesh_to_faces_packed_first_idx, max_faces ) values_padded_torch = TestPackedToPadded.packed_to_padded_python(", "torch.cuda.synchronize() def out(): packed_to_padded(values, mesh_to_faces_packed_first_idx, max_faces) torch.cuda.synchronize() return out @staticmethod", "= 3000, device: str = \"cpu\", ): device = torch.device(device)", "= first_idxs[m + 1] inputs_padded[m, :f] = inputs[s:f] return inputs_padded", "self.assertRaisesRegex(ValueError, \"input can only be 3-dimensional.\"): values = torch.rand((100,), device=device)", "padded_to_packed from pytorch3d.structures.meshes import Meshes class TestPackedToPadded(TestCaseMixin, unittest.TestCase): def setUp(self)", "size=(num_faces, 3), dtype=torch.int64, device=device ) verts_list.append(verts) faces_list.append(faces) meshes = Meshes(verts_list,", "be 3-dimensional.\"): 
values = torch.rand((100, 50, 2, 2), device=device) first_idxs", "= get_random_cuda_device() self._test_padded_to_packed_helper(0, device) def test_padded_to_packed_D1_cuda(self): device = get_random_cuda_device() self._test_padded_to_packed_helper(1,", "get_random_cuda_device() self._test_padded_to_packed_helper(0, device) def test_padded_to_packed_D1_cuda(self): device = get_random_cuda_device() self._test_padded_to_packed_helper(1, device)", "torch.zeros((num_meshes, max_size), device=device) else: inputs_padded = torch.zeros((num_meshes, max_size, D), device=device)", "else: inputs_padded = torch.zeros((num_meshes, max_size, D), device=device) for m in", "D), device=device ) values_packed.backward(grad_inputs) grad_outputs = values.grad values_packed_torch.backward(grad_inputs) grad_outputs_torch1 =", "range(num_meshes): verts = torch.rand((num_verts, 3), dtype=torch.float32, device=device) faces = torch.randint(", "# check backward if D == 0: grad_inputs = torch.rand((num_faces_per_mesh.sum().item()),", "= torch.rand((num_faces_per_mesh.sum().item()), device=device) else: grad_inputs = torch.rand( (num_faces_per_mesh.sum().item(), D), device=device", "max_faces = meshes.num_faces_per_mesh().max().item() if D == 0: values = torch.rand((faces.shape[0],),", "_ in range(num_meshes): verts = torch.rand((num_verts, 3), dtype=torch.float32, device=device) faces", "values = torch.rand((len(meshes), max_faces), device=device) else: values = torch.rand((len(meshes), max_faces,", "max_faces ) values_padded_torch = TestPackedToPadded.packed_to_padded_python( values_torch, mesh_to_faces_packed_first_idx, max_faces, device )", "== num_meshes - 1: f = inputs.shape[0] else: f =", "else: f = first_idxs[m + 1] inputs_padded[m, :f] = inputs[s:f]", ") values_packed.backward(grad_inputs) grad_outputs = values.grad values_packed_torch.backward(grad_inputs) grad_outputs_torch1 = values_torch.grad grad_outputs_torch2", "torch from common_testing import TestCaseMixin, 
get_random_cuda_device from pytorch3d.ops import packed_to_padded,", "device = get_random_cuda_device() self._test_packed_to_padded_helper(0, device) def test_packed_to_padded_D1_cuda(self): device = get_random_cuda_device()", "def padded_to_packed_python(inputs, first_idxs, num_inputs, device): \"\"\" PyTorch implementation of padded_to_packed", "mesh_to_faces_packed_first_idx, values.size(0), device=device ) self.assertClose(grad_outputs, grad_outputs_torch1) self.assertClose(grad_outputs, grad_outputs_torch2) def test_packed_to_padded_flat_cpu(self):", "device=meshes.device) torch.cuda.synchronize() def out(): TestPackedToPadded.packed_to_padded_python( values, mesh_to_faces_packed_first_idx, max_faces, device )", "def test_padded_to_packed_D16_cpu(self): self._test_padded_to_packed_helper(16, \"cpu\") def test_padded_to_packed_flat_cuda(self): device = get_random_cuda_device() self._test_padded_to_packed_helper(0,", "= torch.rand((faces.shape[0], num_d), device=meshes.device) torch.cuda.synchronize() def out(): TestPackedToPadded.packed_to_padded_python( values, mesh_to_faces_packed_first_idx,", "3000, device: str = \"cpu\", ): device = torch.device(device) verts_list", "else 0 if D == 0: inputs_padded = torch.zeros((num_meshes, max_size),", "torch.rand((100,), device=device) first_idxs = torch.tensor([0, 80], dtype=torch.int64, device=device) padded_to_packed(values, first_idxs,", "\"cpu\") def test_padded_to_packed_D1_cpu(self): self._test_padded_to_packed_helper(1, \"cpu\") def test_padded_to_packed_D16_cpu(self): self._test_padded_to_packed_helper(16, \"cpu\") def", "packed_to_padded(values, mesh_to_faces_packed_first_idx, max_faces) torch.cuda.synchronize() return out @staticmethod def packed_to_padded_with_init_torch( num_meshes:", "0 if D == 0: inputs_packed = torch.zeros((num_inputs,), device=device) else:", "if m == num_meshes - 1: f = inputs.shape[0] else:", "device) def test_packed_to_padded_D16_cuda(self): device = get_random_cuda_device() 
self._test_packed_to_padded_helper(16, device) def _test_padded_to_packed_helper(self,", "pytorch3d.ops import packed_to_padded, padded_to_packed from pytorch3d.structures.meshes import Meshes class TestPackedToPadded(TestCaseMixin,", "0: values = torch.rand((len(meshes), max_faces), device=device) else: values = torch.rand((len(meshes),", "\"cpu\") def test_packed_to_padded_D16_cpu(self): self._test_packed_to_padded_helper(16, \"cpu\") def test_packed_to_padded_flat_cuda(self): device = get_random_cuda_device()", "device=device) for i, num in enumerate(num_faces_per_mesh): values[i, num:] = 0", "= meshes.num_faces_per_mesh().max().item() if D == 0: values = torch.rand((faces.shape[0],), device=device,", "num_inputs, device): \"\"\" PyTorch implementation of padded_to_packed function. \"\"\" num_meshes", "with self.assertRaisesRegex(ValueError, \"input can only be 3-dimensional.\"): values = torch.rand((100,", "if m == num_meshes - 1: f = num_inputs else:", "max_faces = meshes.num_faces_per_mesh().max().item() if num_d == 0: values = torch.rand((faces.shape[0],),", "int = 1000, num_faces: int = 3000, device: str =", "inputs_packed def _test_packed_to_padded_helper(self, D, device): \"\"\" Check the results from", "implementation of padded_to_packed function. 
\"\"\" num_meshes = inputs.size(0) D =", "else: values = torch.rand((len(meshes), max_faces, D), device=device) for i, num", "import torch from common_testing import TestCaseMixin, get_random_cuda_device from pytorch3d.ops import", "- 1: f = num_inputs else: f = first_idxs[m +", "D, device): \"\"\" Check the results from packed_to_padded and PyTorch", "TestPackedToPadded.packed_to_padded_python( grad_inputs, mesh_to_faces_packed_first_idx, values.size(1), device=device ) self.assertClose(grad_outputs, grad_outputs_torch1) self.assertClose(grad_outputs, grad_outputs_torch2)", "inputs_padded = torch.zeros((num_meshes, max_size, D), device=device) for m in range(num_meshes):", "def packed_to_padded_python(inputs, first_idxs, max_size, device): \"\"\" PyTorch implementation of packed_to_padded", "f = first_idxs[m + 1] inputs_packed[s:f] = inputs[m, :f] return", "TestPackedToPadded.packed_to_padded_python( values_torch, mesh_to_faces_packed_first_idx, max_faces, device ) # check forward self.assertClose(values_padded,", "str = \"cpu\", ): device = torch.device(device) verts_list = []", "inputs_packed[s:f] = inputs[m, :f] return inputs_packed def _test_packed_to_padded_helper(self, D, device):", "same. \"\"\" meshes = self.init_meshes(16, 100, 300, device=device) mesh_to_faces_packed_first_idx =", "torch.tensor([0, 80], dtype=torch.int64, device=device) padded_to_packed(values, first_idxs, 20) @staticmethod def packed_to_padded_with_init(", "of packed_to_padded function. 
\"\"\" num_meshes = first_idxs.size(0) D = inputs.shape[1]", "self._test_padded_to_packed_helper(1, \"cpu\") def test_padded_to_packed_D16_cpu(self): self._test_padded_to_packed_helper(16, \"cpu\") def test_padded_to_packed_flat_cuda(self): device =", "= [] for _ in range(num_meshes): verts = torch.rand((num_verts, 3),", "source code is licensed under the BSD-style license found in", "in range(num_meshes): verts = torch.rand((num_verts, 3), dtype=torch.float32, device=device) faces =", "3), dtype=torch.int64, device=device ) verts_list.append(verts) faces_list.append(faces) meshes = Meshes(verts_list, faces_list)", "= True values_torch = values.detach().clone() values_torch.requires_grad = True values_packed =", "under the BSD-style license found in the # LICENSE file", "num_verts: int = 1000, num_faces: int = 3000, device: str", "TestPackedToPadded(TestCaseMixin, unittest.TestCase): def setUp(self) -> None: super().setUp() torch.manual_seed(1) @staticmethod def", "\"\"\" PyTorch implementation of padded_to_packed function. \"\"\" num_meshes = inputs.size(0)", "meshes.mesh_to_faces_packed_first_idx() max_faces = meshes.num_faces_per_mesh().max().item() if D == 0: values =", "s = first_idxs[m] if m == num_meshes - 1: f", "num_verts, num_faces, device ) faces = meshes.faces_packed() mesh_to_faces_packed_first_idx = meshes.mesh_to_faces_packed_first_idx()", "implementation of packed_to_padded function. 
\"\"\" num_meshes = first_idxs.size(0) D =", "def test_packed_to_padded_flat_cuda(self): device = get_random_cuda_device() self._test_packed_to_padded_helper(0, device) def test_packed_to_padded_D1_cuda(self): device", "self._test_padded_to_packed_helper(16, \"cpu\") def test_padded_to_packed_flat_cuda(self): device = get_random_cuda_device() self._test_padded_to_packed_helper(0, device) def", "device=\"cuda:0\"): with self.assertRaisesRegex(ValueError, \"input can only be 2-dimensional.\"): values =", "20) @staticmethod def packed_to_padded_with_init( num_meshes: int, num_verts: int, num_faces: int,", "mesh_to_faces_packed_first_idx = meshes.mesh_to_faces_packed_first_idx() max_faces = meshes.num_faces_per_mesh().max().item() if D == 0:", "mesh_to_faces_packed_first_idx, num_faces_per_mesh.sum().item(), device, ) # check forward self.assertClose(values_packed, values_packed_torch) #", "1] inputs_padded[m, :f] = inputs[s:f] return inputs_padded @staticmethod def padded_to_packed_python(inputs,", "True values_torch = values.detach().clone() values_torch.requires_grad = True values_packed = padded_to_packed(", "max_faces, D), device=device) values_padded.backward(grad_inputs) grad_outputs = values.grad values_padded_torch.backward(grad_inputs) grad_outputs_torch1 =", "def out(): TestPackedToPadded.packed_to_padded_python( values, mesh_to_faces_packed_first_idx, max_faces, device ) torch.cuda.synchronize() return", "grad_outputs_torch1) self.assertClose(grad_outputs, grad_outputs_torch2) def test_packed_to_padded_flat_cpu(self): self._test_packed_to_padded_helper(0, \"cpu\") def test_packed_to_padded_D1_cpu(self): self._test_packed_to_padded_helper(1,", "values.grad values_packed_torch.backward(grad_inputs) grad_outputs_torch1 = values_torch.grad grad_outputs_torch2 = TestPackedToPadded.packed_to_padded_python( grad_inputs, mesh_to_faces_packed_first_idx,", "num_verts: int, num_faces: int, num_d: int, device: str = \"cpu\"", "def test_packed_to_padded_D16_cuda(self): 
device = get_random_cuda_device() self._test_packed_to_padded_helper(16, device) def _test_padded_to_packed_helper(self, D,", "out(): packed_to_padded(values, mesh_to_faces_packed_first_idx, max_faces) torch.cuda.synchronize() return out @staticmethod def packed_to_padded_with_init_torch(", "= torch.rand((len(meshes), max_faces), device=device) else: values = torch.rand((len(meshes), max_faces, D),", "first_idxs = torch.tensor([0, 80], dtype=torch.int64, device=device) padded_to_packed(values, first_idxs, 20) @staticmethod", "max_size, D), device=device) for m in range(num_meshes): s = first_idxs[m]", "grad_outputs_torch2 = TestPackedToPadded.packed_to_padded_python( grad_inputs, mesh_to_faces_packed_first_idx, values.size(1), device=device ) self.assertClose(grad_outputs, grad_outputs_torch1)", "None: super().setUp() torch.manual_seed(1) @staticmethod def init_meshes( num_meshes: int = 10,", "= torch.rand((num_verts, 3), dtype=torch.float32, device=device) faces = torch.randint( num_verts, size=(num_faces,", "[] faces_list = [] for _ in range(num_meshes): verts =", "self.assertClose(grad_outputs, grad_outputs_torch1) self.assertClose(grad_outputs, grad_outputs_torch2) def test_padded_to_packed_flat_cpu(self): self._test_padded_to_packed_helper(0, \"cpu\") def test_padded_to_packed_D1_cpu(self):", "of padded_to_packed function. \"\"\" num_meshes = inputs.size(0) D = inputs.shape[2]", "= torch.rand((100, 50, 2, 2), device=device) first_idxs = torch.tensor([0, 80],", "@staticmethod def init_meshes( num_meshes: int = 10, num_verts: int =", "function. 
\"\"\" num_meshes = inputs.size(0) D = inputs.shape[2] if inputs.dim()", "\"cpu\", ): device = torch.device(device) verts_list = [] faces_list =", "def test_padded_to_packed_flat_cuda(self): device = get_random_cuda_device() self._test_padded_to_packed_helper(0, device) def test_padded_to_packed_D1_cuda(self): device", "can only be 3-dimensional.\"): values = torch.rand((100,), device=device) first_idxs =", "== 2 else 0 if D == 0: inputs_padded =", "first_idxs, 100) with self.assertRaisesRegex(ValueError, \"input can only be 3-dimensional.\"): values", "mesh_to_faces_packed_first_idx, max_faces) torch.cuda.synchronize() return out @staticmethod def packed_to_padded_with_init_torch( num_meshes: int,", "= torch.zeros((num_inputs, D), device=device) for m in range(num_meshes): s =", "else: grad_inputs = torch.rand((len(meshes), max_faces, D), device=device) values_padded.backward(grad_inputs) grad_outputs =", ") self.assertClose(grad_outputs, grad_outputs_torch1) self.assertClose(grad_outputs, grad_outputs_torch2) def test_packed_to_padded_flat_cpu(self): self._test_packed_to_padded_helper(0, \"cpu\") def", "values.size(0), device=device ) self.assertClose(grad_outputs, grad_outputs_torch1) self.assertClose(grad_outputs, grad_outputs_torch2) def test_packed_to_padded_flat_cpu(self): self._test_packed_to_padded_helper(0,", "packed_to_padded( values, mesh_to_faces_packed_first_idx, max_faces ) values_padded_torch = TestPackedToPadded.packed_to_padded_python( values_torch, mesh_to_faces_packed_first_idx,", "values.grad values_padded_torch.backward(grad_inputs) grad_outputs_torch1 = values_torch.grad grad_outputs_torch2 = TestPackedToPadded.padded_to_packed_python( grad_inputs, mesh_to_faces_packed_first_idx,", "Check the results from packed_to_padded and PyTorch implementations are the", "torch.rand((100, 50, 2, 2), device=device) first_idxs = torch.tensor([0, 80], dtype=torch.int64,", "device) def test_packed_to_padded_D1_cuda(self): device = get_random_cuda_device() 
self._test_packed_to_padded_helper(1, device) def test_packed_to_padded_D16_cuda(self):", "1: f = num_inputs else: f = first_idxs[m + 1]", "torch.rand((num_faces_per_mesh.sum().item()), device=device) else: grad_inputs = torch.rand( (num_faces_per_mesh.sum().item(), D), device=device )", "torch.cuda.synchronize() return out @staticmethod def packed_to_padded_with_init_torch( num_meshes: int, num_verts: int,", "out @staticmethod def packed_to_padded_with_init_torch( num_meshes: int, num_verts: int, num_faces: int,", "torch.rand((len(meshes), max_faces), device=device) else: grad_inputs = torch.rand((len(meshes), max_faces, D), device=device)", "# LICENSE file in the root directory of this source", "inputs.shape[0] else: f = first_idxs[m + 1] inputs_padded[m, :f] =", "num_meshes = inputs.size(0) D = inputs.shape[2] if inputs.dim() == 3", "device=device) else: inputs_padded = torch.zeros((num_meshes, max_size, D), device=device) for m", "torch.tensor([0, 80], dtype=torch.int64, device=device) padded_to_packed(values, first_idxs, 20) with self.assertRaisesRegex(ValueError, \"input", "+ 1] inputs_padded[m, :f] = inputs[s:f] return inputs_padded @staticmethod def", "num_meshes: int = 10, num_verts: int = 1000, num_faces: int", "self._test_padded_to_packed_helper(1, device) def test_padded_to_packed_D16_cuda(self): device = get_random_cuda_device() self._test_padded_to_packed_helper(16, device) def", "found in the # LICENSE file in the root directory", "def packed_to_padded_with_init( num_meshes: int, num_verts: int, num_faces: int, num_d: int,", "def test_padded_to_packed_D1_cuda(self): device = get_random_cuda_device() self._test_padded_to_packed_helper(1, device) def test_padded_to_packed_D16_cuda(self): device", "D = inputs.shape[1] if inputs.dim() == 2 else 0 if", "num_faces_per_mesh.sum().item() ) values_packed_torch = TestPackedToPadded.padded_to_packed_python( values_torch, mesh_to_faces_packed_first_idx, num_faces_per_mesh.sum().item(), device, )", "grad_inputs = 
torch.rand( (num_faces_per_mesh.sum().item(), D), device=device ) values_packed.backward(grad_inputs) grad_outputs =", "self._test_padded_to_packed_helper(16, device) def test_invalid_inputs_shapes(self, device=\"cuda:0\"): with self.assertRaisesRegex(ValueError, \"input can only", "num_verts, size=(num_faces, 3), dtype=torch.int64, device=device ) verts_list.append(verts) faces_list.append(faces) meshes =", "100, 300, device=device) mesh_to_faces_packed_first_idx = meshes.mesh_to_faces_packed_first_idx() num_faces_per_mesh = meshes.num_faces_per_mesh() max_faces", "num_faces_per_mesh.sum().item(), device, ) # check forward self.assertClose(values_packed, values_packed_torch) # check", "== 0: values = torch.rand((faces.shape[0],), device=device, requires_grad=True) else: values =", "80], dtype=torch.int64, device=device) padded_to_packed(values, first_idxs, 20) @staticmethod def packed_to_padded_with_init( num_meshes:", "# check backward if D == 0: grad_inputs = torch.rand((len(meshes),", "= values_torch.grad grad_outputs_torch2 = TestPackedToPadded.padded_to_packed_python( grad_inputs, mesh_to_faces_packed_first_idx, values.size(0), device=device )", "device=device) faces = meshes.faces_packed() mesh_to_faces_packed_first_idx = meshes.mesh_to_faces_packed_first_idx() max_faces = meshes.num_faces_per_mesh().max().item()", "inputs.dim() == 2 else 0 if D == 0: inputs_padded", "def test_invalid_inputs_shapes(self, device=\"cuda:0\"): with self.assertRaisesRegex(ValueError, \"input can only be 2-dimensional.\"):", "padded_to_packed(values, first_idxs, 20) @staticmethod def packed_to_padded_with_init( num_meshes: int, num_verts: int,", "verts_list = [] faces_list = [] for _ in range(num_meshes):", "= torch.rand((len(meshes), max_faces, D), device=device) for i, num in enumerate(num_faces_per_mesh):", "grad_inputs = torch.rand((len(meshes), max_faces), device=device) else: grad_inputs = torch.rand((len(meshes), max_faces,", "self._test_padded_to_packed_helper(0, device) 
def test_padded_to_packed_D1_cuda(self): device = get_random_cuda_device() self._test_padded_to_packed_helper(1, device) def", "values_torch.grad grad_outputs_torch2 = TestPackedToPadded.padded_to_packed_python( grad_inputs, mesh_to_faces_packed_first_idx, values.size(0), device=device ) self.assertClose(grad_outputs,", "init_meshes( num_meshes: int = 10, num_verts: int = 1000, num_faces:", "D), device=device) values_padded.backward(grad_inputs) grad_outputs = values.grad values_padded_torch.backward(grad_inputs) grad_outputs_torch1 = values_torch.grad", "only be 2-dimensional.\"): values = torch.rand((100, 50, 2), device=device) first_idxs", "max_faces = num_faces_per_mesh.max().item() if D == 0: values = torch.rand((len(meshes),", "2 else 0 if D == 0: inputs_padded = torch.zeros((num_meshes,", "device = get_random_cuda_device() self._test_padded_to_packed_helper(0, device) def test_padded_to_packed_D1_cuda(self): device = get_random_cuda_device()", "inputs[s:f] return inputs_padded @staticmethod def padded_to_packed_python(inputs, first_idxs, num_inputs, device): \"\"\"", "source tree. 
import unittest import torch from common_testing import TestCaseMixin,", "meshes.mesh_to_faces_packed_first_idx() num_faces_per_mesh = meshes.num_faces_per_mesh() max_faces = num_faces_per_mesh.max().item() if D ==", "@staticmethod def packed_to_padded_with_init_torch( num_meshes: int, num_verts: int, num_faces: int, num_d:", "in range(num_meshes): s = first_idxs[m] if m == num_meshes -", "@staticmethod def packed_to_padded_python(inputs, first_idxs, max_size, device): \"\"\" PyTorch implementation of", "test_padded_to_packed_D16_cuda(self): device = get_random_cuda_device() self._test_padded_to_packed_helper(16, device) def test_invalid_inputs_shapes(self, device=\"cuda:0\"): with", "else: values = torch.rand((faces.shape[0], num_d), device=meshes.device) torch.cuda.synchronize() def out(): TestPackedToPadded.packed_to_padded_python(", "= Meshes(verts_list, faces_list) return meshes @staticmethod def packed_to_padded_python(inputs, first_idxs, max_size,", "= torch.tensor([0, 80], dtype=torch.int64, device=device) packed_to_padded(values, first_idxs, 100) with self.assertRaisesRegex(ValueError,", "): device = torch.device(device) verts_list = [] faces_list = []", "licensed under the BSD-style license found in the # LICENSE", "D == 0: grad_inputs = torch.rand((len(meshes), max_faces), device=device) else: grad_inputs", "if D == 0: values = torch.rand((len(meshes), max_faces), device=device) else:", "grad_inputs, mesh_to_faces_packed_first_idx, values.size(0), device=device ) self.assertClose(grad_outputs, grad_outputs_torch1) self.assertClose(grad_outputs, grad_outputs_torch2) def", "grad_outputs_torch1) self.assertClose(grad_outputs, grad_outputs_torch2) def test_padded_to_packed_flat_cpu(self): self._test_padded_to_packed_helper(0, \"cpu\") def test_padded_to_packed_D1_cpu(self): self._test_padded_to_packed_helper(1,", "the results from packed_to_padded and PyTorch implementations are the same.", "num_faces: int, num_d: int, device: str = \"cpu\" ): meshes", "All 
rights reserved. # # This source code is licensed", "TestPackedToPadded.padded_to_packed_python( grad_inputs, mesh_to_faces_packed_first_idx, values.size(0), device=device ) self.assertClose(grad_outputs, grad_outputs_torch1) self.assertClose(grad_outputs, grad_outputs_torch2)", "values_torch, mesh_to_faces_packed_first_idx, num_faces_per_mesh.sum().item(), device, ) # check forward self.assertClose(values_packed, values_packed_torch)", "values_torch, mesh_to_faces_packed_first_idx, max_faces, device ) # check forward self.assertClose(values_padded, values_padded_torch)", "the same. \"\"\" meshes = self.init_meshes(16, 100, 300, device=device) faces", "test_padded_to_packed_flat_cpu(self): self._test_padded_to_packed_helper(0, \"cpu\") def test_padded_to_packed_D1_cpu(self): self._test_padded_to_packed_helper(1, \"cpu\") def test_padded_to_packed_D16_cpu(self): self._test_padded_to_packed_helper(16,", "= 1000, num_faces: int = 3000, device: str = \"cpu\",", "test_padded_to_packed_D1_cpu(self): self._test_padded_to_packed_helper(1, \"cpu\") def test_padded_to_packed_D16_cpu(self): self._test_padded_to_packed_helper(16, \"cpu\") def test_padded_to_packed_flat_cuda(self): device", "packed_to_padded, padded_to_packed from pytorch3d.structures.meshes import Meshes class TestPackedToPadded(TestCaseMixin, unittest.TestCase): def", "file in the root directory of this source tree. import", "TestPackedToPadded.init_meshes( num_meshes, num_verts, num_faces, device ) faces = meshes.faces_packed() mesh_to_faces_packed_first_idx", "self._test_packed_to_padded_helper(0, device) def test_packed_to_padded_D1_cuda(self): device = get_random_cuda_device() self._test_packed_to_padded_helper(1, device) def", "only be 3-dimensional.\"): values = torch.rand((100,), device=device) first_idxs = torch.tensor([0,", "meshes = Meshes(verts_list, faces_list) return meshes @staticmethod def packed_to_padded_python(inputs, first_idxs,", "packed_to_padded function. 
\"\"\" num_meshes = first_idxs.size(0) D = inputs.shape[1] if", "device=device) first_idxs = torch.tensor([0, 80], dtype=torch.int64, device=device) padded_to_packed(values, first_idxs, 20)", "rights reserved. # # This source code is licensed under", "= \"cpu\" ): meshes = TestPackedToPadded.init_meshes( num_meshes, num_verts, num_faces, device", "affiliates. # All rights reserved. # # This source code", "@staticmethod def packed_to_padded_with_init( num_meshes: int, num_verts: int, num_faces: int, num_d:", "torch.rand((faces.shape[0],), device=device, requires_grad=True) else: values = torch.rand((faces.shape[0], D), device=device, requires_grad=True)", "def test_packed_to_padded_D1_cpu(self): self._test_packed_to_padded_helper(1, \"cpu\") def test_packed_to_padded_D16_cpu(self): self._test_packed_to_padded_helper(16, \"cpu\") def test_packed_to_padded_flat_cuda(self):", "root directory of this source tree. import unittest import torch", "2), device=device) first_idxs = torch.tensor([0, 80], dtype=torch.int64, device=device) padded_to_packed(values, first_idxs,", "packed_to_padded_with_init( num_meshes: int, num_verts: int, num_faces: int, num_d: int, device:", "f = first_idxs[m + 1] inputs_padded[m, :f] = inputs[s:f] return", "values_torch = values.detach().clone() values_torch.requires_grad = True values_padded = packed_to_padded( values,", "device) def test_invalid_inputs_shapes(self, device=\"cuda:0\"): with self.assertRaisesRegex(ValueError, \"input can only be", "\"cpu\") def test_padded_to_packed_flat_cuda(self): device = get_random_cuda_device() self._test_padded_to_packed_helper(0, device) def test_padded_to_packed_D1_cuda(self):", "values = torch.rand((faces.shape[0], num_d), device=meshes.device) torch.cuda.synchronize() def out(): packed_to_padded(values, mesh_to_faces_packed_first_idx,", "in the root directory of this source tree. 
import unittest", "True values_padded = packed_to_padded( values, mesh_to_faces_packed_first_idx, max_faces ) values_padded_torch =", "first_idxs[m + 1] inputs_padded[m, :f] = inputs[s:f] return inputs_padded @staticmethod", "values_padded_torch = TestPackedToPadded.packed_to_padded_python( values_torch, mesh_to_faces_packed_first_idx, max_faces, device ) # check", "mesh_to_faces_packed_first_idx = meshes.mesh_to_faces_packed_first_idx() max_faces = meshes.num_faces_per_mesh().max().item() if num_d == 0:", "= 0 values.requires_grad = True values_torch = values.detach().clone() values_torch.requires_grad =", "= 10, num_verts: int = 1000, num_faces: int = 3000,", "= inputs[s:f] return inputs_padded @staticmethod def padded_to_packed_python(inputs, first_idxs, num_inputs, device):", "f = num_inputs else: f = first_idxs[m + 1] inputs_packed[s:f]", "self.assertClose(grad_outputs, grad_outputs_torch2) def test_padded_to_packed_flat_cpu(self): self._test_padded_to_packed_helper(0, \"cpu\") def test_padded_to_packed_D1_cpu(self): self._test_padded_to_packed_helper(1, \"cpu\")", "values_padded.backward(grad_inputs) grad_outputs = values.grad values_padded_torch.backward(grad_inputs) grad_outputs_torch1 = values_torch.grad grad_outputs_torch2 =", "num_meshes - 1: f = inputs.shape[0] else: f = first_idxs[m", "torch.zeros((num_meshes, max_size, D), device=device) for m in range(num_meshes): s =", "= meshes.mesh_to_faces_packed_first_idx() max_faces = meshes.num_faces_per_mesh().max().item() if D == 0: values", "can only be 3-dimensional.\"): values = torch.rand((100, 50, 2, 2),", "mesh_to_faces_packed_first_idx, max_faces, device ) # check forward self.assertClose(values_padded, values_padded_torch) #", "def _test_packed_to_padded_helper(self, D, device): \"\"\" Check the results from packed_to_padded", "first_idxs, max_size, device): \"\"\" PyTorch implementation of packed_to_padded function. 
\"\"\"", "== 0: grad_inputs = torch.rand((len(meshes), max_faces), device=device) else: grad_inputs =", "self.assertClose(grad_outputs, grad_outputs_torch2) def test_packed_to_padded_flat_cpu(self): self._test_packed_to_padded_helper(0, \"cpu\") def test_packed_to_padded_D1_cpu(self): self._test_packed_to_padded_helper(1, \"cpu\")", "num_faces: int = 3000, device: str = \"cpu\", ): device", "meshes.num_faces_per_mesh() max_faces = num_faces_per_mesh.max().item() if D == 0: values =", "D), device=device) for i, num in enumerate(num_faces_per_mesh): values[i, num:] =", ") self.assertClose(grad_outputs, grad_outputs_torch1) self.assertClose(grad_outputs, grad_outputs_torch2) def test_padded_to_packed_flat_cpu(self): self._test_padded_to_packed_helper(0, \"cpu\") def", "device=device ) self.assertClose(grad_outputs, grad_outputs_torch1) self.assertClose(grad_outputs, grad_outputs_torch2) def test_padded_to_packed_flat_cpu(self): self._test_padded_to_packed_helper(0, \"cpu\")", "device = get_random_cuda_device() self._test_padded_to_packed_helper(16, device) def test_invalid_inputs_shapes(self, device=\"cuda:0\"): with self.assertRaisesRegex(ValueError,", "values = torch.rand((faces.shape[0], D), device=device, requires_grad=True) values_torch = values.detach().clone() values_torch.requires_grad", "= inputs[m, :f] return inputs_packed def _test_packed_to_padded_helper(self, D, device): \"\"\"", "meshes.num_faces_per_mesh().max().item() if D == 0: values = torch.rand((faces.shape[0],), device=device, requires_grad=True)", "values_padded_torch) # check backward if D == 0: grad_inputs =", "num_faces_per_mesh = meshes.num_faces_per_mesh() max_faces = num_faces_per_mesh.max().item() if D == 0:", "grad_outputs_torch2 = TestPackedToPadded.padded_to_packed_python( grad_inputs, mesh_to_faces_packed_first_idx, values.size(0), device=device ) self.assertClose(grad_outputs, grad_outputs_torch1)", ") verts_list.append(verts) faces_list.append(faces) meshes = Meshes(verts_list, 
faces_list) return meshes @staticmethod", "inputs.shape[1] if inputs.dim() == 2 else 0 if D ==", "check forward self.assertClose(values_padded, values_padded_torch) # check backward if D ==", "test_packed_to_padded_D16_cuda(self): device = get_random_cuda_device() self._test_packed_to_padded_helper(16, device) def _test_padded_to_packed_helper(self, D, device):", "get_random_cuda_device() self._test_packed_to_padded_helper(16, device) def _test_padded_to_packed_helper(self, D, device): \"\"\" Check the", "values = torch.rand((faces.shape[0], num_d), device=meshes.device) torch.cuda.synchronize() def out(): TestPackedToPadded.packed_to_padded_python( values,", "values_torch.grad grad_outputs_torch2 = TestPackedToPadded.packed_to_padded_python( grad_inputs, mesh_to_faces_packed_first_idx, values.size(1), device=device ) self.assertClose(grad_outputs,", "= torch.tensor([0, 80], dtype=torch.int64, device=device) padded_to_packed(values, first_idxs, 20) @staticmethod def", ":f] = inputs[s:f] return inputs_padded @staticmethod def padded_to_packed_python(inputs, first_idxs, num_inputs,", "packed_to_padded_with_init_torch( num_meshes: int, num_verts: int, num_faces: int, num_d: int, device:", "in the # LICENSE file in the root directory of", "first_idxs = torch.tensor([0, 80], dtype=torch.int64, device=device) packed_to_padded(values, first_idxs, 100) with", "first_idxs, 20) with self.assertRaisesRegex(ValueError, \"input can only be 3-dimensional.\"): values", "== 0: inputs_padded = torch.zeros((num_meshes, max_size), device=device) else: inputs_padded =", "= self.init_meshes(16, 100, 300, device=device) mesh_to_faces_packed_first_idx = meshes.mesh_to_faces_packed_first_idx() num_faces_per_mesh =", "max_size), device=device) else: inputs_padded = torch.zeros((num_meshes, max_size, D), device=device) for", "device = get_random_cuda_device() self._test_padded_to_packed_helper(1, device) def test_padded_to_packed_D16_cuda(self): device = get_random_cuda_device()", 
"torch.zeros((num_inputs,), device=device) else: inputs_packed = torch.zeros((num_inputs, D), device=device) for m", "<gh_stars>0 # Copyright (c) Facebook, Inc. and its affiliates. #", "int, num_verts: int, num_faces: int, num_d: int, device: str =", "torch.rand((len(meshes), max_faces, D), device=device) for i, num in enumerate(num_faces_per_mesh): values[i,", "# This source code is licensed under the BSD-style license", "device=device ) verts_list.append(verts) faces_list.append(faces) meshes = Meshes(verts_list, faces_list) return meshes", "padded_to_packed( values, mesh_to_faces_packed_first_idx, num_faces_per_mesh.sum().item() ) values_packed_torch = TestPackedToPadded.padded_to_packed_python( values_torch, mesh_to_faces_packed_first_idx,", "= get_random_cuda_device() self._test_padded_to_packed_helper(1, device) def test_padded_to_packed_D16_cuda(self): device = get_random_cuda_device() self._test_padded_to_packed_helper(16,", "get_random_cuda_device() self._test_padded_to_packed_helper(1, device) def test_padded_to_packed_D16_cuda(self): device = get_random_cuda_device() self._test_padded_to_packed_helper(16, device)", "device) def test_padded_to_packed_D16_cuda(self): device = get_random_cuda_device() self._test_padded_to_packed_helper(16, device) def test_invalid_inputs_shapes(self,", "results from packed_to_padded and PyTorch implementations are the same. 
\"\"\"", "80], dtype=torch.int64, device=device) packed_to_padded(values, first_idxs, 100) with self.assertRaisesRegex(ValueError, \"input can", "get_random_cuda_device() self._test_packed_to_padded_helper(0, device) def test_packed_to_padded_D1_cuda(self): device = get_random_cuda_device() self._test_packed_to_padded_helper(1, device)", "def init_meshes( num_meshes: int = 10, num_verts: int = 1000,", "# # This source code is licensed under the BSD-style", "D), device=device, requires_grad=True) values_torch = values.detach().clone() values_torch.requires_grad = True values_padded", "def test_packed_to_padded_D1_cuda(self): device = get_random_cuda_device() self._test_packed_to_padded_helper(1, device) def test_packed_to_padded_D16_cuda(self): device", "test_padded_to_packed_D16_cpu(self): self._test_padded_to_packed_helper(16, \"cpu\") def test_padded_to_packed_flat_cuda(self): device = get_random_cuda_device() self._test_padded_to_packed_helper(0, device)", "code is licensed under the BSD-style license found in the", "2, 2), device=device) first_idxs = torch.tensor([0, 80], dtype=torch.int64, device=device) padded_to_packed(values,", "max_faces) torch.cuda.synchronize() return out @staticmethod def packed_to_padded_with_init_torch( num_meshes: int, num_verts:", "packed_to_padded_python(inputs, first_idxs, max_size, device): \"\"\" PyTorch implementation of packed_to_padded function.", "_test_packed_to_padded_helper(self, D, device): \"\"\" Check the results from packed_to_padded and", "This source code is licensed under the BSD-style license found", "\"input can only be 2-dimensional.\"): values = torch.rand((100, 50, 2),", "= packed_to_padded( values, mesh_to_faces_packed_first_idx, max_faces ) values_padded_torch = TestPackedToPadded.packed_to_padded_python( values_torch,", "test_padded_to_packed_D1_cuda(self): device = get_random_cuda_device() self._test_padded_to_packed_helper(1, device) def test_padded_to_packed_D16_cuda(self): device =", "= num_inputs else: 
f = first_idxs[m + 1] inputs_packed[s:f] =", "D == 0: values = torch.rand((len(meshes), max_faces), device=device) else: values", "= meshes.faces_packed() mesh_to_faces_packed_first_idx = meshes.mesh_to_faces_packed_first_idx() max_faces = meshes.num_faces_per_mesh().max().item() if num_d", "torch.rand((faces.shape[0], num_d), device=meshes.device) torch.cuda.synchronize() def out(): packed_to_padded(values, mesh_to_faces_packed_first_idx, max_faces) torch.cuda.synchronize()", "self._test_padded_to_packed_helper(0, \"cpu\") def test_padded_to_packed_D1_cpu(self): self._test_padded_to_packed_helper(1, \"cpu\") def test_padded_to_packed_D16_cpu(self): self._test_padded_to_packed_helper(16, \"cpu\")", "\"input can only be 3-dimensional.\"): values = torch.rand((100,), device=device) first_idxs", "if D == 0: inputs_padded = torch.zeros((num_meshes, max_size), device=device) else:", "unittest import torch from common_testing import TestCaseMixin, get_random_cuda_device from pytorch3d.ops", "device=meshes.device) else: values = torch.rand((faces.shape[0], num_d), device=meshes.device) torch.cuda.synchronize() def out():", "values, mesh_to_faces_packed_first_idx, max_faces ) values_padded_torch = TestPackedToPadded.packed_to_padded_python( values_torch, mesh_to_faces_packed_first_idx, max_faces,", "the same. 
\"\"\" meshes = self.init_meshes(16, 100, 300, device=device) mesh_to_faces_packed_first_idx", "values_torch.requires_grad = True values_packed = padded_to_packed( values, mesh_to_faces_packed_first_idx, num_faces_per_mesh.sum().item() )", "meshes.faces_packed() mesh_to_faces_packed_first_idx = meshes.mesh_to_faces_packed_first_idx() max_faces = meshes.num_faces_per_mesh().max().item() if D ==", "get_random_cuda_device from pytorch3d.ops import packed_to_padded, padded_to_packed from pytorch3d.structures.meshes import Meshes", "import TestCaseMixin, get_random_cuda_device from pytorch3d.ops import packed_to_padded, padded_to_packed from pytorch3d.structures.meshes", "torch.zeros((num_inputs, D), device=device) for m in range(num_meshes): s = first_idxs[m]", "common_testing import TestCaseMixin, get_random_cuda_device from pytorch3d.ops import packed_to_padded, padded_to_packed from", "int, device: str = \"cpu\" ): meshes = TestPackedToPadded.init_meshes( num_meshes,", "int, num_d: int, device: str = \"cpu\" ): meshes =", "torch.rand((len(meshes), max_faces, D), device=device) values_padded.backward(grad_inputs) grad_outputs = values.grad values_padded_torch.backward(grad_inputs) grad_outputs_torch1", "forward self.assertClose(values_padded, values_padded_torch) # check backward if D == 0:", "= get_random_cuda_device() self._test_packed_to_padded_helper(1, device) def test_packed_to_padded_D16_cuda(self): device = get_random_cuda_device() self._test_packed_to_padded_helper(16,", "def test_padded_to_packed_flat_cpu(self): self._test_padded_to_packed_helper(0, \"cpu\") def test_padded_to_packed_D1_cpu(self): self._test_padded_to_packed_helper(1, \"cpu\") def test_padded_to_packed_D16_cpu(self):", "device = torch.device(device) verts_list = [] faces_list = [] for", "packed_to_padded and PyTorch implementations are the same. \"\"\" meshes =", "directory of this source tree. 
import unittest import torch from", "device): \"\"\" Check the results from packed_to_padded and PyTorch implementations", "test_packed_to_padded_D1_cuda(self): device = get_random_cuda_device() self._test_packed_to_padded_helper(1, device) def test_packed_to_padded_D16_cuda(self): device =", "faces_list.append(faces) meshes = Meshes(verts_list, faces_list) return meshes @staticmethod def packed_to_padded_python(inputs,", "= meshes.faces_packed() mesh_to_faces_packed_first_idx = meshes.mesh_to_faces_packed_first_idx() max_faces = meshes.num_faces_per_mesh().max().item() if D", "device=device) else: grad_inputs = torch.rand( (num_faces_per_mesh.sum().item(), D), device=device ) values_packed.backward(grad_inputs)", "inputs_packed = torch.zeros((num_inputs, D), device=device) for m in range(num_meshes): s", "padded_to_packed(values, first_idxs, 20) with self.assertRaisesRegex(ValueError, \"input can only be 3-dimensional.\"):", "self._test_packed_to_padded_helper(0, \"cpu\") def test_packed_to_padded_D1_cpu(self): self._test_packed_to_padded_helper(1, \"cpu\") def test_packed_to_padded_D16_cpu(self): self._test_packed_to_padded_helper(16, \"cpu\")", "device = get_random_cuda_device() self._test_packed_to_padded_helper(1, device) def test_packed_to_padded_D16_cuda(self): device = get_random_cuda_device()", "else: values = torch.rand((faces.shape[0], D), device=device, requires_grad=True) values_torch = values.detach().clone()", "= values.detach().clone() values_torch.requires_grad = True values_padded = packed_to_padded( values, mesh_to_faces_packed_first_idx,", "[] for _ in range(num_meshes): verts = torch.rand((num_verts, 3), dtype=torch.float32,", "(c) Facebook, Inc. and its affiliates. 
# All rights reserved.", "100, 300, device=device) faces = meshes.faces_packed() mesh_to_faces_packed_first_idx = meshes.mesh_to_faces_packed_first_idx() max_faces", "= meshes.mesh_to_faces_packed_first_idx() max_faces = meshes.num_faces_per_mesh().max().item() if num_d == 0: values", "2), device=device) first_idxs = torch.tensor([0, 80], dtype=torch.int64, device=device) packed_to_padded(values, first_idxs,", "3), dtype=torch.float32, device=device) faces = torch.randint( num_verts, size=(num_faces, 3), dtype=torch.int64,", "= torch.rand( (num_faces_per_mesh.sum().item(), D), device=device ) values_packed.backward(grad_inputs) grad_outputs = values.grad", "torch.rand((len(meshes), max_faces), device=device) else: values = torch.rand((len(meshes), max_faces, D), device=device)", "else: values = torch.rand((faces.shape[0], num_d), device=meshes.device) torch.cuda.synchronize() def out(): packed_to_padded(values,", "\"cpu\") def test_packed_to_padded_D1_cpu(self): self._test_packed_to_padded_helper(1, \"cpu\") def test_packed_to_padded_D16_cpu(self): self._test_packed_to_padded_helper(16, \"cpu\") def", "def test_padded_to_packed_D1_cpu(self): self._test_padded_to_packed_helper(1, \"cpu\") def test_padded_to_packed_D16_cpu(self): self._test_padded_to_packed_helper(16, \"cpu\") def test_padded_to_packed_flat_cuda(self):", "def test_padded_to_packed_D16_cuda(self): device = get_random_cuda_device() self._test_padded_to_packed_helper(16, device) def test_invalid_inputs_shapes(self, device=\"cuda:0\"):", "faces_list = [] for _ in range(num_meshes): verts = torch.rand((num_verts,", "device=device) faces = torch.randint( num_verts, size=(num_faces, 3), dtype=torch.int64, device=device )", "device=meshes.device) torch.cuda.synchronize() def out(): packed_to_padded(values, mesh_to_faces_packed_first_idx, max_faces) torch.cuda.synchronize() return out", "torch.rand((faces.shape[0], num_d), device=meshes.device) torch.cuda.synchronize() def out(): 
TestPackedToPadded.packed_to_padded_python( values, mesh_to_faces_packed_first_idx, max_faces,", "# Copyright (c) Facebook, Inc. and its affiliates. # All", "verts = torch.rand((num_verts, 3), dtype=torch.float32, device=device) faces = torch.randint( num_verts,", "= torch.rand((100, 50, 2), device=device) first_idxs = torch.tensor([0, 80], dtype=torch.int64,", "function. \"\"\" num_meshes = first_idxs.size(0) D = inputs.shape[1] if inputs.dim()", "num in enumerate(num_faces_per_mesh): values[i, num:] = 0 values.requires_grad = True", "\"\"\" PyTorch implementation of packed_to_padded function. \"\"\" num_meshes = first_idxs.size(0)", "first_idxs[m + 1] inputs_packed[s:f] = inputs[m, :f] return inputs_packed def", "num_meshes, num_verts, num_faces, device ) faces = meshes.faces_packed() mesh_to_faces_packed_first_idx =", "= True values_packed = padded_to_packed( values, mesh_to_faces_packed_first_idx, num_faces_per_mesh.sum().item() ) values_packed_torch", "PyTorch implementation of padded_to_packed function. \"\"\" num_meshes = inputs.size(0) D", "test_packed_to_padded_flat_cuda(self): device = get_random_cuda_device() self._test_packed_to_padded_helper(0, device) def test_packed_to_padded_D1_cuda(self): device =", "-> None: super().setUp() torch.manual_seed(1) @staticmethod def init_meshes( num_meshes: int =", "and its affiliates. # All rights reserved. 
# # This", "values_torch = values.detach().clone() values_torch.requires_grad = True values_packed = padded_to_packed( values,", "inputs_padded @staticmethod def padded_to_packed_python(inputs, first_idxs, num_inputs, device): \"\"\" PyTorch implementation", "the # LICENSE file in the root directory of this", "faces = meshes.faces_packed() mesh_to_faces_packed_first_idx = meshes.mesh_to_faces_packed_first_idx() max_faces = meshes.num_faces_per_mesh().max().item() if", "be 3-dimensional.\"): values = torch.rand((100,), device=device) first_idxs = torch.tensor([0, 80],", "num_meshes = first_idxs.size(0) D = inputs.shape[1] if inputs.dim() == 2", "\"\"\" meshes = self.init_meshes(16, 100, 300, device=device) faces = meshes.faces_packed()", "meshes @staticmethod def packed_to_padded_python(inputs, first_idxs, max_size, device): \"\"\" PyTorch implementation", ") # check forward self.assertClose(values_padded, values_padded_torch) # check backward if", "int = 3000, device: str = \"cpu\", ): device =", "torch.rand((faces.shape[0],), device=meshes.device) else: values = torch.rand((faces.shape[0], num_d), device=meshes.device) torch.cuda.synchronize() def", "num_d), device=meshes.device) torch.cuda.synchronize() def out(): packed_to_padded(values, mesh_to_faces_packed_first_idx, max_faces) torch.cuda.synchronize() return", "test_padded_to_packed_flat_cuda(self): device = get_random_cuda_device() self._test_padded_to_packed_helper(0, device) def test_padded_to_packed_D1_cuda(self): device =", "requires_grad=True) else: values = torch.rand((faces.shape[0], D), device=device, requires_grad=True) values_torch =", "check backward if D == 0: grad_inputs = torch.rand((len(meshes), max_faces),", "if inputs.dim() == 2 else 0 if D == 0:", "= torch.rand((len(meshes), max_faces), device=device) else: grad_inputs = torch.rand((len(meshes), max_faces, D),", "0: grad_inputs = torch.rand((num_faces_per_mesh.sum().item()), device=device) else: grad_inputs = torch.rand( 
(num_faces_per_mesh.sum().item(),", "D), device=device) for m in range(num_meshes): s = first_idxs[m] if", "device): \"\"\" PyTorch implementation of packed_to_padded function. \"\"\" num_meshes =", "grad_outputs_torch1 = values_torch.grad grad_outputs_torch2 = TestPackedToPadded.padded_to_packed_python( grad_inputs, mesh_to_faces_packed_first_idx, values.size(0), device=device", "== 0: grad_inputs = torch.rand((num_faces_per_mesh.sum().item()), device=device) else: grad_inputs = torch.rand(", "50, 2), device=device) first_idxs = torch.tensor([0, 80], dtype=torch.int64, device=device) packed_to_padded(values,", "\"\"\" num_meshes = first_idxs.size(0) D = inputs.shape[1] if inputs.dim() ==", "str = \"cpu\" ): meshes = TestPackedToPadded.init_meshes( num_meshes, num_verts, num_faces,", "inputs_packed = torch.zeros((num_inputs,), device=device) else: inputs_packed = torch.zeros((num_inputs, D), device=device)", "mesh_to_faces_packed_first_idx, max_faces ) values_padded_torch = TestPackedToPadded.packed_to_padded_python( values_torch, mesh_to_faces_packed_first_idx, max_faces, device", "values_packed = padded_to_packed( values, mesh_to_faces_packed_first_idx, num_faces_per_mesh.sum().item() ) values_packed_torch = TestPackedToPadded.padded_to_packed_python(", "device=device) padded_to_packed(values, first_idxs, 20) @staticmethod def packed_to_padded_with_init( num_meshes: int, num_verts:", "for i, num in enumerate(num_faces_per_mesh): values[i, num:] = 0 values.requires_grad", "0: inputs_packed = torch.zeros((num_inputs,), device=device) else: inputs_packed = torch.zeros((num_inputs, D),", "= torch.rand((faces.shape[0],), device=meshes.device) else: values = torch.rand((faces.shape[0], num_d), device=meshes.device) torch.cuda.synchronize()", "3 else 0 if D == 0: inputs_packed = torch.zeros((num_inputs,),", "= inputs.shape[2] if inputs.dim() == 3 else 0 if D", "values[i, num:] = 0 values.requires_grad = True values_torch = values.detach().clone()", "device ) # check 
forward self.assertClose(values_padded, values_padded_torch) # check backward", "def out(): packed_to_padded(values, mesh_to_faces_packed_first_idx, max_faces) torch.cuda.synchronize() return out @staticmethod def", "return out @staticmethod def packed_to_padded_with_init_torch( num_meshes: int, num_verts: int, num_faces:", "m == num_meshes - 1: f = inputs.shape[0] else: f", "= torch.rand((faces.shape[0],), device=device, requires_grad=True) else: values = torch.rand((faces.shape[0], D), device=device,", "3-dimensional.\"): values = torch.rand((100,), device=device) first_idxs = torch.tensor([0, 80], dtype=torch.int64,", "else: grad_inputs = torch.rand( (num_faces_per_mesh.sum().item(), D), device=device ) values_packed.backward(grad_inputs) grad_outputs", "D == 0: inputs_packed = torch.zeros((num_inputs,), device=device) else: inputs_packed =", "values_packed.backward(grad_inputs) grad_outputs = values.grad values_packed_torch.backward(grad_inputs) grad_outputs_torch1 = values_torch.grad grad_outputs_torch2 =", "values.detach().clone() values_torch.requires_grad = True values_packed = padded_to_packed( values, mesh_to_faces_packed_first_idx, num_faces_per_mesh.sum().item()", "inputs.shape[2] if inputs.dim() == 3 else 0 if D ==", "0 values.requires_grad = True values_torch = values.detach().clone() values_torch.requires_grad = True", "device=device) values_padded.backward(grad_inputs) grad_outputs = values.grad values_padded_torch.backward(grad_inputs) grad_outputs_torch1 = values_torch.grad grad_outputs_torch2", "dtype=torch.float32, device=device) faces = torch.randint( num_verts, size=(num_faces, 3), dtype=torch.int64, device=device", "if inputs.dim() == 3 else 0 if D == 0:", "test_packed_to_padded_flat_cpu(self): self._test_packed_to_padded_helper(0, \"cpu\") def test_packed_to_padded_D1_cpu(self): self._test_packed_to_padded_helper(1, \"cpu\") def test_packed_to_padded_D16_cpu(self): self._test_packed_to_padded_helper(16,", "10, num_verts: int = 1000, 
num_faces: int = 3000, device:", "self.init_meshes(16, 100, 300, device=device) mesh_to_faces_packed_first_idx = meshes.mesh_to_faces_packed_first_idx() num_faces_per_mesh = meshes.num_faces_per_mesh()", "300, device=device) mesh_to_faces_packed_first_idx = meshes.mesh_to_faces_packed_first_idx() num_faces_per_mesh = meshes.num_faces_per_mesh() max_faces =", "device=device) mesh_to_faces_packed_first_idx = meshes.mesh_to_faces_packed_first_idx() num_faces_per_mesh = meshes.num_faces_per_mesh() max_faces = num_faces_per_mesh.max().item()", "num_meshes - 1: f = num_inputs else: f = first_idxs[m", "are the same. \"\"\" meshes = self.init_meshes(16, 100, 300, device=device)", "num_d: int, device: str = \"cpu\" ): meshes = TestPackedToPadded.init_meshes(", "values.requires_grad = True values_torch = values.detach().clone() values_torch.requires_grad = True values_packed", "1000, num_faces: int = 3000, device: str = \"cpu\", ):", "first_idxs, 20) @staticmethod def packed_to_padded_with_init( num_meshes: int, num_verts: int, num_faces:", "max_faces, D), device=device) for i, num in enumerate(num_faces_per_mesh): values[i, num:]", "out(): TestPackedToPadded.packed_to_padded_python( values, mesh_to_faces_packed_first_idx, max_faces, device ) torch.cuda.synchronize() return out", "device) def _test_padded_to_packed_helper(self, D, device): \"\"\" Check the results from", "= values.grad values_packed_torch.backward(grad_inputs) grad_outputs_torch1 = values_torch.grad grad_outputs_torch2 = TestPackedToPadded.packed_to_padded_python( grad_inputs,", "torch.device(device) verts_list = [] faces_list = [] for _ in", "super().setUp() torch.manual_seed(1) @staticmethod def init_meshes( num_meshes: int = 10, num_verts:", "requires_grad=True) values_torch = values.detach().clone() values_torch.requires_grad = True values_padded = packed_to_padded(", "= inputs.shape[0] else: f = first_idxs[m + 1] inputs_padded[m, :f]", "mesh_to_faces_packed_first_idx, num_faces_per_mesh.sum().item() ) 
values_packed_torch = TestPackedToPadded.padded_to_packed_python( values_torch, mesh_to_faces_packed_first_idx, num_faces_per_mesh.sum().item(), device,", "packed_to_padded(values, first_idxs, 100) with self.assertRaisesRegex(ValueError, \"input can only be 3-dimensional.\"):", "mesh_to_faces_packed_first_idx = meshes.mesh_to_faces_packed_first_idx() num_faces_per_mesh = meshes.num_faces_per_mesh() max_faces = num_faces_per_mesh.max().item() if", "backward if D == 0: grad_inputs = torch.rand((len(meshes), max_faces), device=device)", "num_d == 0: values = torch.rand((faces.shape[0],), device=meshes.device) else: values =", "\"cpu\") def test_packed_to_padded_flat_cuda(self): device = get_random_cuda_device() self._test_packed_to_padded_helper(0, device) def test_packed_to_padded_D1_cuda(self):", "self.assertClose(grad_outputs, grad_outputs_torch1) self.assertClose(grad_outputs, grad_outputs_torch2) def test_packed_to_padded_flat_cpu(self): self._test_packed_to_padded_helper(0, \"cpu\") def test_packed_to_padded_D1_cpu(self):", "2-dimensional.\"): values = torch.rand((100, 50, 2), device=device) first_idxs = torch.tensor([0,", "D == 0: values = torch.rand((faces.shape[0],), device=device, requires_grad=True) else: values", "= torch.tensor([0, 80], dtype=torch.int64, device=device) padded_to_packed(values, first_idxs, 20) with self.assertRaisesRegex(ValueError,", "get_random_cuda_device() self._test_packed_to_padded_helper(1, device) def test_packed_to_padded_D16_cuda(self): device = get_random_cuda_device() self._test_packed_to_padded_helper(16, device)", "padded_to_packed function. 
\"\"\" num_meshes = inputs.size(0) D = inputs.shape[2] if", "for _ in range(num_meshes): verts = torch.rand((num_verts, 3), dtype=torch.float32, device=device)", "faces = torch.randint( num_verts, size=(num_faces, 3), dtype=torch.int64, device=device ) verts_list.append(verts)", "device=device, requires_grad=True) values_torch = values.detach().clone() values_torch.requires_grad = True values_padded =", "import Meshes class TestPackedToPadded(TestCaseMixin, unittest.TestCase): def setUp(self) -> None: super().setUp()", "device: str = \"cpu\", ): device = torch.device(device) verts_list =", "= torch.rand((faces.shape[0], D), device=device, requires_grad=True) values_torch = values.detach().clone() values_torch.requires_grad =", "values_packed_torch = TestPackedToPadded.padded_to_packed_python( values_torch, mesh_to_faces_packed_first_idx, num_faces_per_mesh.sum().item(), device, ) # check", "device=device ) self.assertClose(grad_outputs, grad_outputs_torch1) self.assertClose(grad_outputs, grad_outputs_torch2) def test_packed_to_padded_flat_cpu(self): self._test_packed_to_padded_helper(0, \"cpu\")", "test_packed_to_padded_D1_cpu(self): self._test_packed_to_padded_helper(1, \"cpu\") def test_packed_to_padded_D16_cpu(self): self._test_packed_to_padded_helper(16, \"cpu\") def test_packed_to_padded_flat_cuda(self): device", "only be 3-dimensional.\"): values = torch.rand((100, 50, 2, 2), device=device)", "# check forward self.assertClose(values_packed, values_packed_torch) # check backward if D", "): meshes = TestPackedToPadded.init_meshes( num_meshes, num_verts, num_faces, device ) faces", "Meshes class TestPackedToPadded(TestCaseMixin, unittest.TestCase): def setUp(self) -> None: super().setUp() torch.manual_seed(1)", "0: inputs_padded = torch.zeros((num_meshes, max_size), device=device) else: inputs_padded = torch.zeros((num_meshes,", "else: inputs_packed = torch.zeros((num_inputs, D), device=device) for m in range(num_meshes):", "= get_random_cuda_device() 
self._test_packed_to_padded_helper(16, device) def _test_padded_to_packed_helper(self, D, device): \"\"\" Check", "\"\"\" Check the results from packed_to_padded and PyTorch implementations are", "first_idxs = torch.tensor([0, 80], dtype=torch.int64, device=device) padded_to_packed(values, first_idxs, 20) with", "Facebook, Inc. and its affiliates. # All rights reserved. #", "padded_to_packed_python(inputs, first_idxs, num_inputs, device): \"\"\" PyTorch implementation of padded_to_packed function.", "grad_inputs, mesh_to_faces_packed_first_idx, values.size(1), device=device ) self.assertClose(grad_outputs, grad_outputs_torch1) self.assertClose(grad_outputs, grad_outputs_torch2) def", "0: grad_inputs = torch.rand((len(meshes), max_faces), device=device) else: grad_inputs = torch.rand((len(meshes),", "TestPackedToPadded.padded_to_packed_python( values_torch, mesh_to_faces_packed_first_idx, num_faces_per_mesh.sum().item(), device, ) # check forward self.assertClose(values_packed,", "unittest.TestCase): def setUp(self) -> None: super().setUp() torch.manual_seed(1) @staticmethod def init_meshes(", "else 0 if D == 0: inputs_packed = torch.zeros((num_inputs,), device=device)", "grad_outputs_torch2) def test_packed_to_padded_flat_cpu(self): self._test_packed_to_padded_helper(0, \"cpu\") def test_packed_to_padded_D1_cpu(self): self._test_packed_to_padded_helper(1, \"cpu\") def", "= TestPackedToPadded.packed_to_padded_python( grad_inputs, mesh_to_faces_packed_first_idx, values.size(1), device=device ) self.assertClose(grad_outputs, grad_outputs_torch1) self.assertClose(grad_outputs,", "with self.assertRaisesRegex(ValueError, \"input can only be 2-dimensional.\"): values = torch.rand((100,", "= padded_to_packed( values, mesh_to_faces_packed_first_idx, num_faces_per_mesh.sum().item() ) values_packed_torch = TestPackedToPadded.padded_to_packed_python( values_torch,", "meshes.faces_packed() mesh_to_faces_packed_first_idx = meshes.mesh_to_faces_packed_first_idx() max_faces = 
meshes.num_faces_per_mesh().max().item() if num_d ==", "50, 2, 2), device=device) first_idxs = torch.tensor([0, 80], dtype=torch.int64, device=device)", "def packed_to_padded_with_init_torch( num_meshes: int, num_verts: int, num_faces: int, num_d: int,", "torch.manual_seed(1) @staticmethod def init_meshes( num_meshes: int = 10, num_verts: int", "= get_random_cuda_device() self._test_packed_to_padded_helper(0, device) def test_packed_to_padded_D1_cuda(self): device = get_random_cuda_device() self._test_packed_to_padded_helper(1,", "0: values = torch.rand((faces.shape[0],), device=meshes.device) else: values = torch.rand((faces.shape[0], num_d),", "Meshes(verts_list, faces_list) return meshes @staticmethod def packed_to_padded_python(inputs, first_idxs, max_size, device):", "\"input can only be 3-dimensional.\"): values = torch.rand((100, 50, 2,", "the BSD-style license found in the # LICENSE file in", "D == 0: inputs_padded = torch.zeros((num_meshes, max_size), device=device) else: inputs_padded", "self.assertClose(values_packed, values_packed_torch) # check backward if D == 0: grad_inputs", "PyTorch implementation of packed_to_padded function. \"\"\" num_meshes = first_idxs.size(0) D", "Inc. and its affiliates. # All rights reserved. 
# #", "= inputs.shape[1] if inputs.dim() == 2 else 0 if D", "values_torch.requires_grad = True values_padded = packed_to_padded( values, mesh_to_faces_packed_first_idx, max_faces )", "meshes = TestPackedToPadded.init_meshes( num_meshes, num_verts, num_faces, device ) faces =", "def setUp(self) -> None: super().setUp() torch.manual_seed(1) @staticmethod def init_meshes( num_meshes:", "enumerate(num_faces_per_mesh): values[i, num:] = 0 values.requires_grad = True values_torch =", "inputs[m, :f] return inputs_packed def _test_packed_to_padded_helper(self, D, device): \"\"\" Check", "torch.tensor([0, 80], dtype=torch.int64, device=device) packed_to_padded(values, first_idxs, 100) with self.assertRaisesRegex(ValueError, \"input", "torch.rand((faces.shape[0], D), device=device, requires_grad=True) values_torch = values.detach().clone() values_torch.requires_grad = True", "from common_testing import TestCaseMixin, get_random_cuda_device from pytorch3d.ops import packed_to_padded, padded_to_packed", "num_inputs else: f = first_idxs[m + 1] inputs_packed[s:f] = inputs[m,", "= meshes.num_faces_per_mesh().max().item() if num_d == 0: values = torch.rand((faces.shape[0],), device=meshes.device)", "inputs.size(0) D = inputs.shape[2] if inputs.dim() == 3 else 0", "from pytorch3d.structures.meshes import Meshes class TestPackedToPadded(TestCaseMixin, unittest.TestCase): def setUp(self) ->", "forward self.assertClose(values_packed, values_packed_torch) # check backward if D == 0:", "setUp(self) -> None: super().setUp() torch.manual_seed(1) @staticmethod def init_meshes( num_meshes: int", "int = 10, num_verts: int = 1000, num_faces: int =", "device): \"\"\" PyTorch implementation of padded_to_packed function. 
\"\"\" num_meshes =", "max_faces), device=device) else: grad_inputs = torch.rand((len(meshes), max_faces, D), device=device) values_padded.backward(grad_inputs)", "import packed_to_padded, padded_to_packed from pytorch3d.structures.meshes import Meshes class TestPackedToPadded(TestCaseMixin, unittest.TestCase):", "def test_packed_to_padded_flat_cpu(self): self._test_packed_to_padded_helper(0, \"cpu\") def test_packed_to_padded_D1_cpu(self): self._test_packed_to_padded_helper(1, \"cpu\") def test_packed_to_padded_D16_cpu(self):", "i, num in enumerate(num_faces_per_mesh): values[i, num:] = 0 values.requires_grad =", "= inputs.size(0) D = inputs.shape[2] if inputs.dim() == 3 else", "values = torch.rand((100,), device=device) first_idxs = torch.tensor([0, 80], dtype=torch.int64, device=device)", "device = get_random_cuda_device() self._test_packed_to_padded_helper(16, device) def _test_padded_to_packed_helper(self, D, device): \"\"\"", "num_faces_per_mesh.max().item() if D == 0: values = torch.rand((len(meshes), max_faces), device=device)", "torch.rand( (num_faces_per_mesh.sum().item(), D), device=device ) values_packed.backward(grad_inputs) grad_outputs = values.grad values_packed_torch.backward(grad_inputs)", "device: str = \"cpu\" ): meshes = TestPackedToPadded.init_meshes( num_meshes, num_verts,", "num_d), device=meshes.device) torch.cuda.synchronize() def out(): TestPackedToPadded.packed_to_padded_python( values, mesh_to_faces_packed_first_idx, max_faces, device", "# check forward self.assertClose(values_padded, values_padded_torch) # check backward if D", "with self.assertRaisesRegex(ValueError, \"input can only be 3-dimensional.\"): values = torch.rand((100,),", "grad_outputs_torch1 = values_torch.grad grad_outputs_torch2 = TestPackedToPadded.packed_to_padded_python( grad_inputs, mesh_to_faces_packed_first_idx, values.size(1), device=device", "max_faces, device ) # check forward self.assertClose(values_padded, values_padded_torch) # check", "== 3 else 0 if D == 
0: inputs_packed =", "= meshes.num_faces_per_mesh() max_faces = num_faces_per_mesh.max().item() if D == 0: values", "values, mesh_to_faces_packed_first_idx, num_faces_per_mesh.sum().item() ) values_packed_torch = TestPackedToPadded.padded_to_packed_python( values_torch, mesh_to_faces_packed_first_idx, num_faces_per_mesh.sum().item(),", "self.assertRaisesRegex(ValueError, \"input can only be 3-dimensional.\"): values = torch.rand((100, 50,", "torch.randint( num_verts, size=(num_faces, 3), dtype=torch.int64, device=device ) verts_list.append(verts) faces_list.append(faces) meshes", "meshes.num_faces_per_mesh().max().item() if num_d == 0: values = torch.rand((faces.shape[0],), device=meshes.device) else:", "max_size, device): \"\"\" PyTorch implementation of packed_to_padded function. \"\"\" num_meshes", "= True values_padded = packed_to_padded( values, mesh_to_faces_packed_first_idx, max_faces ) values_padded_torch", "= first_idxs.size(0) D = inputs.shape[1] if inputs.dim() == 2 else", "for m in range(num_meshes): s = first_idxs[m] if m ==", "1] inputs_packed[s:f] = inputs[m, :f] return inputs_packed def _test_packed_to_padded_helper(self, D,", "grad_inputs = torch.rand((num_faces_per_mesh.sum().item()), device=device) else: grad_inputs = torch.rand( (num_faces_per_mesh.sum().item(), D),", "(num_faces_per_mesh.sum().item(), D), device=device ) values_packed.backward(grad_inputs) grad_outputs = values.grad values_packed_torch.backward(grad_inputs) grad_outputs_torch1", "device=device) padded_to_packed(values, first_idxs, 20) with self.assertRaisesRegex(ValueError, \"input can only be", "f = inputs.shape[0] else: f = first_idxs[m + 1] inputs_padded[m,", "dtype=torch.int64, device=device) padded_to_packed(values, first_idxs, 20) @staticmethod def packed_to_padded_with_init( num_meshes: int,", "values = torch.rand((len(meshes), max_faces, D), device=device) for i, num in", "grad_outputs_torch2) def test_padded_to_packed_flat_cpu(self): 
self._test_padded_to_packed_helper(0, \"cpu\") def test_padded_to_packed_D1_cpu(self): self._test_padded_to_packed_helper(1, \"cpu\") def", "= TestPackedToPadded.init_meshes( num_meshes, num_verts, num_faces, device ) faces = meshes.faces_packed()", "= [] faces_list = [] for _ in range(num_meshes): verts", "= self.init_meshes(16, 100, 300, device=device) faces = meshes.faces_packed() mesh_to_faces_packed_first_idx =", "this source tree. import unittest import torch from common_testing import", "\"cpu\") def test_padded_to_packed_D16_cpu(self): self._test_padded_to_packed_helper(16, \"cpu\") def test_padded_to_packed_flat_cuda(self): device = get_random_cuda_device()", "3-dimensional.\"): values = torch.rand((100, 50, 2, 2), device=device) first_idxs =", "is licensed under the BSD-style license found in the #", "grad_outputs = values.grad values_packed_torch.backward(grad_inputs) grad_outputs_torch1 = values_torch.grad grad_outputs_torch2 = TestPackedToPadded.packed_to_padded_python(", "= torch.zeros((num_inputs,), device=device) else: inputs_packed = torch.zeros((num_inputs, D), device=device) for", "self._test_packed_to_padded_helper(16, device) def _test_padded_to_packed_helper(self, D, device): \"\"\" Check the results", "dtype=torch.int64, device=device) padded_to_packed(values, first_idxs, 20) with self.assertRaisesRegex(ValueError, \"input can only", "return inputs_packed def _test_packed_to_padded_helper(self, D, device): \"\"\" Check the results", "tree. 
import unittest import torch from common_testing import TestCaseMixin, get_random_cuda_device", "from pytorch3d.ops import packed_to_padded, padded_to_packed from pytorch3d.structures.meshes import Meshes class", "device=device ) values_packed.backward(grad_inputs) grad_outputs = values.grad values_packed_torch.backward(grad_inputs) grad_outputs_torch1 = values_torch.grad", "grad_inputs = torch.rand((len(meshes), max_faces, D), device=device) values_padded.backward(grad_inputs) grad_outputs = values.grad", "m in range(num_meshes): s = first_idxs[m] if m == num_meshes", "implementations are the same. \"\"\" meshes = self.init_meshes(16, 100, 300,", "== 0: values = torch.rand((faces.shape[0],), device=meshes.device) else: values = torch.rand((faces.shape[0],", "same. \"\"\" meshes = self.init_meshes(16, 100, 300, device=device) faces =", ":f] return inputs_packed def _test_packed_to_padded_helper(self, D, device): \"\"\" Check the", "Copyright (c) Facebook, Inc. and its affiliates. # All rights", "= get_random_cuda_device() self._test_padded_to_packed_helper(16, device) def test_invalid_inputs_shapes(self, device=\"cuda:0\"): with self.assertRaisesRegex(ValueError, \"input", "values = torch.rand((100, 50, 2, 2), device=device) first_idxs = torch.tensor([0,", ") values_padded_torch = TestPackedToPadded.packed_to_padded_python( values_torch, mesh_to_faces_packed_first_idx, max_faces, device ) #", "BSD-style license found in the # LICENSE file in the", "torch.rand((num_verts, 3), dtype=torch.float32, device=device) faces = torch.randint( num_verts, size=(num_faces, 3),", "\"\"\" num_meshes = inputs.size(0) D = inputs.shape[2] if inputs.dim() ==", "get_random_cuda_device() self._test_padded_to_packed_helper(16, device) def test_invalid_inputs_shapes(self, device=\"cuda:0\"): with self.assertRaisesRegex(ValueError, \"input can", "first_idxs[m] if m == num_meshes - 1: f = num_inputs", "and PyTorch implementations are the same. 
\"\"\" meshes = self.init_meshes(16,", "be 2-dimensional.\"): values = torch.rand((100, 50, 2), device=device) first_idxs =", "from packed_to_padded and PyTorch implementations are the same. \"\"\" meshes", "the root directory of this source tree. import unittest import", "device=device) else: values = torch.rand((len(meshes), max_faces, D), device=device) for i,", ") # check forward self.assertClose(values_packed, values_packed_torch) # check backward if", "def test_packed_to_padded_D16_cpu(self): self._test_packed_to_padded_helper(16, \"cpu\") def test_packed_to_padded_flat_cuda(self): device = get_random_cuda_device() self._test_packed_to_padded_helper(0,", "= values_torch.grad grad_outputs_torch2 = TestPackedToPadded.packed_to_padded_python( grad_inputs, mesh_to_faces_packed_first_idx, values.size(1), device=device )", "= torch.zeros((num_meshes, max_size), device=device) else: inputs_padded = torch.zeros((num_meshes, max_size, D),", "self._test_packed_to_padded_helper(16, \"cpu\") def test_packed_to_padded_flat_cuda(self): device = get_random_cuda_device() self._test_packed_to_padded_helper(0, device) def", "test_packed_to_padded_D16_cpu(self): self._test_packed_to_padded_helper(16, \"cpu\") def test_packed_to_padded_flat_cuda(self): device = get_random_cuda_device() self._test_packed_to_padded_helper(0, device)", "range(num_meshes): s = first_idxs[m] if m == num_meshes - 1:", "values = torch.rand((faces.shape[0],), device=meshes.device) else: values = torch.rand((faces.shape[0], num_d), device=meshes.device)", "= torch.rand((100,), device=device) first_idxs = torch.tensor([0, 80], dtype=torch.int64, device=device) padded_to_packed(values,", "of this source tree. 
import unittest import torch from common_testing", "if num_d == 0: values = torch.rand((faces.shape[0],), device=meshes.device) else: values", "max_faces), device=device) else: values = torch.rand((len(meshes), max_faces, D), device=device) for", "return meshes @staticmethod def packed_to_padded_python(inputs, first_idxs, max_size, device): \"\"\" PyTorch", "first_idxs, num_inputs, device): \"\"\" PyTorch implementation of padded_to_packed function. \"\"\"", ") faces = meshes.faces_packed() mesh_to_faces_packed_first_idx = meshes.mesh_to_faces_packed_first_idx() max_faces = meshes.num_faces_per_mesh().max().item()", "verts_list.append(verts) faces_list.append(faces) meshes = Meshes(verts_list, faces_list) return meshes @staticmethod def", "D = inputs.shape[2] if inputs.dim() == 3 else 0 if", "inputs_padded[m, :f] = inputs[s:f] return inputs_padded @staticmethod def padded_to_packed_python(inputs, first_idxs,", "self._test_packed_to_padded_helper(1, \"cpu\") def test_packed_to_padded_D16_cpu(self): self._test_packed_to_padded_helper(16, \"cpu\") def test_packed_to_padded_flat_cuda(self): device =", "= meshes.mesh_to_faces_packed_first_idx() num_faces_per_mesh = meshes.num_faces_per_mesh() max_faces = num_faces_per_mesh.max().item() if D", "values = torch.rand((100, 50, 2), device=device) first_idxs = torch.tensor([0, 80],", "= TestPackedToPadded.packed_to_padded_python( values_torch, mesh_to_faces_packed_first_idx, max_faces, device ) # check forward", "device=device) else: inputs_packed = torch.zeros((num_inputs, D), device=device) for m in", "0 if D == 0: inputs_padded = torch.zeros((num_meshes, max_size), device=device)", "m == num_meshes - 1: f = num_inputs else: f", "if D == 0: grad_inputs = torch.rand((len(meshes), max_faces), device=device) else:", "first_idxs[m] if m == num_meshes - 1: f = inputs.shape[0]", "300, device=device) faces = meshes.faces_packed() mesh_to_faces_packed_first_idx = meshes.mesh_to_faces_packed_first_idx() max_faces =", 
"dtype=torch.int64, device=device) packed_to_padded(values, first_idxs, 100) with self.assertRaisesRegex(ValueError, \"input can only", "reserved. # # This source code is licensed under the", "torch.cuda.synchronize() def out(): TestPackedToPadded.packed_to_padded_python( values, mesh_to_faces_packed_first_idx, max_faces, device ) torch.cuda.synchronize()", "+ 1] inputs_packed[s:f] = inputs[m, :f] return inputs_packed def _test_packed_to_padded_helper(self,", "meshes.mesh_to_faces_packed_first_idx() max_faces = meshes.num_faces_per_mesh().max().item() if num_d == 0: values =" ]
[ "geotiff.get_header('file/tiff_test/2_12.tif')) imarray, offset = geotiff.clip_roi(poly_pixel, 'file/tiff_test/2_12.tif', is_geo=False) assert len(imarray) ==", "because WGS 84 / UTM ... should be full out_dict", "rgb, lzw * 256 image_width (1H) 19436 * 257 image_length", "to int, fixed by astype(float) mean_ht = geotiff.mean_values(r'file/tiff_test/2_12.tif') captured =", "1 def test_clip_roi_geo(): poly = shp.read_shp2d('file/shp_test/test.shp') imarray, offset = geotiff.clip_roi(poly['0'],", "339 sample_format (4H) (1, 1, 1, 1) * 33550 model_pixel_scale", "= \"\"\" TIFF file: 200423_G_M600pro_transparent_mosaic_group1.tif, 411 MiB, little endian, bigtiff", "roi_w3_out = geotiff._is_roi_type(roi_wrong_3) assert 'Only list contains numpy.ndarray points are", "33550 model_pixel_scale (3d) (0.0029700000000000004, 0.0029700000000000004, 0\" out_dict = geotiff._prase_header_string(in_str) assert", "mean_ht = geotiff.mean_values(r'file/tiff_test/2_12.tif') captured = capsys.readouterr() np.testing.assert_almost_equal(mean_ht, np.float(72.31657466298653), decimal=3) def", "1026 * 34737 geo_ascii_params (30s) b'WGS 84 / UTM zone", "test_imarray_clip_2d_rgb_rgba(): photo_path = 'file/pix4d.diy/photos/DJI_0174.JPG' roi = np.asarray([[2251, 1223], [2270, 1270],", "imarray_rgba.shape == (3456, 4608, 4) im_out_rgba, offsets = geotiff.imarray_clip(imarray_rgba, roi)", "* 34735 geo_key_directory (32H) (1, 1, 0, 7, 1024, 0,", "2 * 273 strip_offsets (31255Q) (500650, 501114, 501578, 502042, 502506,", "point = (368023.004, 3955500.669) out = geotiff.point_query(r'file/pix4d.diy/hasu_tanashi_20170525_Ins1RGB_30m_dsm.tif', point) np.testing.assert_almost_equal(out, np.float32(97.45558),", "decimal=3) def test_gis2pixel2gis(): geo_head_txt = \"\"\" TIFF file: 200423_G_M600pro_transparent_mosaic_group1.tif, 411", "out_dict = geotiff._prase_header_string(\"* 256 image_width (1H) 13503\") assert out_dict['width'] ==", "0.0, 0.0, 484576.70205, 3862285.5109300003, * 34735 
geo_key_directory (32H) (1, 1,", "geo_ascii_params (30s) b'WGS 84 / UTM zone 53N|WGS 84|' \"\"\"", "gis_coord = np.asarray([[ 484593.67474654, 3862259.42413431], [ 484593.41064743, 3862259.92582402], [ 484593.64841806,", "* 33550 model_pixel_scale (3d) (0.001, 0.001, 0.0) * 33922 model_tie_point", "= np.asarray([[ 484593.67474654, 3862259.42413431], [ 484593.41064743, 3862259.92582402], [ 484593.64841806, 3862260.06515117],", "geotiff.clip_roi(poly_pixel, 'file/tiff_test/2_12.tif', is_geo=False) assert len(imarray) == 1 def test_clip_roi_geo(): poly", "== (368090.77975000005, 3956071.13823) in_str = \"* 33922 model_tie_point (6d) (0.0,", "mean_ht = geotiff.mean_values(r'file/pix4d.diy/hasu_tanashi_20170525_Ins1RGB_30m_dsm.tif') captured = capsys.readouterr() # When not convert", "0, 1, 1, 1025, 0, 1, 1, 1026 * 34737", "= geotiff._prase_header_string(\"* 42113 gdal_nodata (7s) b'-10000'\") assert out_dict['nodata'] == -10000", "\"* 34737 geo_ascii_params (30s) b'WGS 84 / UTM zone 54N|WGS", "When not convert to float, mean_values = 97.562584 # assert", "np.float32(97.562584), decimal=3) # another case that not working in previous", "256 image_width (1H) 19436 * 257 image_length (1H) 31255 *", "uint8, 8 bit, rgb, lzw * 256 image_width (1H) 19436", "fixed by astype(float) mean_ht = geotiff.mean_values(r'file/tiff_test/2_12.tif') captured = capsys.readouterr() np.testing.assert_almost_equal(mean_ht,", "[Todo] pass def test_mean_values(capsys): mean_ht = geotiff.mean_values(r'file/pix4d.diy/hasu_tanashi_20170525_Ins1RGB_30m_dsm.tif') captured = capsys.readouterr()", "[ 484593.64841806, 3862260.06515117], [ 484593.93077419, 3862259.55455913], [ 484593.67474654, 3862259.42413431]]) header", "[16946, 25445], [17228, 25956], [16972, 26086]]) pixel_coord = geotiff.geo2pixel(gis_coord, header)", "= \"* 33550 model_pixel_scale (3d) (0.0029700000000000004, 0.0029700000000000004, 0\" out_dict =", "file: 200423_G_M600pro_transparent_mosaic_group1.tif, 411 MiB, little endian, 
bigtiff Series 0: 31255x19436x4,", "502970, 5 * 277 samples_per_pixel (1H) 4 * 278 rows_per_strip", "test_clip_roi_geo(): poly = shp.read_shp2d('file/shp_test/test.shp') imarray, offset = geotiff.clip_roi(poly['0'], 'file/tiff_test/2_12.tif', is_geo=True)", "geotiff._is_roi_type(roi2) assert roi2_out == roi2 with pytest.raises(TypeError) as errinfo: roi_w1_out", "geotiff._prase_header_string(in_str) assert out_dict['tie_point'] == (368090.77975000005, 3956071.13823) in_str = \"* 33922", "305 software (12s) b'pix4dmapper' * 317 predictor (1H) 2 *", "version: # Cannot convert np.nan to int, fixed by astype(float)", "extra_samples (1H) 2 * 339 sample_format (4H) (1, 1, 1,", "skimage.color import rgb2gray import matplotlib.pyplot as plt def test_prase_header_string_width(): out_dict", "UTM zone 54N]' in captured.out assert out_dict['proj'] == None def", "np.testing.assert_almost_equal(mean_ht, np.float(72.31657466298653), decimal=3) def test_gis2pixel2gis(): geo_head_txt = \"\"\" TIFF file:", "== [roi1] roi2_out = geotiff._is_roi_type(roi2) assert roi2_out == roi2 with", "UTM zone 53N|WGS 84|' \"\"\" gis_coord = np.asarray([[ 484593.67474654, 3862259.42413431],", "bit, rgb, lzw * 256 image_width (1H) 19436 * 257", "(1H) 19866\") assert out_dict['length'] == 19866 def test_prase_header_string_scale(): in_str =", "np.asarray([[368022.581, 3955501.054], [368024.032, 3955500.465]]) point = np.asarray([[368023.004, 3955500.669]]) p_list =", "----------------------------------------------- imarray_rgba = np.dstack((imarray_rgb, np.ones((3456, 4608)) * 255)) assert imarray_rgba.shape", "789]]) roi2 = [roi1, roi1] roi_wrong_1 = (123, 345) roi_wrong_2", "= (123, 345) roi_wrong_2 = [123, 456] roi_wrong_3 = [[123,", "roi1_out = geotiff._is_roi_type(roi1) assert roi1_out == [roi1] roi2_out = geotiff._is_roi_type(roi2)", "with pytest.raises(TypeError) as errinfo: roi_w2_out = geotiff._is_roi_type(roi_wrong_2) assert 'Only list", "def test_is_roi_type(): roi1 = np.asarray([[123, 
456], [456, 789]]) roi2 =", "geotiff._is_roi_type(roi1) assert roi1_out == [roi1] roi2_out = geotiff._is_roi_type(roi2) assert roi2_out", "imarray_rgba = np.dstack((imarray_rgb, np.ones((3456, 4608)) * 255)) assert imarray_rgba.shape ==", "= geotiff._is_roi_type(roi_wrong_1) assert 'Only numpy.ndarray points and list contains numpy.ndarray", "= shp.read_shp2d('file/shp_test/test.shp') imarray, offset = geotiff.clip_roi(poly['0'], 'file/tiff_test/2_12.tif', is_geo=True) assert len(imarray)", "imread(photo_path) assert imarray_rgb.shape == (3456, 4608, 3) im_out_rgb, offsets_rgb =", "1, 1, 1025, 0, 1, 1, 1026 * 34737 geo_ascii_params", "def test_prase_header_string_length(): out_dict = geotiff._prase_header_string(\"* 257 image_length (1H) 19866\") assert", "geotiff.mean_values(r'file/pix4d.diy/hasu_tanashi_20170525_Ins1RGB_30m_dsm.tif') captured = capsys.readouterr() # When not convert to float,", "geotiff._prase_header_string(\"* 257 image_length (1H) 19866\") assert out_dict['length'] == 19866 def", "np.testing.assert_almost_equal(out, expected, decimal=3) def test_point_query_list_numpy_points(): points = np.asarray([[368022.581, 3955501.054], [368024.032,", "out = geotiff.point_query(r'file/pix4d.diy/hasu_tanashi_20170525_Ins1RGB_30m_dsm.tif', point) np.testing.assert_almost_equal(out, np.float32(97.45558), decimal=3) def test_point_query_numpy_points(): points", "zone 53N|WGS 84|' \"\"\" gis_coord = np.asarray([[ 484593.67474654, 3862259.42413431], [", "def test_get_imarray_with_header(capsys): pass def test_point_query_one_point(): point = (368023.004, 3955500.669) out", "points and list contains numpy.ndarray points are supported' in str(errinfo.value)", "f\"[io][geotiff][GeoCorrd] Comprehense [{in_str}]\" in captured.out assert out_dict['proj'] == pyproj.CRS.from_epsg(32654) def", "97.59617])] out = geotiff.point_query(r'file/pix4d.diy/hasu_tanashi_20170525_Ins1RGB_30m_dsm.tif', p_list) assert type(expected) == type(out) 
np.testing.assert_almost_equal(expected[0],", "ax = plt.subplots(1,3, figsize=(12,4)) # ----------------------------------------------- imarray_rgb = imread(photo_path) assert", "out_dict['tie_point'] == (368090.77975000005, 3956071.13823) def test_prase_header_string_nodata(): out_dict = geotiff._prase_header_string(\"* 42113", "image_width (1H) 13503\") assert out_dict['width'] == 13503 def test_prase_header_string_length(): out_dict", "pytest import numpy as np from easyric.io import geotiff, shp", "assert mean_ht == np.float32(97.562584) np.testing.assert_almost_equal(mean_ht, np.float32(97.562584), decimal=3) # another case", "roi2_out = geotiff._is_roi_type(roi2) assert roi2_out == roi2 with pytest.raises(TypeError) as", "= capsys.readouterr() np.testing.assert_almost_equal(mean_ht, np.float(72.31657466298653), decimal=3) def test_gis2pixel2gis(): geo_head_txt = \"\"\"", "ax[1].imshow(im_out_rgb / 255) ax[1].set_title('rgb') # ----------------------------------------------- imarray_2d = rgb2gray(imarray_rgb) assert", "= shp.read_shp2d('file/shp_test/test.shp') poly_pixel = geotiff.geo2pixel(poly['0'], geotiff.get_header('file/tiff_test/2_12.tif')) imarray, offset = geotiff.clip_roi(poly_pixel,", "raise error because WGS 84 / UTM ... 
should be", "are supported' in str(errinfo.value) def test_imarray_clip_2d_rgb_rgba(): photo_path = 'file/pix4d.diy/photos/DJI_0174.JPG' roi", "test_prase_header_string_length(): out_dict = geotiff._prase_header_string(\"* 257 image_length (1H) 19866\") assert out_dict['length']", "geotiff.imarray_clip(imarray_2d, roi) ax[0].imshow(im_out_2d, cmap='gray') ax[0].set_title('gray') # ----------------------------------------------- imarray_rgba = np.dstack((imarray_rgb,", "# [Todo] pass def test_mean_values(capsys): mean_ht = geotiff.mean_values(r'file/pix4d.diy/hasu_tanashi_20170525_Ins1RGB_30m_dsm.tif') captured =", "pytest.raises(TypeError) as errinfo: roi_w2_out = geotiff._is_roi_type(roi_wrong_2) assert 'Only list contains", "geotiff._is_roi_type(roi_wrong_1) assert 'Only numpy.ndarray points and list contains numpy.ndarray points", "[[123, 345], [456, 789]] roi1_out = geotiff._is_roi_type(roi1) assert roi1_out ==", "are supported' in str(errinfo.value) with pytest.raises(TypeError) as errinfo: roi_w3_out =", "0.001, 0.0) * 33922 model_tie_point (6d) (0.0, 0.0, 0.0, 484576.70205,", "[2270, 1270], [2227, 1263], [2251, 1223]]) fig, ax = plt.subplots(1,3,", "(123, 345) roi_wrong_2 = [123, 456] roi_wrong_3 = [[123, 345],", "<filename>easyric/tests/test_io_geotiff.py import pyproj import pytest import numpy as np from", "= capsys.readouterr() # When not convert to float, mean_values =", "np.float32(97.562584) np.testing.assert_almost_equal(mean_ht, np.float32(97.562584), decimal=3) # another case that not working", "3956071.13823,\" out_dict = geotiff._prase_header_string(in_str) assert out_dict['tie_point'] == (368090.77975000005, 3956071.13823) in_str", "255)) assert imarray_rgba.shape == (3456, 4608, 4) im_out_rgba, offsets =", "34737 geo_ascii_params (30s) b'WGS 84 / UTM zone 53N|WGS 84|'", "geotiff._prase_header_string(in_str) assert out_dict['tie_point'] == (368090.77975000005, 3956071.13823) def test_prase_header_string_nodata(): out_dict =", "previous version: 
# Cannot convert np.nan to int, fixed by", "0: 31255x19436x4, uint8, YXS, 1 pages, not mem-mappable Page 0:", "26086], [16708, 25585], [16946, 25445], [17228, 25956], [16972, 26086]]) pixel_coord", "54N|WGS 84|'\" out_dict = geotiff._prase_header_string(in_str) captured = capsys.readouterr() assert f\"[io][geotiff][GeoCorrd]", "501578, 502042, 502506, 502970, 5 * 277 samples_per_pixel (1H) 4", "np from easyric.io import geotiff, shp from skimage.io import imread", "assert imarray_rgb.shape == (3456, 4608, 3) im_out_rgb, offsets_rgb = geotiff.imarray_clip(imarray_rgb,", "= geotiff.imarray_clip(imarray_rgb, roi) ax[1].imshow(im_out_rgb / 255) ax[1].set_title('rgb') # ----------------------------------------------- imarray_2d", "cmap='gray') ax[0].set_title('gray') # ----------------------------------------------- imarray_rgba = np.dstack((imarray_rgb, np.ones((3456, 4608)) *", "Page 0: 31255x19436x4, uint8, 8 bit, rgb, lzw * 256", "\"\"\" gis_coord = np.asarray([[ 484593.67474654, 3862259.42413431], [ 484593.41064743, 3862259.92582402], [", "[123, 456] roi_wrong_3 = [[123, 345], [456, 789]] roi1_out =", "(1H) 13503\") assert out_dict['width'] == 13503 def test_prase_header_string_length(): out_dict =", "4608)) * 255)) assert imarray_rgba.shape == (3456, 4608, 4) im_out_rgba,", "out_dict['width'] == 13503 def test_prase_header_string_length(): out_dict = geotiff._prase_header_string(\"* 257 image_length", "# should raise error because WGS 84 / UTM ...", "failed, because [Input is not a CRS: UTM zone 54N]'", "b'WGS 84 / UTM zone 53N|WGS 84|' \"\"\" gis_coord =", "out = geotiff.point_query(r'file/pix4d.diy/hasu_tanashi_20170525_Ins1RGB_30m_dsm.tif', points) expected = np.asarray([97.624344, 97.59617]) np.testing.assert_almost_equal(out, expected,", "roi2 = [roi1, roi1] roi_wrong_1 = (123, 345) roi_wrong_2 =", "header = geotiff._prase_header_string(geo_head_txt) expected_pixel = np.asarray([[16972, 26086], [16708, 25585], [16946,", "(3456, 4608, 3) im_out_rgb, 
offsets_rgb = geotiff.imarray_clip(imarray_rgb, roi) ax[1].imshow(im_out_rgb /", "26086]]) pixel_coord = geotiff.geo2pixel(gis_coord, header) np.testing.assert_almost_equal(pixel_coord, expected_pixel) gis_revert = geotiff.pixel2geo(pixel_coord,", "test_get_imarray_with_header(capsys): pass def test_point_query_one_point(): point = (368023.004, 3955500.669) out =", "expected, decimal=3) def test_point_query_list_numpy_points(): points = np.asarray([[368022.581, 3955501.054], [368024.032, 3955500.465]])", "in_str = \"* 33550 model_pixel_scale (3d) (0.0029700000000000004, 0.0029700000000000004, 0\" out_dict", "np.asarray([97.624344, 97.59617]) np.testing.assert_almost_equal(out, expected, decimal=3) def test_point_query_list_numpy_points(): points = np.asarray([[368022.581,", "out_dict['tie_point'] == (368090.77975000005, 3956071.13823) in_str = \"* 33922 model_tie_point (6d)", "484576.70205, 3862285.5109300003, * 34735 geo_key_directory (32H) (1, 1, 0, 7,", "3862285.5109300003, * 34735 geo_key_directory (32H) (1, 1, 0, 7, 1024,", "(368090.77975000005, 3956071.13823) in_str = \"* 33922 model_tie_point (6d) (0.0, 0.0,", "84 / UTM zone 54N|WGS 84|'\" out_dict = geotiff._prase_header_string(in_str) captured", "Comprehense [{in_str}]\" in captured.out assert out_dict['proj'] == pyproj.CRS.from_epsg(32654) def test_prase_header_string_proj_error(capsys):", "assert imarray_rgba.shape == (3456, 4608, 4) im_out_rgba, offsets = geotiff.imarray_clip(imarray_rgba,", "expected = np.asarray([97.624344, 97.59617]) np.testing.assert_almost_equal(out, expected, decimal=3) def test_point_query_list_numpy_points(): points", "= geotiff._is_roi_type(roi2) assert roi2_out == roi2 with pytest.raises(TypeError) as errinfo:", "3955500.465]]) out = geotiff.point_query(r'file/pix4d.diy/hasu_tanashi_20170525_Ins1RGB_30m_dsm.tif', points) expected = np.asarray([97.624344, 97.59617]) np.testing.assert_almost_equal(out,", "offset = geotiff.clip_roi(poly_pixel, 'file/tiff_test/2_12.tif', 
is_geo=False) assert len(imarray) == 1 def", "== type(out) np.testing.assert_almost_equal(expected[0], out[0], decimal=3) np.testing.assert_almost_equal(expected[1], out[1], decimal=3) def test_point_query_wrong_types():", "# Cannot convert np.nan to int, fixed by astype(float) mean_ht", "* 259 compression (1H) 5 * 262 photometric (1H) 2", "np.testing.assert_almost_equal(pixel_coord, expected_pixel) gis_revert = geotiff.pixel2geo(pixel_coord, header) np.testing.assert_almost_equal(gis_revert, gis_coord, decimal=3) def", "pixel_coord = geotiff.geo2pixel(gis_coord, header) np.testing.assert_almost_equal(pixel_coord, expected_pixel) gis_revert = geotiff.pixel2geo(pixel_coord, header)", "from easyric.io import geotiff, shp from skimage.io import imread from", "258 bits_per_sample (4H) (8, 8, 8, 8) * 259 compression", "test_clip_roi_pixel(): poly = shp.read_shp2d('file/shp_test/test.shp') poly_pixel = geotiff.geo2pixel(poly['0'], geotiff.get_header('file/tiff_test/2_12.tif')) imarray, offset", "1024, 0, 1, 1, 1025, 0, 1, 1, 1026 *", "be full out_dict = geotiff._prase_header_string(\"* 34737 geo_ascii_params (30s) b'UTM zone", "type(out) np.testing.assert_almost_equal(expected[0], out[0], decimal=3) np.testing.assert_almost_equal(expected[1], out[1], decimal=3) def test_point_query_wrong_types(): #", "def test_prase_header_string_tie_point(): in_str = \"* 33922 model_tie_point (6d) (0.0, 0.0,", "shp from skimage.io import imread from skimage.color import rgb2gray import", "out_dict['nodata'] == -10000 def test_prase_header_string_proj_normal(capsys): in_str = \"* 34737 geo_ascii_params", "to float, mean_values = 97.562584 # assert mean_ht == np.float32(97.562584)", "test_is_roi_type(): roi1 = np.asarray([[123, 456], [456, 789]]) roi2 = [roi1,", "3955500.669]]) p_list = [point, points] expected = [np.asarray([97.45558]), np.asarray([97.624344, 97.59617])]", "= np.asarray([[368022.581, 3955501.054], [368024.032, 3955500.465]]) out = 
geotiff.point_query(r'file/pix4d.diy/hasu_tanashi_20170525_Ins1RGB_30m_dsm.tif', points) expected", "* 339 sample_format (4H) (1, 1, 1, 1) * 33550", "200423_G_M600pro_transparent_mosaic_group1.tif, 411 MiB, little endian, bigtiff Series 0: 31255x19436x4, uint8,", "2 * 338 extra_samples (1H) 2 * 339 sample_format (4H)", "3955500.669) out = geotiff.point_query(r'file/pix4d.diy/hasu_tanashi_20170525_Ins1RGB_30m_dsm.tif', point) np.testing.assert_almost_equal(out, np.float32(97.45558), decimal=3) def test_point_query_numpy_points():", "* 284 planar_configuration (1H) 1 * 305 software (12s) b'pix4dmapper'", "pass def test_get_imarray_with_header(capsys): pass def test_point_query_one_point(): point = (368023.004, 3955500.669)", "another case that not working in previous version: # Cannot", "contains numpy.ndarray points are supported' in str(errinfo.value) with pytest.raises(TypeError) as", "== (3456, 4608, 3) im_out_rgb, offsets_rgb = geotiff.imarray_clip(imarray_rgb, roi) ax[1].imshow(im_out_rgb", "gis_revert = geotiff.pixel2geo(pixel_coord, header) np.testing.assert_almost_equal(gis_revert, gis_coord, decimal=3) def test_is_roi_type(): roi1", "(1H) 2 * 338 extra_samples (1H) 2 * 339 sample_format", "decimal=3) def test_point_query_wrong_types(): # [TODO] pass def test_point_query_input_ndarray(): # [Todo]", "test_gis2pixel2gis(): geo_head_txt = \"\"\" TIFF file: 200423_G_M600pro_transparent_mosaic_group1.tif, 411 MiB, little", "mem-mappable Page 0: 31255x19436x4, uint8, 8 bit, rgb, lzw *", "samples_per_pixel (1H) 4 * 278 rows_per_strip (1H) 1 * 279", "full out_dict = geotiff._prase_header_string(\"* 34737 geo_ascii_params (30s) b'UTM zone 54N|WGS", "464, 464, 464, 464, * 284 planar_configuration (1H) 1 *", "* 256 image_width (1H) 19436 * 257 image_length (1H) 31255", "ax[0].set_title('gray') # ----------------------------------------------- imarray_rgba = np.dstack((imarray_rgb, np.ones((3456, 4608)) * 255))", "317 predictor (1H) 2 * 338 extra_samples (1H) 2 *", 
"(3d) (0.001, 0.001, 0.0) * 33922 model_tie_point (6d) (0.0, 0.0,", "geotiff._prase_header_string(\"* 34737 geo_ascii_params (30s) b'UTM zone 54N|WGS 84|'\") captured =", "(1, 1, 1, 1) * 33550 model_pixel_scale (3d) (0.001, 0.001,", "(6d) (0.0, 0.0, 0.0, 368090.77975000005, 3956071.13823, 0\" out_dict = geotiff._prase_header_string(in_str)", "284 planar_configuration (1H) 1 * 305 software (12s) b'pix4dmapper' *", "bigtiff Series 0: 31255x19436x4, uint8, YXS, 1 pages, not mem-mappable", "str(errinfo.value) with pytest.raises(TypeError) as errinfo: roi_w3_out = geotiff._is_roi_type(roi_wrong_3) assert 'Only", "277 samples_per_pixel (1H) 4 * 278 rows_per_strip (1H) 1 *", "in_str = \"* 33922 model_tie_point (6d) (0.0, 0.0, 0.0, 368090.77975000005,", "zone 54N]' in captured.out assert out_dict['proj'] == None def test_get_imarray_without_header(capsys):", "decimal=3) def test_point_query_numpy_points(): points = np.asarray([[368022.581, 3955501.054], [368024.032, 3955500.465]]) out", "= np.asarray([[2251, 1223], [2270, 1270], [2227, 1263], [2251, 1223]]) fig,", "33922 model_tie_point (6d) (0.0, 0.0, 0.0, 368090.77975000005, 3956071.13823, 0\" out_dict", "out_dict = geotiff._prase_header_string(\"* 257 image_length (1H) 19866\") assert out_dict['length'] ==", "# When not convert to float, mean_values = 97.562584 #", "* 262 photometric (1H) 2 * 273 strip_offsets (31255Q) (500650,", "= geotiff._prase_header_string(in_str) assert out_dict['scale'] == (0.0029700000000000004, 0.0029700000000000004) def test_prase_header_string_tie_point(): in_str", "Series 0: 31255x19436x4, uint8, YXS, 1 pages, not mem-mappable Page", "4608, 4) im_out_rgba, offsets = geotiff.imarray_clip(imarray_rgba, roi) ax[2].imshow(im_out_rgba/255) ax[2].set_title('rgba') plt.show()", "None def test_get_imarray_without_header(capsys): pass def test_get_imarray_with_header(capsys): pass def test_point_query_one_point(): point", "33922 model_tie_point (6d) (0.0, 0.0, 0.0, 368090.77975000005, 
3956071.13823,\" out_dict =", "* 33922 model_tie_point (6d) (0.0, 0.0, 0.0, 484576.70205, 3862285.5109300003, *", "in str(errinfo.value) with pytest.raises(TypeError) as errinfo: roi_w2_out = geotiff._is_roi_type(roi_wrong_2) assert", "rgb2gray import matplotlib.pyplot as plt def test_prase_header_string_width(): out_dict = geotiff._prase_header_string(\"*", "0.0029700000000000004) def test_prase_header_string_tie_point(): in_str = \"* 33922 model_tie_point (6d) (0.0,", "def test_point_query_list_numpy_points(): points = np.asarray([[368022.581, 3955501.054], [368024.032, 3955500.465]]) point =", "bits_per_sample (4H) (8, 8, 8, 8) * 259 compression (1H)", "25956], [16972, 26086]]) pixel_coord = geotiff.geo2pixel(gis_coord, header) np.testing.assert_almost_equal(pixel_coord, expected_pixel) gis_revert", "rgb2gray(imarray_rgb) assert imarray_2d.shape == (3456, 4608) im_out_2d, offsets_2d = geotiff.imarray_clip(imarray_2d,", "as errinfo: roi_w2_out = geotiff._is_roi_type(roi_wrong_2) assert 'Only list contains numpy.ndarray", "84 / UTM zone 53N|WGS 84|' \"\"\" gis_coord = np.asarray([[", "(0.0, 0.0, 0.0, 484576.70205, 3862285.5109300003, * 34735 geo_key_directory (32H) (1,", "geotiff.imarray_clip(imarray_rgba, roi) ax[2].imshow(im_out_rgba/255) ax[2].set_title('rgba') plt.show() def test_clip_roi_pixel(): poly = shp.read_shp2d('file/shp_test/test.shp')", "imarray, offset = geotiff.clip_roi(poly_pixel, 'file/tiff_test/2_12.tif', is_geo=False) assert len(imarray) == 1", "789]] roi1_out = geotiff._is_roi_type(roi1) assert roi1_out == [roi1] roi2_out =", "= geotiff._prase_header_string(\"* 256 image_width (1H) 13503\") assert out_dict['width'] == 13503", "== None def test_get_imarray_without_header(capsys): pass def test_get_imarray_with_header(capsys): pass def test_point_query_one_point():", "0.0, 0.0, 368090.77975000005, 3956071.13823,\" out_dict = geotiff._prase_header_string(in_str) assert out_dict['tie_point'] ==", "WGS 84 / UTM ... 
should be full out_dict =", "3862259.42413431]]) header = geotiff._prase_header_string(geo_head_txt) expected_pixel = np.asarray([[16972, 26086], [16708, 25585],", "= rgb2gray(imarray_rgb) assert imarray_2d.shape == (3456, 4608) im_out_2d, offsets_2d =", "geotiff.point_query(r'file/pix4d.diy/hasu_tanashi_20170525_Ins1RGB_30m_dsm.tif', points) expected = np.asarray([97.624344, 97.59617]) np.testing.assert_almost_equal(out, expected, decimal=3) def", "= (368023.004, 3955500.669) out = geotiff.point_query(r'file/pix4d.diy/hasu_tanashi_20170525_Ins1RGB_30m_dsm.tif', point) np.testing.assert_almost_equal(out, np.float32(97.45558), decimal=3)", "photo_path = 'file/pix4d.diy/photos/DJI_0174.JPG' roi = np.asarray([[2251, 1223], [2270, 1270], [2227,", "# ----------------------------------------------- imarray_rgb = imread(photo_path) assert imarray_rgb.shape == (3456, 4608,", "Generation failed, because [Input is not a CRS: UTM zone", "= geotiff._prase_header_string(\"* 34737 geo_ascii_params (30s) b'UTM zone 54N|WGS 84|'\") captured", "working in previous version: # Cannot convert np.nan to int,", "* 305 software (12s) b'pix4dmapper' * 317 predictor (1H) 2", "* 255)) assert imarray_rgba.shape == (3456, 4608, 4) im_out_rgba, offsets", "points = np.asarray([[368022.581, 3955501.054], [368024.032, 3955500.465]]) point = np.asarray([[368023.004, 3955500.669]])", "geotiff.imarray_clip(imarray_rgb, roi) ax[1].imshow(im_out_rgb / 255) ax[1].set_title('rgb') # ----------------------------------------------- imarray_2d =", "484593.93077419, 3862259.55455913], [ 484593.67474654, 3862259.42413431]]) header = geotiff._prase_header_string(geo_head_txt) expected_pixel =", "photometric (1H) 2 * 273 strip_offsets (31255Q) (500650, 501114, 501578,", "roi2_out == roi2 with pytest.raises(TypeError) as errinfo: roi_w1_out = geotiff._is_roi_type(roi_wrong_1)", "out_dict = geotiff._prase_header_string(in_str) assert out_dict['scale'] == (0.0029700000000000004, 0.0029700000000000004) def 
test_prase_header_string_tie_point():", "== (368090.77975000005, 3956071.13823) def test_prase_header_string_nodata(): out_dict = geotiff._prase_header_string(\"* 42113 gdal_nodata", "a CRS: UTM zone 54N]' in captured.out assert out_dict['proj'] ==", "(1H) 1 * 305 software (12s) b'pix4dmapper' * 317 predictor", "zone 54N|WGS 84|'\") captured = capsys.readouterr() assert '[io][geotiff][GeoCorrd] Generation failed,", "4608) im_out_2d, offsets_2d = geotiff.imarray_clip(imarray_2d, roi) ax[0].imshow(im_out_2d, cmap='gray') ax[0].set_title('gray') #", "image_length (1H) 31255 * 258 bits_per_sample (4H) (8, 8, 8,", "np.asarray([97.624344, 97.59617])] out = geotiff.point_query(r'file/pix4d.diy/hasu_tanashi_20170525_Ins1RGB_30m_dsm.tif', p_list) assert type(expected) == type(out)", "geotiff._is_roi_type(roi_wrong_2) assert 'Only list contains numpy.ndarray points are supported' in", "def test_mean_values(capsys): mean_ht = geotiff.mean_values(r'file/pix4d.diy/hasu_tanashi_20170525_Ins1RGB_30m_dsm.tif') captured = capsys.readouterr() # When", "[17228, 25956], [16972, 26086]]) pixel_coord = geotiff.geo2pixel(gis_coord, header) np.testing.assert_almost_equal(pixel_coord, expected_pixel)", "(30s) b'WGS 84 / UTM zone 53N|WGS 84|' \"\"\" gis_coord", "as np from easyric.io import geotiff, shp from skimage.io import", "(1H) 2 * 273 strip_offsets (31255Q) (500650, 501114, 501578, 502042,", "(6d) (0.0, 0.0, 0.0, 484576.70205, 3862285.5109300003, * 34735 geo_key_directory (32H)", "decimal=3) def test_point_query_list_numpy_points(): points = np.asarray([[368022.581, 3955501.054], [368024.032, 3955500.465]]) point", "(7s) b'-10000'\") assert out_dict['nodata'] == -10000 def test_prase_header_string_proj_normal(capsys): in_str =", "= geotiff.point_query(r'file/pix4d.diy/hasu_tanashi_20170525_Ins1RGB_30m_dsm.tif', point) np.testing.assert_almost_equal(out, np.float32(97.45558), decimal=3) def test_point_query_numpy_points(): points =", "[368024.032, 3955500.465]]) point = 
np.asarray([[368023.004, 3955500.669]]) p_list = [point, points]", "54N]' in captured.out assert out_dict['proj'] == None def test_get_imarray_without_header(capsys): pass", "411 MiB, little endian, bigtiff Series 0: 31255x19436x4, uint8, YXS,", "5 * 277 samples_per_pixel (1H) 4 * 278 rows_per_strip (1H)", "* 277 samples_per_pixel (1H) 4 * 278 rows_per_strip (1H) 1", "supported' in str(errinfo.value) with pytest.raises(TypeError) as errinfo: roi_w2_out = geotiff._is_roi_type(roi_wrong_2)", "figsize=(12,4)) # ----------------------------------------------- imarray_rgb = imread(photo_path) assert imarray_rgb.shape == (3456,", "as errinfo: roi_w1_out = geotiff._is_roi_type(roi_wrong_1) assert 'Only numpy.ndarray points and", "imread from skimage.color import rgb2gray import matplotlib.pyplot as plt def", "# ----------------------------------------------- imarray_2d = rgb2gray(imarray_rgb) assert imarray_2d.shape == (3456, 4608)", "= [[123, 345], [456, 789]] roi1_out = geotiff._is_roi_type(roi1) assert roi1_out", "in captured.out assert out_dict['proj'] == pyproj.CRS.from_epsg(32654) def test_prase_header_string_proj_error(capsys): # should", "(1H) 2 * 339 sample_format (4H) (1, 1, 1, 1)", "zone 54N|WGS 84|'\" out_dict = geotiff._prase_header_string(in_str) captured = capsys.readouterr() assert", "test_point_query_one_point(): point = (368023.004, 3955500.669) out = geotiff.point_query(r'file/pix4d.diy/hasu_tanashi_20170525_Ins1RGB_30m_dsm.tif', point) np.testing.assert_almost_equal(out,", "lzw * 256 image_width (1H) 19436 * 257 image_length (1H)", "0, 1, 1, 1026 * 34737 geo_ascii_params (30s) b'WGS 84", "im_out_rgb, offsets_rgb = geotiff.imarray_clip(imarray_rgb, roi) ax[1].imshow(im_out_rgb / 255) ax[1].set_title('rgb') #", "# another case that not working in previous version: #", "= \"* 34737 geo_ascii_params (30s) b'WGS 84 / UTM zone", "456] roi_wrong_3 = [[123, 345], [456, 789]] roi1_out = geotiff._is_roi_type(roi1)", "484593.64841806, 3862260.06515117], [ 
484593.93077419, 3862259.55455913], [ 484593.67474654, 3862259.42413431]]) header =", "b'WGS 84 / UTM zone 54N|WGS 84|'\" out_dict = geotiff._prase_header_string(in_str)", "(1H) 4 * 278 rows_per_strip (1H) 1 * 279 strip_byte_counts", "(30s) b'WGS 84 / UTM zone 54N|WGS 84|'\" out_dict =", "484593.67474654, 3862259.42413431], [ 484593.41064743, 3862259.92582402], [ 484593.64841806, 3862260.06515117], [ 484593.93077419,", "def test_clip_roi_pixel(): poly = shp.read_shp2d('file/shp_test/test.shp') poly_pixel = geotiff.geo2pixel(poly['0'], geotiff.get_header('file/tiff_test/2_12.tif')) imarray,", "255) ax[1].set_title('rgb') # ----------------------------------------------- imarray_2d = rgb2gray(imarray_rgb) assert imarray_2d.shape ==", "points are supported' in str(errinfo.value) with pytest.raises(TypeError) as errinfo: roi_w3_out", "geo_key_directory (32H) (1, 1, 0, 7, 1024, 0, 1, 1,", "b'-10000'\") assert out_dict['nodata'] == -10000 def test_prase_header_string_proj_normal(capsys): in_str = \"*", "in captured.out assert out_dict['proj'] == None def test_get_imarray_without_header(capsys): pass def", "* 257 image_length (1H) 31255 * 258 bits_per_sample (4H) (8,", "assert roi1_out == [roi1] roi2_out = geotiff._is_roi_type(roi2) assert roi2_out ==", "= geotiff.mean_values(r'file/tiff_test/2_12.tif') captured = capsys.readouterr() np.testing.assert_almost_equal(mean_ht, np.float(72.31657466298653), decimal=3) def test_gis2pixel2gis():", "= np.dstack((imarray_rgb, np.ones((3456, 4608)) * 255)) assert imarray_rgba.shape == (3456,", "1 pages, not mem-mappable Page 0: 31255x19436x4, uint8, 8 bit,", "not mem-mappable Page 0: 31255x19436x4, uint8, 8 bit, rgb, lzw", "with pytest.raises(TypeError) as errinfo: roi_w1_out = geotiff._is_roi_type(roi_wrong_1) assert 'Only numpy.ndarray", "as errinfo: roi_w3_out = geotiff._is_roi_type(roi_wrong_3) assert 'Only list contains numpy.ndarray", "geotiff, shp from skimage.io import imread from skimage.color import rgb2gray", "= 
np.asarray([[16972, 26086], [16708, 25585], [16946, 25445], [17228, 25956], [16972,", "pyproj import pytest import numpy as np from easyric.io import", "== 19866 def test_prase_header_string_scale(): in_str = \"* 33550 model_pixel_scale (3d)", "points = np.asarray([[368022.581, 3955501.054], [368024.032, 3955500.465]]) out = geotiff.point_query(r'file/pix4d.diy/hasu_tanashi_20170525_Ins1RGB_30m_dsm.tif', points)", "8, 8) * 259 compression (1H) 5 * 262 photometric", "fig, ax = plt.subplots(1,3, figsize=(12,4)) # ----------------------------------------------- imarray_rgb = imread(photo_path)", "= \"* 33922 model_tie_point (6d) (0.0, 0.0, 0.0, 368090.77975000005, 3956071.13823,", "supported' in str(errinfo.value) with pytest.raises(TypeError) as errinfo: roi_w3_out = geotiff._is_roi_type(roi_wrong_3)", "259 compression (1H) 5 * 262 photometric (1H) 2 *", "ax[2].imshow(im_out_rgba/255) ax[2].set_title('rgba') plt.show() def test_clip_roi_pixel(): poly = shp.read_shp2d('file/shp_test/test.shp') poly_pixel =", "uint8, YXS, 1 pages, not mem-mappable Page 0: 31255x19436x4, uint8,", "capsys.readouterr() assert '[io][geotiff][GeoCorrd] Generation failed, because [Input is not a", "case that not working in previous version: # Cannot convert", "capsys.readouterr() np.testing.assert_almost_equal(mean_ht, np.float(72.31657466298653), decimal=3) def test_gis2pixel2gis(): geo_head_txt = \"\"\" TIFF", "(1H) 31255 * 258 bits_per_sample (4H) (8, 8, 8, 8)", "34737 geo_ascii_params (30s) b'WGS 84 / UTM zone 54N|WGS 84|'\"", "== (3456, 4608) im_out_2d, offsets_2d = geotiff.imarray_clip(imarray_2d, roi) ax[0].imshow(im_out_2d, cmap='gray')", "/ UTM ... 
should be full out_dict = geotiff._prase_header_string(\"* 34737", "compression (1H) 5 * 262 photometric (1H) 2 * 273", "list contains numpy.ndarray points are supported' in str(errinfo.value) def test_imarray_clip_2d_rgb_rgba():", "offsets_2d = geotiff.imarray_clip(imarray_2d, roi) ax[0].imshow(im_out_2d, cmap='gray') ax[0].set_title('gray') # ----------------------------------------------- imarray_rgba", "1 * 279 strip_byte_counts (31255Q) (464, 464, 464, 464, 464,", "484593.67474654, 3862259.42413431]]) header = geotiff._prase_header_string(geo_head_txt) expected_pixel = np.asarray([[16972, 26086], [16708,", "shp.read_shp2d('file/shp_test/test.shp') imarray, offset = geotiff.clip_roi(poly['0'], 'file/tiff_test/2_12.tif', is_geo=True) assert len(imarray) ==", "= capsys.readouterr() assert f\"[io][geotiff][GeoCorrd] Comprehense [{in_str}]\" in captured.out assert out_dict['proj']", "'file/pix4d.diy/photos/DJI_0174.JPG' roi = np.asarray([[2251, 1223], [2270, 1270], [2227, 1263], [2251,", "strip_offsets (31255Q) (500650, 501114, 501578, 502042, 502506, 502970, 5 *", "planar_configuration (1H) 1 * 305 software (12s) b'pix4dmapper' * 317", "1, 1026 * 34737 geo_ascii_params (30s) b'WGS 84 / UTM", "np.asarray([[2251, 1223], [2270, 1270], [2227, 1263], [2251, 1223]]) fig, ax", "262 photometric (1H) 2 * 273 strip_offsets (31255Q) (500650, 501114,", "464, 464, 464, 464, 464, 464, * 284 planar_configuration (1H)", "roi1] roi_wrong_1 = (123, 345) roi_wrong_2 = [123, 456] roi_wrong_3", "pass def test_point_query_one_point(): point = (368023.004, 3955500.669) out = geotiff.point_query(r'file/pix4d.diy/hasu_tanashi_20170525_Ins1RGB_30m_dsm.tif',", "capsys.readouterr() assert f\"[io][geotiff][GeoCorrd] Comprehense [{in_str}]\" in captured.out assert out_dict['proj'] ==", "... 
should be full out_dict = geotiff._prase_header_string(\"* 34737 geo_ascii_params (30s)", "456], [456, 789]]) roi2 = [roi1, roi1] roi_wrong_1 = (123,", "YXS, 1 pages, not mem-mappable Page 0: 31255x19436x4, uint8, 8", "as plt def test_prase_header_string_width(): out_dict = geotiff._prase_header_string(\"* 256 image_width (1H)", "roi_w1_out = geotiff._is_roi_type(roi_wrong_1) assert 'Only numpy.ndarray points and list contains", "roi_wrong_2 = [123, 456] roi_wrong_3 = [[123, 345], [456, 789]]", "25585], [16946, 25445], [17228, 25956], [16972, 26086]]) pixel_coord = geotiff.geo2pixel(gis_coord,", "(0.001, 0.001, 0.0) * 33922 model_tie_point (6d) (0.0, 0.0, 0.0,", "84 / UTM ... should be full out_dict = geotiff._prase_header_string(\"*", "368090.77975000005, 3956071.13823, 0\" out_dict = geotiff._prase_header_string(in_str) assert out_dict['tie_point'] == (368090.77975000005,", "import rgb2gray import matplotlib.pyplot as plt def test_prase_header_string_width(): out_dict =", "np.asarray([[16972, 26086], [16708, 25585], [16946, 25445], [17228, 25956], [16972, 26086]])", "1, 1, 1) * 33550 model_pixel_scale (3d) (0.001, 0.001, 0.0)", "out = geotiff.point_query(r'file/pix4d.diy/hasu_tanashi_20170525_Ins1RGB_30m_dsm.tif', p_list) assert type(expected) == type(out) np.testing.assert_almost_equal(expected[0], out[0],", "0.0, 368090.77975000005, 3956071.13823, 0\" out_dict = geotiff._prase_header_string(in_str) assert out_dict['tie_point'] ==", "def test_get_imarray_without_header(capsys): pass def test_get_imarray_with_header(capsys): pass def test_point_query_one_point(): point =", "in str(errinfo.value) with pytest.raises(TypeError) as errinfo: roi_w3_out = geotiff._is_roi_type(roi_wrong_3) assert", "import numpy as np from easyric.io import geotiff, shp from", "geotiff.geo2pixel(gis_coord, header) np.testing.assert_almost_equal(pixel_coord, expected_pixel) gis_revert = geotiff.pixel2geo(pixel_coord, header) np.testing.assert_almost_equal(gis_revert, gis_coord,", 
"roi1_out == [roi1] roi2_out = geotiff._is_roi_type(roi2) assert roi2_out == roi2", "points are supported' in str(errinfo.value) def test_imarray_clip_2d_rgb_rgba(): photo_path = 'file/pix4d.diy/photos/DJI_0174.JPG'", "pytest.raises(TypeError) as errinfo: roi_w1_out = geotiff._is_roi_type(roi_wrong_1) assert 'Only numpy.ndarray points", "roi_wrong_1 = (123, 345) roi_wrong_2 = [123, 456] roi_wrong_3 =", "3862259.92582402], [ 484593.64841806, 3862260.06515117], [ 484593.93077419, 3862259.55455913], [ 484593.67474654, 3862259.42413431]])", "list contains numpy.ndarray points are supported' in str(errinfo.value) with pytest.raises(TypeError)", "b'UTM zone 54N|WGS 84|'\") captured = capsys.readouterr() assert '[io][geotiff][GeoCorrd] Generation", "[point, points] expected = [np.asarray([97.45558]), np.asarray([97.624344, 97.59617])] out = geotiff.point_query(r'file/pix4d.diy/hasu_tanashi_20170525_Ins1RGB_30m_dsm.tif',", "31255x19436x4, uint8, YXS, 1 pages, not mem-mappable Page 0: 31255x19436x4,", "'Only list contains numpy.ndarray points are supported' in str(errinfo.value) with", "= [np.asarray([97.45558]), np.asarray([97.624344, 97.59617])] out = geotiff.point_query(r'file/pix4d.diy/hasu_tanashi_20170525_Ins1RGB_30m_dsm.tif', p_list) assert type(expected)", "= np.asarray([[123, 456], [456, 789]]) roi2 = [roi1, roi1] roi_wrong_1", "0.0, 368090.77975000005, 3956071.13823,\" out_dict = geotiff._prase_header_string(in_str) assert out_dict['tie_point'] == (368090.77975000005,", "def test_imarray_clip_2d_rgb_rgba(): photo_path = 'file/pix4d.diy/photos/DJI_0174.JPG' roi = np.asarray([[2251, 1223], [2270,", "(6d) (0.0, 0.0, 0.0, 368090.77975000005, 3956071.13823,\" out_dict = geotiff._prase_header_string(in_str) assert", "= geotiff._prase_header_string(geo_head_txt) expected_pixel = np.asarray([[16972, 26086], [16708, 25585], [16946, 25445],", "464, 464, 464, 464, 464, 464, 464, 464, * 284", "def test_point_query_one_point(): point = (368023.004, 3955500.669) out = 
geotiff.point_query(r'file/pix4d.diy/hasu_tanashi_20170525_Ins1RGB_30m_dsm.tif', point)", "def test_prase_header_string_scale(): in_str = \"* 33550 model_pixel_scale (3d) (0.0029700000000000004, 0.0029700000000000004,", "(12s) b'pix4dmapper' * 317 predictor (1H) 2 * 338 extra_samples", "== (3456, 4608, 4) im_out_rgba, offsets = geotiff.imarray_clip(imarray_rgba, roi) ax[2].imshow(im_out_rgba/255)", "image_length (1H) 19866\") assert out_dict['length'] == 19866 def test_prase_header_string_scale(): in_str", "(368023.004, 3955500.669) out = geotiff.point_query(r'file/pix4d.diy/hasu_tanashi_20170525_Ins1RGB_30m_dsm.tif', point) np.testing.assert_almost_equal(out, np.float32(97.45558), decimal=3) def", "pass def test_point_query_input_ndarray(): # [Todo] pass def test_mean_values(capsys): mean_ht =", "test_prase_header_string_proj_error(capsys): # should raise error because WGS 84 / UTM", "poly = shp.read_shp2d('file/shp_test/test.shp') poly_pixel = geotiff.geo2pixel(poly['0'], geotiff.get_header('file/tiff_test/2_12.tif')) imarray, offset =", "geotiff.geo2pixel(poly['0'], geotiff.get_header('file/tiff_test/2_12.tif')) imarray, offset = geotiff.clip_roi(poly_pixel, 'file/tiff_test/2_12.tif', is_geo=False) assert len(imarray)", "54N|WGS 84|'\") captured = capsys.readouterr() assert '[io][geotiff][GeoCorrd] Generation failed, because", "error because WGS 84 / UTM ... 
should be full", "338 extra_samples (1H) 2 * 339 sample_format (4H) (1, 1,", "3956071.13823) in_str = \"* 33922 model_tie_point (6d) (0.0, 0.0, 0.0,", "from skimage.color import rgb2gray import matplotlib.pyplot as plt def test_prase_header_string_width():", "type(expected) == type(out) np.testing.assert_almost_equal(expected[0], out[0], decimal=3) np.testing.assert_almost_equal(expected[1], out[1], decimal=3) def", "captured.out assert out_dict['proj'] == None def test_get_imarray_without_header(capsys): pass def test_get_imarray_with_header(capsys):", "== np.float32(97.562584) np.testing.assert_almost_equal(mean_ht, np.float32(97.562584), decimal=3) # another case that not", "1) * 33550 model_pixel_scale (3d) (0.001, 0.001, 0.0) * 33922", "plt def test_prase_header_string_width(): out_dict = geotiff._prase_header_string(\"* 256 image_width (1H) 13503\")", "str(errinfo.value) def test_imarray_clip_2d_rgb_rgba(): photo_path = 'file/pix4d.diy/photos/DJI_0174.JPG' roi = np.asarray([[2251, 1223],", "(32H) (1, 1, 0, 7, 1024, 0, 1, 1, 1025,", "b'pix4dmapper' * 317 predictor (1H) 2 * 338 extra_samples (1H)", "str(errinfo.value) with pytest.raises(TypeError) as errinfo: roi_w2_out = geotiff._is_roi_type(roi_wrong_2) assert 'Only", "convert np.nan to int, fixed by astype(float) mean_ht = geotiff.mean_values(r'file/tiff_test/2_12.tif')", "is_geo=False) assert len(imarray) == 1 def test_clip_roi_geo(): poly = shp.read_shp2d('file/shp_test/test.shp')", "in previous version: # Cannot convert np.nan to int, fixed", "(3d) (0.0029700000000000004, 0.0029700000000000004, 0\" out_dict = geotiff._prase_header_string(in_str) assert out_dict['scale'] ==", "42113 gdal_nodata (7s) b'-10000'\") assert out_dict['nodata'] == -10000 def test_prase_header_string_proj_normal(capsys):", "= capsys.readouterr() assert '[io][geotiff][GeoCorrd] Generation failed, because [Input is not", "imarray_rgb = imread(photo_path) assert imarray_rgb.shape == (3456, 4608, 3) im_out_rgb,", "= 
geotiff._is_roi_type(roi1) assert roi1_out == [roi1] roi2_out = geotiff._is_roi_type(roi2) assert", "should raise error because WGS 84 / UTM ... should", "point = np.asarray([[368023.004, 3955500.669]]) p_list = [point, points] expected =", "33550 model_pixel_scale (3d) (0.001, 0.001, 0.0) * 33922 model_tie_point (6d)", "test_point_query_input_ndarray(): # [Todo] pass def test_mean_values(capsys): mean_ht = geotiff.mean_values(r'file/pix4d.diy/hasu_tanashi_20170525_Ins1RGB_30m_dsm.tif') captured", "int, fixed by astype(float) mean_ht = geotiff.mean_values(r'file/tiff_test/2_12.tif') captured = capsys.readouterr()", "numpy.ndarray points and list contains numpy.ndarray points are supported' in", "= 97.562584 # assert mean_ht == np.float32(97.562584) np.testing.assert_almost_equal(mean_ht, np.float32(97.562584), decimal=3)", "31255x19436x4, uint8, 8 bit, rgb, lzw * 256 image_width (1H)", "assert type(expected) == type(out) np.testing.assert_almost_equal(expected[0], out[0], decimal=3) np.testing.assert_almost_equal(expected[1], out[1], decimal=3)", "plt.show() def test_clip_roi_pixel(): poly = shp.read_shp2d('file/shp_test/test.shp') poly_pixel = geotiff.geo2pixel(poly['0'], geotiff.get_header('file/tiff_test/2_12.tif'))", "\"* 33922 model_tie_point (6d) (0.0, 0.0, 0.0, 368090.77975000005, 3956071.13823,\" out_dict", "header) np.testing.assert_almost_equal(pixel_coord, expected_pixel) gis_revert = geotiff.pixel2geo(pixel_coord, header) np.testing.assert_almost_equal(gis_revert, gis_coord, decimal=3)", "assert out_dict['length'] == 19866 def test_prase_header_string_scale(): in_str = \"* 33550", "\"* 33922 model_tie_point (6d) (0.0, 0.0, 0.0, 368090.77975000005, 3956071.13823, 0\"", "out_dict = geotiff._prase_header_string(\"* 34737 geo_ascii_params (30s) b'UTM zone 54N|WGS 84|'\")", "484593.41064743, 3862259.92582402], [ 484593.64841806, 3862260.06515117], [ 484593.93077419, 3862259.55455913], [ 484593.67474654,", "poly = shp.read_shp2d('file/shp_test/test.shp') 
imarray, offset = geotiff.clip_roi(poly['0'], 'file/tiff_test/2_12.tif', is_geo=True) assert", "# ----------------------------------------------- imarray_rgba = np.dstack((imarray_rgb, np.ones((3456, 4608)) * 255)) assert", "easyric.io import geotiff, shp from skimage.io import imread from skimage.color", "np.nan to int, fixed by astype(float) mean_ht = geotiff.mean_values(r'file/tiff_test/2_12.tif') captured", "464, 464, 464, * 284 planar_configuration (1H) 1 * 305", "model_tie_point (6d) (0.0, 0.0, 0.0, 368090.77975000005, 3956071.13823, 0\" out_dict =", "geotiff._prase_header_string(geo_head_txt) expected_pixel = np.asarray([[16972, 26086], [16708, 25585], [16946, 25445], [17228,", "'Only numpy.ndarray points and list contains numpy.ndarray points are supported'", "= geotiff._is_roi_type(roi_wrong_3) assert 'Only list contains numpy.ndarray points are supported'", "[456, 789]]) roi2 = [roi1, roi1] roi_wrong_1 = (123, 345)", "(31255Q) (464, 464, 464, 464, 464, 464, 464, 464, 464,", "'file/tiff_test/2_12.tif', is_geo=False) assert len(imarray) == 1 def test_clip_roi_geo(): poly =", "test_point_query_list_numpy_points(): points = np.asarray([[368022.581, 3955501.054], [368024.032, 3955500.465]]) point = np.asarray([[368023.004,", "and list contains numpy.ndarray points are supported' in str(errinfo.value) with", "* 258 bits_per_sample (4H) (8, 8, 8, 8) * 259", "numpy.ndarray points are supported' in str(errinfo.value) with pytest.raises(TypeError) as errinfo:", "1, 1) * 33550 model_pixel_scale (3d) (0.001, 0.001, 0.0) *", "plt.subplots(1,3, figsize=(12,4)) # ----------------------------------------------- imarray_rgb = imread(photo_path) assert imarray_rgb.shape ==", "278 rows_per_strip (1H) 1 * 279 strip_byte_counts (31255Q) (464, 464,", "errinfo: roi_w3_out = geotiff._is_roi_type(roi_wrong_3) assert 'Only list contains numpy.ndarray points", "CRS: UTM zone 54N]' in captured.out assert out_dict['proj'] == None", 
"geotiff.point_query(r'file/pix4d.diy/hasu_tanashi_20170525_Ins1RGB_30m_dsm.tif', point) np.testing.assert_almost_equal(out, np.float32(97.45558), decimal=3) def test_point_query_numpy_points(): points = np.asarray([[368022.581,", "len(imarray) == 1 def test_clip_roi_geo(): poly = shp.read_shp2d('file/shp_test/test.shp') imarray, offset", "/ UTM zone 54N|WGS 84|'\" out_dict = geotiff._prase_header_string(in_str) captured =", "= np.asarray([[368023.004, 3955500.669]]) p_list = [point, points] expected = [np.asarray([97.45558]),", "pass def test_mean_values(capsys): mean_ht = geotiff.mean_values(r'file/pix4d.diy/hasu_tanashi_20170525_Ins1RGB_30m_dsm.tif') captured = capsys.readouterr() #", "2 * 339 sample_format (4H) (1, 1, 1, 1) *", "import imread from skimage.color import rgb2gray import matplotlib.pyplot as plt", "1, 0, 7, 1024, 0, 1, 1, 1025, 0, 1,", "3955501.054], [368024.032, 3955500.465]]) point = np.asarray([[368023.004, 3955500.669]]) p_list = [point,", "* 278 rows_per_strip (1H) 1 * 279 strip_byte_counts (31255Q) (464,", "import pytest import numpy as np from easyric.io import geotiff,", "= geotiff._prase_header_string(in_str) assert out_dict['tie_point'] == (368090.77975000005, 3956071.13823) def test_prase_header_string_nodata(): out_dict", "software (12s) b'pix4dmapper' * 317 predictor (1H) 2 * 338", "geotiff._prase_header_string(\"* 256 image_width (1H) 13503\") assert out_dict['width'] == 13503 def", "97.59617]) np.testing.assert_almost_equal(out, expected, decimal=3) def test_point_query_list_numpy_points(): points = np.asarray([[368022.581, 3955501.054],", "assert 'Only numpy.ndarray points and list contains numpy.ndarray points are", "----------------------------------------------- imarray_rgb = imread(photo_path) assert imarray_rgb.shape == (3456, 4608, 3)", "np.testing.assert_almost_equal(mean_ht, np.float32(97.562584), decimal=3) # another case that not working in", "out_dict['scale'] == (0.0029700000000000004, 0.0029700000000000004) def 
test_prase_header_string_tie_point(): in_str = \"* 33922", "(8, 8, 8, 8) * 259 compression (1H) 5 *", "geo_ascii_params (30s) b'WGS 84 / UTM zone 54N|WGS 84|'\" out_dict", "[np.asarray([97.45558]), np.asarray([97.624344, 97.59617])] out = geotiff.point_query(r'file/pix4d.diy/hasu_tanashi_20170525_Ins1RGB_30m_dsm.tif', p_list) assert type(expected) ==", "geo_head_txt = \"\"\" TIFF file: 200423_G_M600pro_transparent_mosaic_group1.tif, 411 MiB, little endian,", "header) np.testing.assert_almost_equal(gis_revert, gis_coord, decimal=3) def test_is_roi_type(): roi1 = np.asarray([[123, 456],", "supported' in str(errinfo.value) def test_imarray_clip_2d_rgb_rgba(): photo_path = 'file/pix4d.diy/photos/DJI_0174.JPG' roi =", "decimal=3) np.testing.assert_almost_equal(expected[1], out[1], decimal=3) def test_point_query_wrong_types(): # [TODO] pass def", "4608, 3) im_out_rgb, offsets_rgb = geotiff.imarray_clip(imarray_rgb, roi) ax[1].imshow(im_out_rgb / 255)", "np.asarray([[123, 456], [456, 789]]) roi2 = [roi1, roi1] roi_wrong_1 =", "(31255Q) (500650, 501114, 501578, 502042, 502506, 502970, 5 * 277", "19436 * 257 image_length (1H) 31255 * 258 bits_per_sample (4H)", "0, 7, 1024, 0, 1, 1, 1025, 0, 1, 1,", "little endian, bigtiff Series 0: 31255x19436x4, uint8, YXS, 1 pages,", "that not working in previous version: # Cannot convert np.nan", "captured = capsys.readouterr() assert f\"[io][geotiff][GeoCorrd] Comprehense [{in_str}]\" in captured.out assert", "roi = np.asarray([[2251, 1223], [2270, 1270], [2227, 1263], [2251, 1223]])", "test_get_imarray_without_header(capsys): pass def test_get_imarray_with_header(capsys): pass def test_point_query_one_point(): point = (368023.004,", "UTM ... 
should be full out_dict = geotiff._prase_header_string(\"* 34737 geo_ascii_params", "np.float(72.31657466298653), decimal=3) def test_gis2pixel2gis(): geo_head_txt = \"\"\" TIFF file: 200423_G_M600pro_transparent_mosaic_group1.tif,", "def test_clip_roi_geo(): poly = shp.read_shp2d('file/shp_test/test.shp') imarray, offset = geotiff.clip_roi(poly['0'], 'file/tiff_test/2_12.tif',", "geotiff._prase_header_string(\"* 42113 gdal_nodata (7s) b'-10000'\") assert out_dict['nodata'] == -10000 def", "astype(float) mean_ht = geotiff.mean_values(r'file/tiff_test/2_12.tif') captured = capsys.readouterr() np.testing.assert_almost_equal(mean_ht, np.float(72.31657466298653), decimal=3)", "257 image_length (1H) 19866\") assert out_dict['length'] == 19866 def test_prase_header_string_scale():", "assert out_dict['nodata'] == -10000 def test_prase_header_string_proj_normal(capsys): in_str = \"* 34737", "expected_pixel) gis_revert = geotiff.pixel2geo(pixel_coord, header) np.testing.assert_almost_equal(gis_revert, gis_coord, decimal=3) def test_is_roi_type():", "4 * 278 rows_per_strip (1H) 1 * 279 strip_byte_counts (31255Q)", "= geotiff.imarray_clip(imarray_2d, roi) ax[0].imshow(im_out_2d, cmap='gray') ax[0].set_title('gray') # ----------------------------------------------- imarray_rgba =", "matplotlib.pyplot as plt def test_prase_header_string_width(): out_dict = geotiff._prase_header_string(\"* 256 image_width", "in_str = \"* 34737 geo_ascii_params (30s) b'WGS 84 / UTM", "np.ones((3456, 4608)) * 255)) assert imarray_rgba.shape == (3456, 4608, 4)", "offsets = geotiff.imarray_clip(imarray_rgba, roi) ax[2].imshow(im_out_rgba/255) ax[2].set_title('rgba') plt.show() def test_clip_roi_pixel(): poly", "84|' \"\"\" gis_coord = np.asarray([[ 484593.67474654, 3862259.42413431], [ 484593.41064743, 3862259.92582402],", "34737 geo_ascii_params (30s) b'UTM zone 54N|WGS 84|'\") captured = capsys.readouterr()", "errinfo: roi_w1_out = geotiff._is_roi_type(roi_wrong_1) assert 'Only numpy.ndarray 
points and list", "ax[1].set_title('rgb') # ----------------------------------------------- imarray_2d = rgb2gray(imarray_rgb) assert imarray_2d.shape == (3456,", "capsys.readouterr() # When not convert to float, mean_values = 97.562584", "imarray_rgb.shape == (3456, 4608, 3) im_out_rgb, offsets_rgb = geotiff.imarray_clip(imarray_rgb, roi)", "1, 1025, 0, 1, 1, 1026 * 34737 geo_ascii_params (30s)", "geotiff.mean_values(r'file/tiff_test/2_12.tif') captured = capsys.readouterr() np.testing.assert_almost_equal(mean_ht, np.float(72.31657466298653), decimal=3) def test_gis2pixel2gis(): geo_head_txt", "out_dict['proj'] == pyproj.CRS.from_epsg(32654) def test_prase_header_string_proj_error(capsys): # should raise error because", "numpy as np from easyric.io import geotiff, shp from skimage.io", "test_point_query_numpy_points(): points = np.asarray([[368022.581, 3955501.054], [368024.032, 3955500.465]]) out = geotiff.point_query(r'file/pix4d.diy/hasu_tanashi_20170525_Ins1RGB_30m_dsm.tif',", "279 strip_byte_counts (31255Q) (464, 464, 464, 464, 464, 464, 464,", "= plt.subplots(1,3, figsize=(12,4)) # ----------------------------------------------- imarray_rgb = imread(photo_path) assert imarray_rgb.shape", "464, 464, 464, 464, 464, * 284 planar_configuration (1H) 1", "[368024.032, 3955500.465]]) out = geotiff.point_query(r'file/pix4d.diy/hasu_tanashi_20170525_Ins1RGB_30m_dsm.tif', points) expected = np.asarray([97.624344, 97.59617])", "assert 'Only list contains numpy.ndarray points are supported' in str(errinfo.value)", "contains numpy.ndarray points are supported' in str(errinfo.value) def test_imarray_clip_2d_rgb_rgba(): photo_path", "roi) ax[1].imshow(im_out_rgb / 255) ax[1].set_title('rgb') # ----------------------------------------------- imarray_2d = rgb2gray(imarray_rgb)", "not convert to float, mean_values = 97.562584 # assert mean_ht", "out_dict = geotiff._prase_header_string(in_str) assert out_dict['tie_point'] == (368090.77975000005, 3956071.13823) in_str =", 
"[Input is not a CRS: UTM zone 54N]' in captured.out", "pytest.raises(TypeError) as errinfo: roi_w3_out = geotiff._is_roi_type(roi_wrong_3) assert 'Only list contains", "345], [456, 789]] roi1_out = geotiff._is_roi_type(roi1) assert roi1_out == [roi1]", "(0.0, 0.0, 0.0, 368090.77975000005, 3956071.13823, 0\" out_dict = geotiff._prase_header_string(in_str) assert", "53N|WGS 84|' \"\"\" gis_coord = np.asarray([[ 484593.67474654, 3862259.42413431], [ 484593.41064743,", "= geotiff.imarray_clip(imarray_rgba, roi) ax[2].imshow(im_out_rgba/255) ax[2].set_title('rgba') plt.show() def test_clip_roi_pixel(): poly =", "assert imarray_2d.shape == (3456, 4608) im_out_2d, offsets_2d = geotiff.imarray_clip(imarray_2d, roi)", "convert to float, mean_values = 97.562584 # assert mean_ht ==", "3956071.13823, 0\" out_dict = geotiff._prase_header_string(in_str) assert out_dict['tie_point'] == (368090.77975000005, 3956071.13823)", "expected = [np.asarray([97.45558]), np.asarray([97.624344, 97.59617])] out = geotiff.point_query(r'file/pix4d.diy/hasu_tanashi_20170525_Ins1RGB_30m_dsm.tif', p_list) assert", "def test_prase_header_string_proj_normal(capsys): in_str = \"* 34737 geo_ascii_params (30s) b'WGS 84", "MiB, little endian, bigtiff Series 0: 31255x19436x4, uint8, YXS, 1", "19866\") assert out_dict['length'] == 19866 def test_prase_header_string_scale(): in_str = \"*", "464, 464, * 284 planar_configuration (1H) 1 * 305 software", "1025, 0, 1, 1, 1026 * 34737 geo_ascii_params (30s) b'WGS", "33922 model_tie_point (6d) (0.0, 0.0, 0.0, 484576.70205, 3862285.5109300003, * 34735", "31255 * 258 bits_per_sample (4H) (8, 8, 8, 8) *", "np.testing.assert_almost_equal(out, np.float32(97.45558), decimal=3) def test_point_query_numpy_points(): points = np.asarray([[368022.581, 3955501.054], [368024.032,", "345) roi_wrong_2 = [123, 456] roi_wrong_3 = [[123, 345], [456,", "import geotiff, shp from skimage.io import imread from skimage.color import", "assert out_dict['tie_point'] == 
(368090.77975000005, 3956071.13823) in_str = \"* 33922 model_tie_point", "3862259.42413431], [ 484593.41064743, 3862259.92582402], [ 484593.64841806, 3862260.06515117], [ 484593.93077419, 3862259.55455913],", "captured = capsys.readouterr() # When not convert to float, mean_values", "in str(errinfo.value) def test_imarray_clip_2d_rgb_rgba(): photo_path = 'file/pix4d.diy/photos/DJI_0174.JPG' roi = np.asarray([[2251,", "[16708, 25585], [16946, 25445], [17228, 25956], [16972, 26086]]) pixel_coord =", "roi) ax[2].imshow(im_out_rgba/255) ax[2].set_title('rgba') plt.show() def test_clip_roi_pixel(): poly = shp.read_shp2d('file/shp_test/test.shp') poly_pixel", "poly_pixel = geotiff.geo2pixel(poly['0'], geotiff.get_header('file/tiff_test/2_12.tif')) imarray, offset = geotiff.clip_roi(poly_pixel, 'file/tiff_test/2_12.tif', is_geo=False)", "predictor (1H) 2 * 338 extra_samples (1H) 2 * 339", "np.asarray([[ 484593.67474654, 3862259.42413431], [ 484593.41064743, 3862259.92582402], [ 484593.64841806, 3862260.06515117], [", "(1H) 19436 * 257 image_length (1H) 31255 * 258 bits_per_sample", "== 13503 def test_prase_header_string_length(): out_dict = geotiff._prase_header_string(\"* 257 image_length (1H)", "out_dict['length'] == 19866 def test_prase_header_string_scale(): in_str = \"* 33550 model_pixel_scale", "assert roi2_out == roi2 with pytest.raises(TypeError) as errinfo: roi_w1_out =", "84|'\" out_dict = geotiff._prase_header_string(in_str) captured = capsys.readouterr() assert f\"[io][geotiff][GeoCorrd] Comprehense", "* 338 extra_samples (1H) 2 * 339 sample_format (4H) (1,", "= np.asarray([[368022.581, 3955501.054], [368024.032, 3955500.465]]) point = np.asarray([[368023.004, 3955500.669]]) p_list", "273 strip_offsets (31255Q) (500650, 501114, 501578, 502042, 502506, 502970, 5", "368090.77975000005, 3956071.13823,\" out_dict = geotiff._prase_header_string(in_str) assert out_dict['tie_point'] == (368090.77975000005, 3956071.13823)", "point) np.testing.assert_almost_equal(out, 
np.float32(97.45558), decimal=3) def test_point_query_numpy_points(): points = np.asarray([[368022.581, 3955501.054],", "'[io][geotiff][GeoCorrd] Generation failed, because [Input is not a CRS: UTM", "(368090.77975000005, 3956071.13823) def test_prase_header_string_nodata(): out_dict = geotiff._prase_header_string(\"* 42113 gdal_nodata (7s)", "3955501.054], [368024.032, 3955500.465]]) out = geotiff.point_query(r'file/pix4d.diy/hasu_tanashi_20170525_Ins1RGB_30m_dsm.tif', points) expected = np.asarray([97.624344,", "assert out_dict['scale'] == (0.0029700000000000004, 0.0029700000000000004) def test_prase_header_string_tie_point(): in_str = \"*", "np.float32(97.45558), decimal=3) def test_point_query_numpy_points(): points = np.asarray([[368022.581, 3955501.054], [368024.032, 3955500.465]])", "captured = capsys.readouterr() np.testing.assert_almost_equal(mean_ht, np.float(72.31657466298653), decimal=3) def test_gis2pixel2gis(): geo_head_txt =", "(4H) (8, 8, 8, 8) * 259 compression (1H) 5", "/ 255) ax[1].set_title('rgb') # ----------------------------------------------- imarray_2d = rgb2gray(imarray_rgb) assert imarray_2d.shape", "def test_gis2pixel2gis(): geo_head_txt = \"\"\" TIFF file: 200423_G_M600pro_transparent_mosaic_group1.tif, 411 MiB,", "should be full out_dict = geotiff._prase_header_string(\"* 34737 geo_ascii_params (30s) b'UTM", "def test_prase_header_string_width(): out_dict = geotiff._prase_header_string(\"* 256 image_width (1H) 13503\") assert", "out_dict = geotiff._prase_header_string(in_str) captured = capsys.readouterr() assert f\"[io][geotiff][GeoCorrd] Comprehense [{in_str}]\"", "with pytest.raises(TypeError) as errinfo: roi_w3_out = geotiff._is_roi_type(roi_wrong_3) assert 'Only list", "0\" out_dict = geotiff._prase_header_string(in_str) assert out_dict['scale'] == (0.0029700000000000004, 0.0029700000000000004) def", "geotiff.point_query(r'file/pix4d.diy/hasu_tanashi_20170525_Ins1RGB_30m_dsm.tif', p_list) assert type(expected) == type(out) 
np.testing.assert_almost_equal(expected[0], out[0], decimal=3) np.testing.assert_almost_equal(expected[1],", "pages, not mem-mappable Page 0: 31255x19436x4, uint8, 8 bit, rgb,", "= \"* 33922 model_tie_point (6d) (0.0, 0.0, 0.0, 368090.77975000005, 3956071.13823,\"", "1223]]) fig, ax = plt.subplots(1,3, figsize=(12,4)) # ----------------------------------------------- imarray_rgb =", "im_out_2d, offsets_2d = geotiff.imarray_clip(imarray_2d, roi) ax[0].imshow(im_out_2d, cmap='gray') ax[0].set_title('gray') # -----------------------------------------------", "assert out_dict['width'] == 13503 def test_prase_header_string_length(): out_dict = geotiff._prase_header_string(\"* 257", "3956071.13823) def test_prase_header_string_nodata(): out_dict = geotiff._prase_header_string(\"* 42113 gdal_nodata (7s) b'-10000'\")", "geotiff._prase_header_string(in_str) captured = capsys.readouterr() assert f\"[io][geotiff][GeoCorrd] Comprehense [{in_str}]\" in captured.out", "float, mean_values = 97.562584 # assert mean_ht == np.float32(97.562584) np.testing.assert_almost_equal(mean_ht,", "5 * 262 photometric (1H) 2 * 273 strip_offsets (31255Q)", "out[0], decimal=3) np.testing.assert_almost_equal(expected[1], out[1], decimal=3) def test_point_query_wrong_types(): # [TODO] pass", "13503\") assert out_dict['width'] == 13503 def test_prase_header_string_length(): out_dict = geotiff._prase_header_string(\"*", "1, 1, 1026 * 34737 geo_ascii_params (30s) b'WGS 84 /", "1263], [2251, 1223]]) fig, ax = plt.subplots(1,3, figsize=(12,4)) # -----------------------------------------------", "np.asarray([[368022.581, 3955501.054], [368024.032, 3955500.465]]) out = geotiff.point_query(r'file/pix4d.diy/hasu_tanashi_20170525_Ins1RGB_30m_dsm.tif', points) expected =", "def test_prase_header_string_nodata(): out_dict = geotiff._prase_header_string(\"* 42113 gdal_nodata (7s) b'-10000'\") assert", "out_dict = geotiff._prase_header_string(\"* 42113 gdal_nodata (7s) b'-10000'\") assert out_dict['nodata'] ==", 
"[ 484593.93077419, 3862259.55455913], [ 484593.67474654, 3862259.42413431]]) header = geotiff._prase_header_string(geo_head_txt) expected_pixel", "errinfo: roi_w2_out = geotiff._is_roi_type(roi_wrong_2) assert 'Only list contains numpy.ndarray points", "* 34737 geo_ascii_params (30s) b'WGS 84 / UTM zone 53N|WGS", "19866 def test_prase_header_string_scale(): in_str = \"* 33550 model_pixel_scale (3d) (0.0029700000000000004,", "25445], [17228, 25956], [16972, 26086]]) pixel_coord = geotiff.geo2pixel(gis_coord, header) np.testing.assert_almost_equal(pixel_coord,", "[2251, 1223]]) fig, ax = plt.subplots(1,3, figsize=(12,4)) # ----------------------------------------------- imarray_rgb", "gdal_nodata (7s) b'-10000'\") assert out_dict['nodata'] == -10000 def test_prase_header_string_proj_normal(capsys): in_str", "257 image_length (1H) 31255 * 258 bits_per_sample (4H) (8, 8,", "import matplotlib.pyplot as plt def test_prase_header_string_width(): out_dict = geotiff._prase_header_string(\"* 256", "points are supported' in str(errinfo.value) with pytest.raises(TypeError) as errinfo: roi_w2_out", "roi) ax[0].imshow(im_out_2d, cmap='gray') ax[0].set_title('gray') # ----------------------------------------------- imarray_rgba = np.dstack((imarray_rgb, np.ones((3456,", "464, 464, 464, 464, 464, 464, 464, * 284 planar_configuration", "rows_per_strip (1H) 1 * 279 strip_byte_counts (31255Q) (464, 464, 464,", "def test_point_query_input_ndarray(): # [Todo] pass def test_mean_values(capsys): mean_ht = geotiff.mean_values(r'file/pix4d.diy/hasu_tanashi_20170525_Ins1RGB_30m_dsm.tif')", "because [Input is not a CRS: UTM zone 54N]' in", "np.testing.assert_almost_equal(expected[1], out[1], decimal=3) def test_point_query_wrong_types(): # [TODO] pass def test_point_query_input_ndarray():", "roi1 = np.asarray([[123, 456], [456, 789]]) roi2 = [roi1, roi1]", "13503 def test_prase_header_string_length(): out_dict = geotiff._prase_header_string(\"* 257 image_length (1H) 19866\")", "UTM zone 
54N|WGS 84|'\" out_dict = geotiff._prase_header_string(in_str) captured = capsys.readouterr()", "test_prase_header_string_scale(): in_str = \"* 33550 model_pixel_scale (3d) (0.0029700000000000004, 0.0029700000000000004, 0\"", "8 bit, rgb, lzw * 256 image_width (1H) 19436 *", "[2227, 1263], [2251, 1223]]) fig, ax = plt.subplots(1,3, figsize=(12,4)) #", "== -10000 def test_prase_header_string_proj_normal(capsys): in_str = \"* 34737 geo_ascii_params (30s)", "test_prase_header_string_tie_point(): in_str = \"* 33922 model_tie_point (6d) (0.0, 0.0, 0.0,", "np.testing.assert_almost_equal(gis_revert, gis_coord, decimal=3) def test_is_roi_type(): roi1 = np.asarray([[123, 456], [456,", "model_pixel_scale (3d) (0.001, 0.001, 0.0) * 33922 model_tie_point (6d) (0.0,", "assert f\"[io][geotiff][GeoCorrd] Comprehense [{in_str}]\" in captured.out assert out_dict['proj'] == pyproj.CRS.from_epsg(32654)", "(0.0029700000000000004, 0.0029700000000000004, 0\" out_dict = geotiff._prase_header_string(in_str) assert out_dict['scale'] == (0.0029700000000000004,", "endian, bigtiff Series 0: 31255x19436x4, uint8, YXS, 1 pages, not", "8) * 259 compression (1H) 5 * 262 photometric (1H)", "def test_prase_header_string_proj_error(capsys): # should raise error because WGS 84 /", "test_mean_values(capsys): mean_ht = geotiff.mean_values(r'file/pix4d.diy/hasu_tanashi_20170525_Ins1RGB_30m_dsm.tif') captured = capsys.readouterr() # When not", "gis_coord, decimal=3) def test_is_roi_type(): roi1 = np.asarray([[123, 456], [456, 789]])", "= geotiff.point_query(r'file/pix4d.diy/hasu_tanashi_20170525_Ins1RGB_30m_dsm.tif', points) expected = np.asarray([97.624344, 97.59617]) np.testing.assert_almost_equal(out, expected, decimal=3)", "= geotiff._prase_header_string(in_str) captured = capsys.readouterr() assert f\"[io][geotiff][GeoCorrd] Comprehense [{in_str}]\" in", "502506, 502970, 5 * 277 samples_per_pixel (1H) 4 * 278", "captured = capsys.readouterr() assert '[io][geotiff][GeoCorrd] Generation failed, 
because [Input is", "= [roi1, roi1] roi_wrong_1 = (123, 345) roi_wrong_2 = [123,", "assert out_dict['tie_point'] == (368090.77975000005, 3956071.13823) def test_prase_header_string_nodata(): out_dict = geotiff._prase_header_string(\"*", "imarray_2d.shape == (3456, 4608) im_out_2d, offsets_2d = geotiff.imarray_clip(imarray_2d, roi) ax[0].imshow(im_out_2d,", "0.0, 0.0, 368090.77975000005, 3956071.13823, 0\" out_dict = geotiff._prase_header_string(in_str) assert out_dict['tie_point']", "[16972, 26086]]) pixel_coord = geotiff.geo2pixel(gis_coord, header) np.testing.assert_almost_equal(pixel_coord, expected_pixel) gis_revert =", "[ 484593.41064743, 3862259.92582402], [ 484593.64841806, 3862260.06515117], [ 484593.93077419, 3862259.55455913], [", "== roi2 with pytest.raises(TypeError) as errinfo: roi_w1_out = geotiff._is_roi_type(roi_wrong_1) assert", "from skimage.io import imread from skimage.color import rgb2gray import matplotlib.pyplot", "501114, 501578, 502042, 502506, 502970, 5 * 277 samples_per_pixel (1H)", "== pyproj.CRS.from_epsg(32654) def test_prase_header_string_proj_error(capsys): # should raise error because WGS", "0\" out_dict = geotiff._prase_header_string(in_str) assert out_dict['tie_point'] == (368090.77975000005, 3956071.13823) def", "0.0) * 33922 model_tie_point (6d) (0.0, 0.0, 0.0, 484576.70205, 3862285.5109300003,", "* 279 strip_byte_counts (31255Q) (464, 464, 464, 464, 464, 464,", "(30s) b'UTM zone 54N|WGS 84|'\") captured = capsys.readouterr() assert '[io][geotiff][GeoCorrd]", "model_tie_point (6d) (0.0, 0.0, 0.0, 368090.77975000005, 3956071.13823,\" out_dict = geotiff._prase_header_string(in_str)", "decimal=3) def test_is_roi_type(): roi1 = np.asarray([[123, 456], [456, 789]]) roi2", "0.0, 484576.70205, 3862285.5109300003, * 34735 geo_key_directory (32H) (1, 1, 0,", "geotiff.pixel2geo(pixel_coord, header) np.testing.assert_almost_equal(gis_revert, gis_coord, decimal=3) def test_is_roi_type(): roi1 = np.asarray([[123,", "= imread(photo_path) 
assert imarray_rgb.shape == (3456, 4608, 3) im_out_rgb, offsets_rgb", "points] expected = [np.asarray([97.45558]), np.asarray([97.624344, 97.59617])] out = geotiff.point_query(r'file/pix4d.diy/hasu_tanashi_20170525_Ins1RGB_30m_dsm.tif', p_list)", "test_prase_header_string_nodata(): out_dict = geotiff._prase_header_string(\"* 42113 gdal_nodata (7s) b'-10000'\") assert out_dict['nodata']", "is not a CRS: UTM zone 54N]' in captured.out assert", "= [point, points] expected = [np.asarray([97.45558]), np.asarray([97.624344, 97.59617])] out =", "assert len(imarray) == 1 def test_clip_roi_geo(): poly = shp.read_shp2d('file/shp_test/test.shp') imarray,", "p_list) assert type(expected) == type(out) np.testing.assert_almost_equal(expected[0], out[0], decimal=3) np.testing.assert_almost_equal(expected[1], out[1],", "np.dstack((imarray_rgb, np.ones((3456, 4608)) * 255)) assert imarray_rgba.shape == (3456, 4608,", "import pyproj import pytest import numpy as np from easyric.io", "0.0029700000000000004, 0\" out_dict = geotiff._prase_header_string(in_str) assert out_dict['scale'] == (0.0029700000000000004, 0.0029700000000000004)", "256 image_width (1H) 13503\") assert out_dict['width'] == 13503 def test_prase_header_string_length():", "[456, 789]] roi1_out = geotiff._is_roi_type(roi1) assert roi1_out == [roi1] roi2_out", "im_out_rgba, offsets = geotiff.imarray_clip(imarray_rgba, roi) ax[2].imshow(im_out_rgba/255) ax[2].set_title('rgba') plt.show() def test_clip_roi_pixel():", "= geotiff._prase_header_string(\"* 257 image_length (1H) 19866\") assert out_dict['length'] == 19866", "(500650, 501114, 501578, 502042, 502506, 502970, 5 * 277 samples_per_pixel", "roi2 with pytest.raises(TypeError) as errinfo: roi_w1_out = geotiff._is_roi_type(roi_wrong_1) assert 'Only", "offsets_rgb = geotiff.imarray_clip(imarray_rgb, roi) ax[1].imshow(im_out_rgb / 255) ax[1].set_title('rgb') # -----------------------------------------------", "expected_pixel = np.asarray([[16972, 26086], [16708, 25585], 
[16946, 25445], [17228, 25956],", "(0.0, 0.0, 0.0, 368090.77975000005, 3956071.13823,\" out_dict = geotiff._prase_header_string(in_str) assert out_dict['tie_point']", "= geotiff.clip_roi(poly_pixel, 'file/tiff_test/2_12.tif', is_geo=False) assert len(imarray) == 1 def test_clip_roi_geo():", "34735 geo_key_directory (32H) (1, 1, 0, 7, 1024, 0, 1,", "(3456, 4608) im_out_2d, offsets_2d = geotiff.imarray_clip(imarray_2d, roi) ax[0].imshow(im_out_2d, cmap='gray') ax[0].set_title('gray')", "out_dict['proj'] == None def test_get_imarray_without_header(capsys): pass def test_get_imarray_with_header(capsys): pass def", "[TODO] pass def test_point_query_input_ndarray(): # [Todo] pass def test_mean_values(capsys): mean_ht", "'Only list contains numpy.ndarray points are supported' in str(errinfo.value) def", "= geotiff.geo2pixel(gis_coord, header) np.testing.assert_almost_equal(pixel_coord, expected_pixel) gis_revert = geotiff.pixel2geo(pixel_coord, header) np.testing.assert_almost_equal(gis_revert,", "1223], [2270, 1270], [2227, 1263], [2251, 1223]]) fig, ax =", "# assert mean_ht == np.float32(97.562584) np.testing.assert_almost_equal(mean_ht, np.float32(97.562584), decimal=3) # another", "def test_point_query_wrong_types(): # [TODO] pass def test_point_query_input_ndarray(): # [Todo] pass", "[roi1, roi1] roi_wrong_1 = (123, 345) roi_wrong_2 = [123, 456]", "Cannot convert np.nan to int, fixed by astype(float) mean_ht =", "= geotiff.point_query(r'file/pix4d.diy/hasu_tanashi_20170525_Ins1RGB_30m_dsm.tif', p_list) assert type(expected) == type(out) np.testing.assert_almost_equal(expected[0], out[0], decimal=3)", "3862259.55455913], [ 484593.67474654, 3862259.42413431]]) header = geotiff._prase_header_string(geo_head_txt) expected_pixel = np.asarray([[16972,", "[roi1] roi2_out = geotiff._is_roi_type(roi2) assert roi2_out == roi2 with pytest.raises(TypeError)", "[{in_str}]\" in captured.out assert out_dict['proj'] == pyproj.CRS.from_epsg(32654) def 
test_prase_header_string_proj_error(capsys): #", "imarray, offset = geotiff.clip_roi(poly['0'], 'file/tiff_test/2_12.tif', is_geo=True) assert len(imarray) == 1", "\"\"\" TIFF file: 200423_G_M600pro_transparent_mosaic_group1.tif, 411 MiB, little endian, bigtiff Series", "assert '[io][geotiff][GeoCorrd] Generation failed, because [Input is not a CRS:", "out[1], decimal=3) def test_point_query_wrong_types(): # [TODO] pass def test_point_query_input_ndarray(): #", "464, * 284 planar_configuration (1H) 1 * 305 software (12s)", "3862260.06515117], [ 484593.93077419, 3862259.55455913], [ 484593.67474654, 3862259.42413431]]) header = geotiff._prase_header_string(geo_head_txt)", "= [123, 456] roi_wrong_3 = [[123, 345], [456, 789]] roi1_out", "numpy.ndarray points are supported' in str(errinfo.value) def test_imarray_clip_2d_rgb_rgba(): photo_path =", "502042, 502506, 502970, 5 * 277 samples_per_pixel (1H) 4 *", "model_pixel_scale (3d) (0.0029700000000000004, 0.0029700000000000004, 0\" out_dict = geotiff._prase_header_string(in_str) assert out_dict['scale']", "test_prase_header_string_proj_normal(capsys): in_str = \"* 34737 geo_ascii_params (30s) b'WGS 84 /", "roi_wrong_3 = [[123, 345], [456, 789]] roi1_out = geotiff._is_roi_type(roi1) assert", "not a CRS: UTM zone 54N]' in captured.out assert out_dict['proj']", "3955500.465]]) point = np.asarray([[368023.004, 3955500.669]]) p_list = [point, points] expected", "skimage.io import imread from skimage.color import rgb2gray import matplotlib.pyplot as", "captured.out assert out_dict['proj'] == pyproj.CRS.from_epsg(32654) def test_prase_header_string_proj_error(capsys): # should raise", "(4H) (1, 1, 1, 1) * 33550 model_pixel_scale (3d) (0.001,", "8, 8, 8) * 259 compression (1H) 5 * 262", "(1, 1, 0, 7, 1024, 0, 1, 1, 1025, 0,", "7, 1024, 0, 1, 1, 1025, 0, 1, 1, 1026", "\"* 33550 model_pixel_scale (3d) (0.0029700000000000004, 0.0029700000000000004, 0\" out_dict = geotiff._prase_header_string(in_str)", "np.asarray([[368023.004, 
3955500.669]]) p_list = [point, points] expected = [np.asarray([97.45558]), np.asarray([97.624344,", "model_tie_point (6d) (0.0, 0.0, 0.0, 484576.70205, 3862285.5109300003, * 34735 geo_key_directory", "= geotiff.geo2pixel(poly['0'], geotiff.get_header('file/tiff_test/2_12.tif')) imarray, offset = geotiff.clip_roi(poly_pixel, 'file/tiff_test/2_12.tif', is_geo=False) assert", "= 'file/pix4d.diy/photos/DJI_0174.JPG' roi = np.asarray([[2251, 1223], [2270, 1270], [2227, 1263],", "== 1 def test_clip_roi_geo(): poly = shp.read_shp2d('file/shp_test/test.shp') imarray, offset =", "def test_point_query_numpy_points(): points = np.asarray([[368022.581, 3955501.054], [368024.032, 3955500.465]]) out =", "shp.read_shp2d('file/shp_test/test.shp') poly_pixel = geotiff.geo2pixel(poly['0'], geotiff.get_header('file/tiff_test/2_12.tif')) imarray, offset = geotiff.clip_roi(poly_pixel, 'file/tiff_test/2_12.tif',", "(1H) 1 * 279 strip_byte_counts (31255Q) (464, 464, 464, 464,", "1270], [2227, 1263], [2251, 1223]]) fig, ax = plt.subplots(1,3, figsize=(12,4))", "image_width (1H) 19436 * 257 image_length (1H) 31255 * 258", "assert out_dict['proj'] == None def test_get_imarray_without_header(capsys): pass def test_get_imarray_with_header(capsys): pass", "geo_ascii_params (30s) b'UTM zone 54N|WGS 84|'\") captured = capsys.readouterr() assert", "(3456, 4608, 4) im_out_rgba, offsets = geotiff.imarray_clip(imarray_rgba, roi) ax[2].imshow(im_out_rgba/255) ax[2].set_title('rgba')", "* 317 predictor (1H) 2 * 338 extra_samples (1H) 2", "ax[0].imshow(im_out_2d, cmap='gray') ax[0].set_title('gray') # ----------------------------------------------- imarray_rgba = np.dstack((imarray_rgb, np.ones((3456, 4608))", "p_list = [point, points] expected = [np.asarray([97.45558]), np.asarray([97.624344, 97.59617])] out", "(0.0029700000000000004, 0.0029700000000000004) def test_prase_header_string_tie_point(): in_str = \"* 33922 model_tie_point (6d)", "= 
geotiff.mean_values(r'file/pix4d.diy/hasu_tanashi_20170525_Ins1RGB_30m_dsm.tif') captured = capsys.readouterr() # When not convert to", "= np.asarray([97.624344, 97.59617]) np.testing.assert_almost_equal(out, expected, decimal=3) def test_point_query_list_numpy_points(): points =", "= geotiff._prase_header_string(in_str) assert out_dict['tie_point'] == (368090.77975000005, 3956071.13823) in_str = \"*", "84|'\") captured = capsys.readouterr() assert '[io][geotiff][GeoCorrd] Generation failed, because [Input", "assert out_dict['proj'] == pyproj.CRS.from_epsg(32654) def test_prase_header_string_proj_error(capsys): # should raise error", "(1H) 5 * 262 photometric (1H) 2 * 273 strip_offsets", "/ UTM zone 53N|WGS 84|' \"\"\" gis_coord = np.asarray([[ 484593.67474654,", "out_dict = geotiff._prase_header_string(in_str) assert out_dict['tie_point'] == (368090.77975000005, 3956071.13823) def test_prase_header_string_nodata():", "[ 484593.67474654, 3862259.42413431]]) header = geotiff._prase_header_string(geo_head_txt) expected_pixel = np.asarray([[16972, 26086],", "= geotiff._is_roi_type(roi_wrong_2) assert 'Only list contains numpy.ndarray points are supported'", "ax[2].set_title('rgba') plt.show() def test_clip_roi_pixel(): poly = shp.read_shp2d('file/shp_test/test.shp') poly_pixel = geotiff.geo2pixel(poly['0'],", "strip_byte_counts (31255Q) (464, 464, 464, 464, 464, 464, 464, 464,", "geotiff._is_roi_type(roi_wrong_3) assert 'Only list contains numpy.ndarray points are supported' in", "pyproj.CRS.from_epsg(32654) def test_prase_header_string_proj_error(capsys): # should raise error because WGS 84", "# [TODO] pass def test_point_query_input_ndarray(): # [Todo] pass def test_mean_values(capsys):", "mean_values = 97.562584 # assert mean_ht == np.float32(97.562584) np.testing.assert_almost_equal(mean_ht, np.float32(97.562584),", "(464, 464, 464, 464, 464, 464, 464, 464, 464, *", "* 273 strip_offsets (31255Q) (500650, 501114, 501578, 502042, 502506, 502970,", 
"----------------------------------------------- imarray_2d = rgb2gray(imarray_rgb) assert imarray_2d.shape == (3456, 4608) im_out_2d,", "3) im_out_rgb, offsets_rgb = geotiff.imarray_clip(imarray_rgb, roi) ax[1].imshow(im_out_rgb / 255) ax[1].set_title('rgb')", "points) expected = np.asarray([97.624344, 97.59617]) np.testing.assert_almost_equal(out, expected, decimal=3) def test_point_query_list_numpy_points():", "4) im_out_rgba, offsets = geotiff.imarray_clip(imarray_rgba, roi) ax[2].imshow(im_out_rgba/255) ax[2].set_title('rgba') plt.show() def", "roi_w2_out = geotiff._is_roi_type(roi_wrong_2) assert 'Only list contains numpy.ndarray points are", "== (0.0029700000000000004, 0.0029700000000000004) def test_prase_header_string_tie_point(): in_str = \"* 33922 model_tie_point", "not working in previous version: # Cannot convert np.nan to", "1 * 305 software (12s) b'pix4dmapper' * 317 predictor (1H)", "0: 31255x19436x4, uint8, 8 bit, rgb, lzw * 256 image_width", "= geotiff.pixel2geo(pixel_coord, header) np.testing.assert_almost_equal(gis_revert, gis_coord, decimal=3) def test_is_roi_type(): roi1 =", "by astype(float) mean_ht = geotiff.mean_values(r'file/tiff_test/2_12.tif') captured = capsys.readouterr() np.testing.assert_almost_equal(mean_ht, np.float(72.31657466298653),", "-10000 def test_prase_header_string_proj_normal(capsys): in_str = \"* 34737 geo_ascii_params (30s) b'WGS", "np.testing.assert_almost_equal(expected[0], out[0], decimal=3) np.testing.assert_almost_equal(expected[1], out[1], decimal=3) def test_point_query_wrong_types(): # [TODO]", "sample_format (4H) (1, 1, 1, 1) * 33550 model_pixel_scale (3d)", "are supported' in str(errinfo.value) with pytest.raises(TypeError) as errinfo: roi_w2_out =", "decimal=3) # another case that not working in previous version:", "97.562584 # assert mean_ht == np.float32(97.562584) np.testing.assert_almost_equal(mean_ht, np.float32(97.562584), decimal=3) #", "TIFF file: 200423_G_M600pro_transparent_mosaic_group1.tif, 411 
MiB, little endian, bigtiff Series 0:", "test_prase_header_string_width(): out_dict = geotiff._prase_header_string(\"* 256 image_width (1H) 13503\") assert out_dict['width']", "imarray_2d = rgb2gray(imarray_rgb) assert imarray_2d.shape == (3456, 4608) im_out_2d, offsets_2d", "mean_ht == np.float32(97.562584) np.testing.assert_almost_equal(mean_ht, np.float32(97.562584), decimal=3) # another case that", "test_point_query_wrong_types(): # [TODO] pass def test_point_query_input_ndarray(): # [Todo] pass def", "geotiff._prase_header_string(in_str) assert out_dict['scale'] == (0.0029700000000000004, 0.0029700000000000004) def test_prase_header_string_tie_point(): in_str =" ]
[ "self._city @city.setter def city(self, city): \"\"\"Sets the city of this", "\"\"\" self._address_line2 = address_line2 @property def city(self): \"\"\"Gets the city", "lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x, value ))", "help, refer to <a href='https://developer.ebay.com/api-docs/buy/marketplace_insights/types/ba:CountryCodeEnum'>eBay API documentation</a> # noqa: E501", "not None: self.county = county if postal_code is not None:", "ItemLocation. The county in which the item is located. #", "return self._address_line2 @address_line2.setter def address_line2(self, address_line2): \"\"\"Sets the address_line2 of", "is located. # noqa: E501 :param state_or_province: The state_or_province of", "The county of this ItemLocation. # noqa: E501 :type: str", "located. # noqa: E501 :return: The state_or_province of this ItemLocation.", "six class ItemLocation(object): \"\"\"NOTE: This class is auto generated by", "<br /><span class=\\\"tablenote\\\"> <b> Note: </b>Beginning in late January 2020,", "not None: self.country = country if county is not None:", "of this ItemLocation. # noqa: E501 The two-letter <a href=\\\"https://www.iso.org/iso-3166-country-codes.html\\\">ISO", "the item is located. # noqa: E501 :return: The state_or_province", "of this ItemLocation. The two-letter <a href=\\\"https://www.iso.org/iso-3166-country-codes.html\\\">ISO 3166</a> standard code", "this ItemLocation. # noqa: E501 The second line of the", "key, value in self.items(): result[key] = value return result def", "item is located.<br /> <br /><span class=\\\"tablenote\\\"> <b> Note: </b>Beginning", "city if country is not None: self.country = country if", "= None self._address_line2 = None self._city = None self._country =", "the address_line1 of this ItemLocation. 
# noqa: E501 The first", "codes in slightly different ways, but an example would be", "class=\\\"legend-icon partners-icon\\\" title=\\\"Limited Release\\\" alt=\\\"Limited Release\\\" />(Limited Release)</a> The Marketplace", "<code>951**</code>.</span> # noqa: E501 :return: The postal_code of this ItemLocation.", "other): \"\"\"Returns true if both objects are equal\"\"\" if not", "value )) elif hasattr(value, \"to_dict\"): result[attr] = value.to_dict() elif isinstance(value,", "import re # noqa: F401 import six class ItemLocation(object): \"\"\"NOTE:", "- a model defined in Swagger\"\"\" # noqa: E501 self._address_line1", "postal_code @property def state_or_province(self): \"\"\"Gets the state_or_province of this ItemLocation.", "E501 :rtype: str \"\"\" return self._postal_code @postal_code.setter def postal_code(self, postal_code):", ":rtype: str \"\"\" return self._address_line2 @address_line2.setter def address_line2(self, address_line2): \"\"\"Sets", "# noqa: E501 The two-letter <a href=\\\"https://www.iso.org/iso-3166-country-codes.html\\\">ISO 3166</a> standard code", "\"\"\"Gets the county of this ItemLocation. # noqa: E501 The", "noqa: E501 :return: The state_or_province of this ItemLocation. # noqa:", ":rtype: str \"\"\" return self._country @country.setter def country(self, country): \"\"\"Sets", "\"\"\" Marketplace Insights API <a href=\\\"https://developer.ebay.com/api-docs/static/versioning.html#limited\\\" target=\\\"_blank\\\"> <img src=\\\"/cms/img/docs/partners-api.svg\\\" class=\\\"legend-icon", "E501 :rtype: str \"\"\" return self._country @country.setter def country(self, country):", "noqa: E501 :return: The postal_code of this ItemLocation. 
# noqa:", "Different countries will mask postal/zip codes in slightly different ways,", "not None: self.address_line2 = address_line2 if city is not None:", "refer to <a href='https://developer.ebay.com/api-docs/buy/marketplace_insights/types/ba:CountryCodeEnum'>eBay API documentation</a> # noqa: E501 :param", "is not None: self.address_line1 = address_line1 if address_line2 is not", "self._county = county @property def postal_code(self): \"\"\"Gets the postal_code of", "= { 'address_line1': 'str', 'address_line2': 'str', 'city': 'str', 'country': 'str',", "__repr__(self): \"\"\"For `print` and `pprint`\"\"\" return self.to_str() def __eq__(self, other):", "of this ItemLocation. # noqa: E501 :type: str \"\"\" self._state_or_province", "category, and product and returns the of sales history of", "both objects are not equal\"\"\" return not self == other", "state_or_province: The state_or_province of this ItemLocation. # noqa: E501 :type:", "the value is attribute type. attribute_map (dict): The key is", "\"\"\" self._country = country @property def county(self): \"\"\"Gets the county", "`pprint`\"\"\" return self.to_str() def __eq__(self, other): \"\"\"Returns true if both", "all users. Different countries will mask postal/zip codes in slightly", "'county', 'postal_code': 'postalCode', 'state_or_province': 'stateOrProvince' } def __init__(self, address_line1=None, address_line2=None,", "of this ItemLocation. # noqa: E501 The city in which", "Generated by: https://github.com/swagger-api/swagger-codegen.git \"\"\" import pprint import re # noqa:", "code generator program. Do not edit the class manually. \"\"\"", "located. # noqa: E501 :return: The county of this ItemLocation.", "city): \"\"\"Sets the city of this ItemLocation. The city in", "key in definition. \"\"\" swagger_types = { 'address_line1': 'str', 'address_line2':", "noqa: E501 :return: The address_line2 of this ItemLocation. 
# noqa:", "model\"\"\" return pprint.pformat(self.to_dict()) def __repr__(self): \"\"\"For `print` and `pprint`\"\"\" return", "value in self.items(): result[key] = value return result def to_str(self):", "\"\"\" return self._state_or_province @state_or_province.setter def state_or_province(self, state_or_province): \"\"\"Sets the state_or_province", "string representation of the model\"\"\" return pprint.pformat(self.to_dict()) def __repr__(self): \"\"\"For", "Marketplace Insights API <a href=\\\"https://developer.ebay.com/api-docs/static/versioning.html#limited\\\" target=\\\"_blank\\\"> <img src=\\\"/cms/img/docs/partners-api.svg\\\" class=\\\"legend-icon partners-icon\\\"", "of the street address. This field may contain such values", "'str' } attribute_map = { 'address_line1': 'addressLine1', 'address_line2': 'addressLine2', 'city':", "return result def to_str(self): \"\"\"Returns the string representation of the", "The county in which the item is located. # noqa:", "this ItemLocation. The second line of the street address. This", "defined in Swagger\"\"\" # noqa: E501 self._address_line1 = None self._address_line2", "self._city = None self._country = None self._county = None self._postal_code", "the item is located.<br /> <br /><span class=\\\"tablenote\\\"> <b> Note:", "\"\"\"For `print` and `pprint`\"\"\" return self.to_str() def __eq__(self, other): \"\"\"Returns", "name and the value is json key in definition. \"\"\"", "The key is attribute name and the value is attribute", "E501 :return: The address_line1 of this ItemLocation. # noqa: E501", "self.to_str() def __eq__(self, other): \"\"\"Returns true if both objects are", "\"\"\"Sets the country of this ItemLocation. 
The two-letter <a href=\\\"https://www.iso.org/iso-3166-country-codes.html\\\">ISO", "if county is not None: self.county = county if postal_code", "attribute_map = { 'address_line1': 'addressLine1', 'address_line2': 'addressLine2', 'city': 'city', 'country':", "the ability to search for sold items on eBay by", "'city': 'city', 'country': 'country', 'county': 'county', 'postal_code': 'postalCode', 'state_or_province': 'stateOrProvince'", "self._country = country @property def county(self): \"\"\"Gets the county of", "@property def postal_code(self): \"\"\"Gets the postal_code of this ItemLocation. #", "The county of this ItemLocation. # noqa: E501 :rtype: str", "county is not None: self.county = county if postal_code is", "not isinstance(other, ItemLocation): return False return self.__dict__ == other.__dict__ def", "noqa: E501 self._address_line1 = None self._address_line2 = None self._city =", "in definition. \"\"\" swagger_types = { 'address_line1': 'str', 'address_line2': 'str',", "as an apartment or suite number. # noqa: E501 :return:", "ItemLocation(object): \"\"\"NOTE: This class is auto generated by the swagger", "documentation</a> # noqa: E501 :param country: The country of this", "noqa: F401 import six class ItemLocation(object): \"\"\"NOTE: This class is", "\"\"\" self._address_line1 = address_line1 @property def address_line2(self): \"\"\"Gets the address_line2", "address_line2(self, address_line2): \"\"\"Sets the address_line2 of this ItemLocation. The second", "self.postal_code = postal_code if state_or_province is not None: self.state_or_province =", "# noqa: E501 :return: The address_line1 of this ItemLocation. 
#", "noqa: E501 \"\"\"ItemLocation - a model defined in Swagger\"\"\" #", "\"to_dict\") else item, value.items() )) else: result[attr] = value if", "noqa: E501 :rtype: str \"\"\" return self._address_line2 @address_line2.setter def address_line2(self,", "/><span class=\\\"tablenote\\\"> <b> Note: </b>Beginning in late January 2020, the", "state_or_province=None): # noqa: E501 \"\"\"ItemLocation - a model defined in", "# noqa: E501 :type: str \"\"\" self._address_line2 = address_line2 @property", "ability to search for sold items on eBay by keyword,", "E501 :rtype: str \"\"\" return self._address_line1 @address_line1.setter def address_line1(self, address_line1):", "address_line1): \"\"\"Sets the address_line1 of this ItemLocation. The first line", "self._postal_code @postal_code.setter def postal_code(self, postal_code): \"\"\"Sets the postal_code of this", "self._address_line1 = None self._address_line2 = None self._city = None self._country", "which the item is located. For implementation help, refer to", "postal_code of this ItemLocation. # noqa: E501 :rtype: str \"\"\"", "E501 The postal code (or zip code in US) where", "{ 'address_line1': 'str', 'address_line2': 'str', 'city': 'str', 'country': 'str', 'county':", "\"\"\" return self._country @country.setter def country(self, country): \"\"\"Sets the country", "dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1],", "# noqa: E501 :type: str \"\"\" self._state_or_province = state_or_province def", "# noqa: E501 :param address_line2: The address_line2 of this ItemLocation.", "# noqa: E501 :rtype: str \"\"\" return self._city @city.setter def", "both objects are equal\"\"\" if not isinstance(other, ItemLocation): return False", "address_line2 of this ItemLocation. The second line of the street", "number. # noqa: E501 :return: The address_line2 of this ItemLocation.", "ItemLocation. # noqa: E501 :type: str \"\"\" self._city = city", "ItemLocation. 
The two-letter <a href=\\\"https://www.iso.org/iso-3166-country-codes.html\\\">ISO 3166</a> standard code that indicates", ":rtype: str \"\"\" return self._postal_code @postal_code.setter def postal_code(self, postal_code): \"\"\"Sets", "first line of the street address. # noqa: E501 :return:", "E501 :return: The postal_code of this ItemLocation. # noqa: E501", "this ItemLocation. The first line of the street address. #", "def state_or_province(self): \"\"\"Gets the state_or_province of this ItemLocation. # noqa:", "hasattr(item[1], \"to_dict\") else item, value.items() )) else: result[attr] = value", "state_or_province of this ItemLocation. # noqa: E501 :rtype: str \"\"\"", "state or province in which the item is located. #", "for key, value in self.items(): result[key] = value return result", "ItemLocation. # noqa: E501 :rtype: str \"\"\" return self._state_or_province @state_or_province.setter", "self._county @county.setter def county(self, county): \"\"\"Sets the county of this", "re # noqa: F401 import six class ItemLocation(object): \"\"\"NOTE: This", "== other.__dict__ def __ne__(self, other): \"\"\"Returns true if both objects", "self._county = None self._postal_code = None self._state_or_province = None self.discriminator", "is attribute name and the value is json key in", "ItemLocation. # noqa: E501 The two-letter <a href=\\\"https://www.iso.org/iso-3166-country-codes.html\\\">ISO 3166</a> standard", "self._address_line2 = address_line2 @property def city(self): \"\"\"Gets the city of", "standard code that indicates the country in which the item", "# coding: utf-8 \"\"\" Marketplace Insights API <a href=\\\"https://developer.ebay.com/api-docs/static/versioning.html#limited\\\" target=\\\"_blank\\\">", "<a href=\\\"https://www.iso.org/iso-3166-country-codes.html\\\">ISO 3166</a> standard code that indicates the country in", "= {} for attr, _ in six.iteritems(self.swagger_types): value = getattr(self,", "definition. 
\"\"\" swagger_types = { 'address_line1': 'str', 'address_line2': 'str', 'city':", "str \"\"\" self._postal_code = postal_code @property def state_or_province(self): \"\"\"Gets the", "\"\"\"Returns true if both objects are not equal\"\"\" return not", "\"\"\" return self._address_line2 @address_line2.setter def address_line2(self, address_line2): \"\"\"Sets the address_line2", "address_line2(self): \"\"\"Gets the address_line2 of this ItemLocation. # noqa: E501", "self.discriminator = None if address_line1 is not None: self.address_line1 =", "{ 'address_line1': 'addressLine1', 'address_line2': 'addressLine2', 'city': 'city', 'country': 'country', 'county':", "\"\"\" self._city = city @property def country(self): \"\"\"Gets the country", "address_line1(self, address_line1): \"\"\"Sets the address_line1 of this ItemLocation. The first", "representation of the model\"\"\" return pprint.pformat(self.to_dict()) def __repr__(self): \"\"\"For `print`", "by keyword, GTIN, category, and product and returns the of", "type. attribute_map (dict): The key is attribute name and the", "title=\\\"Limited Release\\\" alt=\\\"Limited Release\\\" />(Limited Release)</a> The Marketplace Insights API", "this ItemLocation. # noqa: E501 The two-letter <a href=\\\"https://www.iso.org/iso-3166-country-codes.html\\\">ISO 3166</a>", "if hasattr(x, \"to_dict\") else x, value )) elif hasattr(value, \"to_dict\"):", "\"\"\"Sets the city of this ItemLocation. 
The city in which", "E501 :rtype: str \"\"\" return self._county @county.setter def county(self, county):", "item[1].to_dict()) if hasattr(item[1], \"to_dict\") else item, value.items() )) else: result[attr]", "\"\"\" self._postal_code = postal_code @property def state_or_province(self): \"\"\"Gets the state_or_province", "= country if county is not None: self.county = county", "return pprint.pformat(self.to_dict()) def __repr__(self): \"\"\"For `print` and `pprint`\"\"\" return self.to_str()", "# noqa: E501 The first line of the street address.", "to_str(self): \"\"\"Returns the string representation of the model\"\"\" return pprint.pformat(self.to_dict())", ":return: The address_line1 of this ItemLocation. # noqa: E501 :rtype:", "# noqa: E501 The city in which the item is", "E501 The first line of the street address. # noqa:", "items. # noqa: E501 OpenAPI spec version: v1_beta.2.2 Generated by:", "ItemLocation. # noqa: E501 :rtype: str \"\"\" return self._city @city.setter", "in US) where the item is located.<br /> <br /><span", "self._country @country.setter def country(self, country): \"\"\"Sets the country of this", "= city if country is not None: self.country = country", "E501 :type: str \"\"\" self._county = county @property def postal_code(self):", "E501 :param state_or_province: The state_or_province of this ItemLocation. # noqa:", "src=\\\"/cms/img/docs/partners-api.svg\\\" class=\\\"legend-icon partners-icon\\\" title=\\\"Limited Release\\\" alt=\\\"Limited Release\\\" />(Limited Release)</a> The", "'country': 'str', 'county': 'str', 'postal_code': 'str', 'state_or_province': 'str' } attribute_map", "postal_code of this ItemLocation. The postal code (or zip code", "provides the ability to search for sold items on eBay", ":param address_line1: The address_line1 of this ItemLocation. 
# noqa: E501", "result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda", "Marketplace Insights API provides the ability to search for sold", "str \"\"\" return self._postal_code @postal_code.setter def postal_code(self, postal_code): \"\"\"Sets the", "noqa: E501 :return: The county of this ItemLocation. # noqa:", "The city of this ItemLocation. # noqa: E501 :rtype: str", "is located. # noqa: E501 :return: The city of this", "state_or_province of this ItemLocation. The state or province in which", "located. # noqa: E501 :param city: The city of this", "<a href=\\\"https://developer.ebay.com/api-docs/static/versioning.html#limited\\\" target=\\\"_blank\\\"> <img src=\\\"/cms/img/docs/partners-api.svg\\\" class=\\\"legend-icon partners-icon\\\" title=\\\"Limited Release\\\" alt=\\\"Limited", "Insights API provides the ability to search for sold items", "noqa: E501 The first line of the street address. #", "noqa: E501 :type: str \"\"\" self._city = city @property def", "city of this ItemLocation. The city in which the item", "self.county = county if postal_code is not None: self.postal_code =", "= country @property def county(self): \"\"\"Gets the county of this", "address_line1 of this ItemLocation. The first line of the street", "of this ItemLocation. # noqa: E501 :type: str \"\"\" self._postal_code", "The city of this ItemLocation. # noqa: E501 :type: str", "first line of the street address. # noqa: E501 :param", "= address_line2 @property def city(self): \"\"\"Gets the city of this", "that indicates the country in which the item is located.", ":return: The address_line2 of this ItemLocation. 
# noqa: E501 :rtype:", "'addressLine1', 'address_line2': 'addressLine2', 'city': 'city', 'country': 'country', 'county': 'county', 'postal_code':", "str \"\"\" self._address_line1 = address_line1 @property def address_line2(self): \"\"\"Gets the", "# noqa: E501 self._address_line1 = None self._address_line2 = None self._city", ":type: str \"\"\" self._address_line1 = address_line1 @property def address_line2(self): \"\"\"Gets", "two-letter <a href=\\\"https://www.iso.org/iso-3166-country-codes.html\\\">ISO 3166</a> standard code that indicates the country", "this ItemLocation. # noqa: E501 The postal code (or zip", "'city': 'str', 'country': 'str', 'county': 'str', 'postal_code': 'str', 'state_or_province': 'str'", "ItemLocation. The city in which the item is located. #", "noqa: E501 :rtype: str \"\"\" return self._country @country.setter def country(self,", "ItemLocation. # noqa: E501 :type: str \"\"\" self._postal_code = postal_code", "else: result[attr] = value if issubclass(ItemLocation, dict): for key, value", "is located. # noqa: E501 :return: The state_or_province of this", "return self._country @country.setter def country(self, country): \"\"\"Sets the country of", "'country': 'country', 'county': 'county', 'postal_code': 'postalCode', 'state_or_province': 'stateOrProvince' } def", "# noqa: E501 :type: str \"\"\" self._postal_code = postal_code @property", "objects are equal\"\"\" if not isinstance(other, ItemLocation): return False return", "this ItemLocation. # noqa: E501 The city in which the", "def postal_code(self): \"\"\"Gets the postal_code of this ItemLocation. # noqa:", "'address_line2': 'str', 'city': 'str', 'country': 'str', 'county': 'str', 'postal_code': 'str',", "item is located. # noqa: E501 :return: The city of", "an apartment or suite number. # noqa: E501 :param address_line2:", "address_line2 of this ItemLocation. 
# noqa: E501 :rtype: str \"\"\"", "\"\"\" return self._county @county.setter def county(self, county): \"\"\"Sets the county", "this ItemLocation. # noqa: E501 The county in which the", "/> <br /><span class=\\\"tablenote\\\"> <b> Note: </b>Beginning in late January", "the displayed postal code will be masked to all users.", ":rtype: str \"\"\" return self._city @city.setter def city(self, city): \"\"\"Sets", "postal_code(self): \"\"\"Gets the postal_code of this ItemLocation. # noqa: E501", "slightly different ways, but an example would be <code>951**</code>.</span> #", "the street address. # noqa: E501 :param address_line1: The address_line1", "state_or_province def to_dict(self): \"\"\"Returns the model properties as a dict\"\"\"", "= None self._country = None self._county = None self._postal_code =", "for sold items on eBay by keyword, GTIN, category, and", "street address. # noqa: E501 :return: The address_line1 of this", "is located. # noqa: E501 :param county: The county of", "address. This field may contain such values as an apartment", "of this ItemLocation. # noqa: E501 The postal code (or", "program. Do not edit the class manually. \"\"\" \"\"\" Attributes:", "as an apartment or suite number. # noqa: E501 :param", "which the item is located. # noqa: E501 :param county:", ":rtype: str \"\"\" return self._state_or_province @state_or_province.setter def state_or_province(self, state_or_province): \"\"\"Sets", "E501 :rtype: str \"\"\" return self._address_line2 @address_line2.setter def address_line2(self, address_line2):", "this ItemLocation. # noqa: E501 :rtype: str \"\"\" return self._address_line1", "class is auto generated by the swagger code generator program.", "the of sales history of those items. # noqa: E501", "not edit the class manually. \"\"\" \"\"\" Attributes: swagger_types (dict):", "in which the item is located. 
For implementation help, refer", "if not isinstance(other, ItemLocation): return False return self.__dict__ == other.__dict__", "an example would be <code>951**</code>.</span> # noqa: E501 :return: The", "OpenAPI spec version: v1_beta.2.2 Generated by: https://github.com/swagger-api/swagger-codegen.git \"\"\" import pprint", "of this ItemLocation. # noqa: E501 The state or province", "list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, \"to_dict\")", "the city of this ItemLocation. The city in which the", "value if issubclass(ItemLocation, dict): for key, value in self.items(): result[key]", "def address_line2(self): \"\"\"Gets the address_line2 of this ItemLocation. # noqa:", "apartment or suite number. # noqa: E501 :return: The address_line2", "The country of this ItemLocation. # noqa: E501 :type: str", "field may contain such values as an apartment or suite", "values as an apartment or suite number. # noqa: E501", "ItemLocation. # noqa: E501 :type: str \"\"\" self._address_line2 = address_line2", "this ItemLocation. # noqa: E501 :rtype: str \"\"\" return self._city", "self._address_line1 = address_line1 @property def address_line2(self): \"\"\"Gets the address_line2 of", "\"\"\"Gets the address_line2 of this ItemLocation. # noqa: E501 The", "located. For implementation help, refer to <a href='https://developer.ebay.com/api-docs/buy/marketplace_insights/types/ba:CountryCodeEnum'>eBay API documentation</a>", "For implementation help, refer to <a href='https://developer.ebay.com/api-docs/buy/marketplace_insights/types/ba:CountryCodeEnum'>eBay API documentation</a> #", "a dict\"\"\" result = {} for attr, _ in six.iteritems(self.swagger_types):", "province in which the item is located. # noqa: E501", "address. 
# noqa: E501 :return: The address_line1 of this ItemLocation.", "issubclass(ItemLocation, dict): for key, value in self.items(): result[key] = value", "@city.setter def city(self, city): \"\"\"Sets the city of this ItemLocation.", "swagger code generator program. Do not edit the class manually.", "key is attribute name and the value is attribute type.", "\"\"\" swagger_types = { 'address_line1': 'str', 'address_line2': 'str', 'city': 'str',", "item is located. # noqa: E501 :return: The county of", "country of this ItemLocation. # noqa: E501 :type: str \"\"\"", "code (or zip code in US) where the item is", "self._state_or_province = state_or_province def to_dict(self): \"\"\"Returns the model properties as", "ItemLocation. The second line of the street address. This field", "'state_or_province': 'stateOrProvince' } def __init__(self, address_line1=None, address_line2=None, city=None, country=None, county=None,", "noqa: E501 :rtype: str \"\"\" return self._address_line1 @address_line1.setter def address_line1(self,", "return self._county @county.setter def county(self, county): \"\"\"Sets the county of", "self.__dict__ == other.__dict__ def __ne__(self, other): \"\"\"Returns true if both", "'addressLine2', 'city': 'city', 'country': 'country', 'county': 'county', 'postal_code': 'postalCode', 'state_or_province':", "https://github.com/swagger-api/swagger-codegen.git \"\"\" import pprint import re # noqa: F401 import", "address_line2): \"\"\"Sets the address_line2 of this ItemLocation. The second line", "mask postal/zip codes in slightly different ways, but an example", "E501 :param postal_code: The postal_code of this ItemLocation. 
# noqa:", "item: (item[0], item[1].to_dict()) if hasattr(item[1], \"to_dict\") else item, value.items() ))", ":type: str \"\"\" self._address_line2 = address_line2 @property def city(self): \"\"\"Gets", "str \"\"\" return self._country @country.setter def country(self, country): \"\"\"Sets the", "\"\"\"Returns the model properties as a dict\"\"\" result = {}", "API provides the ability to search for sold items on", "true if both objects are equal\"\"\" if not isinstance(other, ItemLocation):", "def to_str(self): \"\"\"Returns the string representation of the model\"\"\" return", "of the street address. # noqa: E501 :param address_line1: The", "example would be <code>951**</code>.</span> # noqa: E501 :param postal_code: The", "country(self): \"\"\"Gets the country of this ItemLocation. # noqa: E501", ":type: str \"\"\" self._city = city @property def country(self): \"\"\"Gets", "postal_code=None, state_or_province=None): # noqa: E501 \"\"\"ItemLocation - a model defined", "ItemLocation. # noqa: E501 :rtype: str \"\"\" return self._address_line1 @address_line1.setter", "href='https://developer.ebay.com/api-docs/buy/marketplace_insights/types/ba:CountryCodeEnum'>eBay API documentation</a> # noqa: E501 :param country: The country", "`print` and `pprint`\"\"\" return self.to_str() def __eq__(self, other): \"\"\"Returns true", "name and the value is attribute type. attribute_map (dict): The", "spec version: v1_beta.2.2 Generated by: https://github.com/swagger-api/swagger-codegen.git \"\"\" import pprint import", "this ItemLocation. # noqa: E501 :rtype: str \"\"\" return self._address_line2", "str \"\"\" self._county = county @property def postal_code(self): \"\"\"Gets the", "ItemLocation. # noqa: E501 :rtype: str \"\"\" return self._address_line2 @address_line2.setter", "GTIN, category, and product and returns the of sales history", "of this ItemLocation. # noqa: E501 :type: str \"\"\" self._city", "country in which the item is located. 
For implementation help,", "import pprint import re # noqa: F401 import six class", "for attr, _ in six.iteritems(self.swagger_types): value = getattr(self, attr) if", "\"\"\"Returns the string representation of the model\"\"\" return pprint.pformat(self.to_dict()) def", "def country(self): \"\"\"Gets the country of this ItemLocation. # noqa:", "self.address_line2 = address_line2 if city is not None: self.city =", "= list(map( lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,", "number. # noqa: E501 :param address_line2: The address_line2 of this", "ItemLocation. # noqa: E501 :rtype: str \"\"\" return self._country @country.setter", "<code>951**</code>.</span> # noqa: E501 :param postal_code: The postal_code of this", "The address_line1 of this ItemLocation. # noqa: E501 :type: str", "\"\"\"Sets the address_line2 of this ItemLocation. The second line of", "suite number. # noqa: E501 :param address_line2: The address_line2 of", "The key is attribute name and the value is json", "} def __init__(self, address_line1=None, address_line2=None, city=None, country=None, county=None, postal_code=None, state_or_province=None):", "of the street address. # noqa: E501 :return: The address_line1", "manually. \"\"\" \"\"\" Attributes: swagger_types (dict): The key is attribute", "is not None: self.county = county if postal_code is not", "or province in which the item is located. # noqa:", "@property def state_or_province(self): \"\"\"Gets the state_or_province of this ItemLocation. #", "/>(Limited Release)</a> The Marketplace Insights API provides the ability to", ":return: The city of this ItemLocation. # noqa: E501 :rtype:", "# noqa: E501 :type: str \"\"\" self._county = county @property", "this ItemLocation. 
# noqa: E501 The state or province in", "noqa: E501 OpenAPI spec version: v1_beta.2.2 Generated by: https://github.com/swagger-api/swagger-codegen.git \"\"\"", "@address_line1.setter def address_line1(self, address_line1): \"\"\"Sets the address_line1 of this ItemLocation.", "E501 :param county: The county of this ItemLocation. # noqa:", "None if address_line1 is not None: self.address_line1 = address_line1 if", "county): \"\"\"Sets the county of this ItemLocation. The county in", "E501 :rtype: str \"\"\" return self._state_or_province @state_or_province.setter def state_or_province(self, state_or_province):", "else item, value.items() )) else: result[attr] = value if issubclass(ItemLocation,", "The state_or_province of this ItemLocation. # noqa: E501 :rtype: str", "implementation help, refer to <a href='https://developer.ebay.com/api-docs/buy/marketplace_insights/types/ba:CountryCodeEnum'>eBay API documentation</a> # noqa:", "getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x:", "city=None, country=None, county=None, postal_code=None, state_or_province=None): # noqa: E501 \"\"\"ItemLocation -", "this ItemLocation. # noqa: E501 :rtype: str \"\"\" return self._county", "county of this ItemLocation. The county in which the item", "county: The county of this ItemLocation. # noqa: E501 :type:", "ways, but an example would be <code>951**</code>.</span> # noqa: E501", "are equal\"\"\" if not isinstance(other, ItemLocation): return False return self.__dict__", "late January 2020, the displayed postal code will be masked", "The Marketplace Insights API provides the ability to search for", "street address. # noqa: E501 :param address_line1: The address_line1 of", "this ItemLocation. # noqa: E501 :type: str \"\"\" self._county =", "def postal_code(self, postal_code): \"\"\"Sets the postal_code of this ItemLocation. 
The", "# noqa: E501 :rtype: str \"\"\" return self._county @county.setter def", "hasattr(value, \"to_dict\"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] =", "def __init__(self, address_line1=None, address_line2=None, city=None, country=None, county=None, postal_code=None, state_or_province=None): #", "# noqa: E501 :return: The country of this ItemLocation. #", "if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if", "E501 :type: str \"\"\" self._postal_code = postal_code @property def state_or_province(self):", "noqa: E501 :type: str \"\"\" self._state_or_province = state_or_province def to_dict(self):", "city of this ItemLocation. # noqa: E501 :type: str \"\"\"", "auto generated by the swagger code generator program. Do not", ":return: The country of this ItemLocation. # noqa: E501 :rtype:", "postal_code is not None: self.postal_code = postal_code if state_or_province is", "ItemLocation. The state or province in which the item is", "\"\"\"Gets the country of this ItemLocation. # noqa: E501 The", "county in which the item is located. # noqa: E501", "# noqa: E501 :param city: The city of this ItemLocation.", "this ItemLocation. # noqa: E501 :type: str \"\"\" self._country =", "def __eq__(self, other): \"\"\"Returns true if both objects are equal\"\"\"", "ItemLocation. # noqa: E501 :type: str \"\"\" self._state_or_province = state_or_province", "state_or_province of this ItemLocation. # noqa: E501 :type: str \"\"\"", "the address_line2 of this ItemLocation. The second line of the", "and returns the of sales history of those items. #", "of the model\"\"\" return pprint.pformat(self.to_dict()) def __repr__(self): \"\"\"For `print` and", "attribute name and the value is json key in definition.", "line of the street address. 
# noqa: E501 :param address_line1:", "(dict): The key is attribute name and the value is", ")) elif hasattr(value, \"to_dict\"): result[attr] = value.to_dict() elif isinstance(value, dict):", "the value is json key in definition. \"\"\" swagger_types =", ":param county: The county of this ItemLocation. # noqa: E501", "or suite number. # noqa: E501 :param address_line2: The address_line2", "is located.<br /> <br /><span class=\\\"tablenote\\\"> <b> Note: </b>Beginning in", "API <a href=\\\"https://developer.ebay.com/api-docs/static/versioning.html#limited\\\" target=\\\"_blank\\\"> <img src=\\\"/cms/img/docs/partners-api.svg\\\" class=\\\"legend-icon partners-icon\\\" title=\\\"Limited Release\\\"", "the country of this ItemLocation. # noqa: E501 The two-letter", "@address_line2.setter def address_line2(self, address_line2): \"\"\"Sets the address_line2 of this ItemLocation.", "\"\"\" \"\"\" Attributes: swagger_types (dict): The key is attribute name", "located.<br /> <br /><span class=\\\"tablenote\\\"> <b> Note: </b>Beginning in late", "3166</a> standard code that indicates the country in which the", "the item is located. # noqa: E501 :param county: The", "coding: utf-8 \"\"\" Marketplace Insights API <a href=\\\"https://developer.ebay.com/api-docs/static/versioning.html#limited\\\" target=\\\"_blank\\\"> <img", "noqa: E501 The state or province in which the item", "(item[0], item[1].to_dict()) if hasattr(item[1], \"to_dict\") else item, value.items() )) else:", "address_line1 is not None: self.address_line1 = address_line1 if address_line2 is", "E501 :return: The city of this ItemLocation. # noqa: E501", "None self._state_or_province = None self.discriminator = None if address_line1 is", ":param address_line2: The address_line2 of this ItemLocation. # noqa: E501", "'state_or_province': 'str' } attribute_map = { 'address_line1': 'addressLine1', 'address_line2': 'addressLine2',", "The postal_code of this ItemLocation. 
# noqa: E501 :type: str", "result = {} for attr, _ in six.iteritems(self.swagger_types): value =", "those items. # noqa: E501 OpenAPI spec version: v1_beta.2.2 Generated", "None self._address_line2 = None self._city = None self._country = None", "href='https://developer.ebay.com/api-docs/buy/marketplace_insights/types/ba:CountryCodeEnum'>eBay API documentation</a> # noqa: E501 :return: The country of", "ItemLocation. # noqa: E501 :rtype: str \"\"\" return self._county @county.setter", "located. # noqa: E501 :param county: The county of this", "which the item is located. # noqa: E501 :param city:", "__ne__(self, other): \"\"\"Returns true if both objects are not equal\"\"\"", "swagger_types = { 'address_line1': 'str', 'address_line2': 'str', 'city': 'str', 'country':", "\"\"\"ItemLocation - a model defined in Swagger\"\"\" # noqa: E501", "the city of this ItemLocation. # noqa: E501 The city", "'address_line1': 'addressLine1', 'address_line2': 'addressLine2', 'city': 'city', 'country': 'country', 'county': 'county',", "@county.setter def county(self, county): \"\"\"Sets the county of this ItemLocation.", "# noqa: E501 :rtype: str \"\"\" return self._postal_code @postal_code.setter def", "and the value is json key in definition. \"\"\" swagger_types", ":param state_or_province: The state_or_province of this ItemLocation. # noqa: E501", "= None if address_line1 is not None: self.address_line1 = address_line1", "\"to_dict\"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map(", "if both objects are equal\"\"\" if not isinstance(other, ItemLocation): return", "The postal_code of this ItemLocation. # noqa: E501 :rtype: str", "the postal_code of this ItemLocation. The postal code (or zip", "E501 The county in which the item is located. #", "on eBay by keyword, GTIN, category, and product and returns", "item is located. 
# noqa: E501 :param city: The city", "model properties as a dict\"\"\" result = {} for attr,", "noqa: E501 The city in which the item is located.", "this ItemLocation. The county in which the item is located.", "@property def city(self): \"\"\"Gets the city of this ItemLocation. #", "class=\\\"tablenote\\\"> <b> Note: </b>Beginning in late January 2020, the displayed", "return self._postal_code @postal_code.setter def postal_code(self, postal_code): \"\"\"Sets the postal_code of", "def __ne__(self, other): \"\"\"Returns true if both objects are not", "where the item is located.<br /> <br /><span class=\\\"tablenote\\\"> <b>", "# noqa: E501 :rtype: str \"\"\" return self._address_line2 @address_line2.setter def", "The city in which the item is located. # noqa:", "'postal_code': 'postalCode', 'state_or_province': 'stateOrProvince' } def __init__(self, address_line1=None, address_line2=None, city=None,", "= dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], \"to_dict\") else", "county of this ItemLocation. # noqa: E501 :type: str \"\"\"", "Release)</a> The Marketplace Insights API provides the ability to search", "result[attr] = value if issubclass(ItemLocation, dict): for key, value in", "not None: self.postal_code = postal_code if state_or_province is not None:", "the street address. # noqa: E501 :return: The address_line1 of", "# noqa: E501 :type: str \"\"\" self._city = city @property", "self._state_or_province @state_or_province.setter def state_or_province(self, state_or_province): \"\"\"Sets the state_or_province of this", "# noqa: E501 :param state_or_province: The state_or_province of this ItemLocation.", "E501 The two-letter <a href=\\\"https://www.iso.org/iso-3166-country-codes.html\\\">ISO 3166</a> standard code that indicates", "city(self, city): \"\"\"Sets the city of this ItemLocation. The city", "2020, the displayed postal code will be masked to all", "address_line1 of this ItemLocation. 
# noqa: E501 :type: str \"\"\"", "county of this ItemLocation. # noqa: E501 :rtype: str \"\"\"", "postal_code(self, postal_code): \"\"\"Sets the postal_code of this ItemLocation. The postal", "self._state_or_province = None self.discriminator = None if address_line1 is not", "the item is located. # noqa: E501 :param city: The", "this ItemLocation. The state or province in which the item", "# noqa: E501 :return: The address_line2 of this ItemLocation. #", "of this ItemLocation. # noqa: E501 The county in which", "E501 :param city: The city of this ItemLocation. # noqa:", "the postal_code of this ItemLocation. # noqa: E501 The postal", "<a href='https://developer.ebay.com/api-docs/buy/marketplace_insights/types/ba:CountryCodeEnum'>eBay API documentation</a> # noqa: E501 :param country: The", ":type: str \"\"\" self._state_or_province = state_or_province def to_dict(self): \"\"\"Returns the", "key is attribute name and the value is json key", "# noqa: E501 :return: The postal_code of this ItemLocation. #", "generator program. Do not edit the class manually. \"\"\" \"\"\"", "= state_or_province def to_dict(self): \"\"\"Returns the model properties as a", "E501 self._address_line1 = None self._address_line2 = None self._city = None", "= None self._postal_code = None self._state_or_province = None self.discriminator =", "address_line2 is not None: self.address_line2 = address_line2 if city is", "str \"\"\" return self._address_line2 @address_line2.setter def address_line2(self, address_line2): \"\"\"Sets the", "None self._postal_code = None self._state_or_province = None self.discriminator = None", "# noqa: E501 OpenAPI spec version: v1_beta.2.2 Generated by: https://github.com/swagger-api/swagger-codegen.git", "ItemLocation. # noqa: E501 :type: str \"\"\" self._country = country", "the item is located. # noqa: E501 :param state_or_province: The", "ItemLocation. The first line of the street address. 
# noqa:", "E501 :param address_line2: The address_line2 of this ItemLocation. # noqa:", "href=\\\"https://developer.ebay.com/api-docs/static/versioning.html#limited\\\" target=\\\"_blank\\\"> <img src=\\\"/cms/img/docs/partners-api.svg\\\" class=\\\"legend-icon partners-icon\\\" title=\\\"Limited Release\\\" alt=\\\"Limited Release\\\"", "state_or_province(self): \"\"\"Gets the state_or_province of this ItemLocation. # noqa: E501", "noqa: E501 :return: The country of this ItemLocation. # noqa:", "if both objects are not equal\"\"\" return not self ==", "to <a href='https://developer.ebay.com/api-docs/buy/marketplace_insights/types/ba:CountryCodeEnum'>eBay API documentation</a> # noqa: E501 :return: The", ":type: str \"\"\" self._postal_code = postal_code @property def state_or_province(self): \"\"\"Gets", "The address_line2 of this ItemLocation. # noqa: E501 :type: str", "of this ItemLocation. The city in which the item is", "def __repr__(self): \"\"\"For `print` and `pprint`\"\"\" return self.to_str() def __eq__(self,", "and `pprint`\"\"\" return self.to_str() def __eq__(self, other): \"\"\"Returns true if", "noqa: E501 :type: str \"\"\" self._county = county @property def", "not None: self.state_or_province = state_or_province @property def address_line1(self): \"\"\"Gets the", "# noqa: E501 The postal code (or zip code in", "postal code will be masked to all users. Different countries", "this ItemLocation. # noqa: E501 :type: str \"\"\" self._state_or_province =", "list(map( lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x, value", "@property def country(self): \"\"\"Gets the country of this ItemLocation. #", "address_line2=None, city=None, country=None, county=None, postal_code=None, state_or_province=None): # noqa: E501 \"\"\"ItemLocation", "the country of this ItemLocation. 
The two-letter <a href=\\\"https://www.iso.org/iso-3166-country-codes.html\\\">ISO 3166</a>", "Insights API <a href=\\\"https://developer.ebay.com/api-docs/static/versioning.html#limited\\\" target=\\\"_blank\\\"> <img src=\\\"/cms/img/docs/partners-api.svg\\\" class=\\\"legend-icon partners-icon\\\" title=\\\"Limited", "None: self.address_line1 = address_line1 if address_line2 is not None: self.address_line2", "The address_line1 of this ItemLocation. # noqa: E501 :rtype: str", "self.items(): result[key] = value return result def to_str(self): \"\"\"Returns the", "target=\\\"_blank\\\"> <img src=\\\"/cms/img/docs/partners-api.svg\\\" class=\\\"legend-icon partners-icon\\\" title=\\\"Limited Release\\\" alt=\\\"Limited Release\\\" />(Limited", "The two-letter <a href=\\\"https://www.iso.org/iso-3166-country-codes.html\\\">ISO 3166</a> standard code that indicates the", "in Swagger\"\"\" # noqa: E501 self._address_line1 = None self._address_line2 =", "\"\"\"Returns true if both objects are equal\"\"\" if not isinstance(other,", "address_line1 of this ItemLocation. # noqa: E501 The first line", "of this ItemLocation. The postal code (or zip code in", "line of the street address. This field may contain such", "address_line1(self): \"\"\"Gets the address_line1 of this ItemLocation. # noqa: E501", ":param postal_code: The postal_code of this ItemLocation. # noqa: E501", "{} for attr, _ in six.iteritems(self.swagger_types): value = getattr(self, attr)", "\"\"\" Attributes: swagger_types (dict): The key is attribute name and", "\"\"\"Gets the city of this ItemLocation. # noqa: E501 The", "other): \"\"\"Returns true if both objects are not equal\"\"\" return", "items on eBay by keyword, GTIN, category, and product and", "isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x,", "country of this ItemLocation. 
# noqa: E501 The two-letter <a", "= county if postal_code is not None: self.postal_code = postal_code", "# noqa: E501 :return: The city of this ItemLocation. #", "result[key] = value return result def to_str(self): \"\"\"Returns the string", "import six class ItemLocation(object): \"\"\"NOTE: This class is auto generated", "def to_dict(self): \"\"\"Returns the model properties as a dict\"\"\" result", "the street address. This field may contain such values as", "returns the of sales history of those items. # noqa:", "# noqa: E501 :param address_line1: The address_line1 of this ItemLocation.", "or suite number. # noqa: E501 :return: The address_line2 of", "of this ItemLocation. # noqa: E501 :type: str \"\"\" self._address_line2", "<b> Note: </b>Beginning in late January 2020, the displayed postal", "return False return self.__dict__ == other.__dict__ def __ne__(self, other): \"\"\"Returns", "= None self._county = None self._postal_code = None self._state_or_province =", "not None: self.address_line1 = address_line1 if address_line2 is not None:", "str \"\"\" self._country = country @property def county(self): \"\"\"Gets the", "is json key in definition. \"\"\" swagger_types = { 'address_line1':", "address_line2 @property def city(self): \"\"\"Gets the city of this ItemLocation.", "'str', 'state_or_province': 'str' } attribute_map = { 'address_line1': 'addressLine1', 'address_line2':", "city(self): \"\"\"Gets the city of this ItemLocation. # noqa: E501", "an example would be <code>951**</code>.</span> # noqa: E501 :param postal_code:", "# noqa: E501 The state or province in which the", "in six.iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr]", "country(self, country): \"\"\"Sets the country of this ItemLocation. The two-letter", "state_or_province of this ItemLocation. 
# noqa: E501 The state or", "if issubclass(ItemLocation, dict): for key, value in self.items(): result[key] =", "__eq__(self, other): \"\"\"Returns true if both objects are equal\"\"\" if", "= getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda", ":return: The county of this ItemLocation. # noqa: E501 :rtype:", "city of this ItemLocation. # noqa: E501 The city in", "'address_line1': 'str', 'address_line2': 'str', 'city': 'str', 'country': 'str', 'county': 'str',", "= address_line2 if city is not None: self.city = city", "indicates the country in which the item is located. For", "address_line1 if address_line2 is not None: self.address_line2 = address_line2 if", "be <code>951**</code>.</span> # noqa: E501 :return: The postal_code of this", "None: self.county = county if postal_code is not None: self.postal_code", "state_or_province): \"\"\"Sets the state_or_province of this ItemLocation. The state or", "noqa: E501 :type: str \"\"\" self._address_line1 = address_line1 @property def", "value return result def to_str(self): \"\"\"Returns the string representation of", "to <a href='https://developer.ebay.com/api-docs/buy/marketplace_insights/types/ba:CountryCodeEnum'>eBay API documentation</a> # noqa: E501 :param country:", "in self.items(): result[key] = value return result def to_str(self): \"\"\"Returns", "masked to all users. Different countries will mask postal/zip codes", "address_line1: The address_line1 of this ItemLocation. 
# noqa: E501 :type:", "if postal_code is not None: self.postal_code = postal_code if state_or_province", "if address_line1 is not None: self.address_line1 = address_line1 if address_line2", "self._postal_code = postal_code @property def state_or_province(self): \"\"\"Gets the state_or_province of", "str \"\"\" self._state_or_province = state_or_province def to_dict(self): \"\"\"Returns the model", "other.__dict__ def __ne__(self, other): \"\"\"Returns true if both objects are", "@country.setter def country(self, country): \"\"\"Sets the country of this ItemLocation.", "E501 :return: The country of this ItemLocation. # noqa: E501", "of those items. # noqa: E501 OpenAPI spec version: v1_beta.2.2", "address_line1 @property def address_line2(self): \"\"\"Gets the address_line2 of this ItemLocation.", "self._address_line2 @address_line2.setter def address_line2(self, address_line2): \"\"\"Sets the address_line2 of this", "= postal_code @property def state_or_province(self): \"\"\"Gets the state_or_province of this", "of this ItemLocation. # noqa: E501 The first line of", "search for sold items on eBay by keyword, GTIN, category,", "city: The city of this ItemLocation. # noqa: E501 :type:", "the address_line2 of this ItemLocation. # noqa: E501 The second", "None self.discriminator = None if address_line1 is not None: self.address_line1", "str \"\"\" return self._county @county.setter def county(self, county): \"\"\"Sets the", ":return: The state_or_province of this ItemLocation. # noqa: E501 :rtype:", "else x, value )) elif hasattr(value, \"to_dict\"): result[attr] = value.to_dict()", "The state or province in which the item is located.", "'postalCode', 'state_or_province': 'stateOrProvince' } def __init__(self, address_line1=None, address_line2=None, city=None, country=None,", "def address_line1(self, address_line1): \"\"\"Sets the address_line1 of this ItemLocation. The", "E501 :param address_line1: The address_line1 of this ItemLocation. 
# noqa:", "country of this ItemLocation. # noqa: E501 :rtype: str \"\"\"", "postal_code of this ItemLocation. # noqa: E501 The postal code", "address_line2 of this ItemLocation. # noqa: E501 :type: str \"\"\"", "swagger_types (dict): The key is attribute name and the value", "= value return result def to_str(self): \"\"\"Returns the string representation", "noqa: E501 :param address_line2: The address_line2 of this ItemLocation. #", "'str', 'address_line2': 'str', 'city': 'str', 'country': 'str', 'county': 'str', 'postal_code':", "ItemLocation. # noqa: E501 The state or province in which", "if country is not None: self.country = country if county", "noqa: E501 The county in which the item is located.", "= None self._city = None self._country = None self._county =", "country is not None: self.country = country if county is", "postal_code of this ItemLocation. # noqa: E501 :type: str \"\"\"", "'str', 'postal_code': 'str', 'state_or_province': 'str' } attribute_map = { 'address_line1':", "= address_line1 @property def address_line2(self): \"\"\"Gets the address_line2 of this", "example would be <code>951**</code>.</span> # noqa: E501 :return: The postal_code", "alt=\\\"Limited Release\\\" />(Limited Release)</a> The Marketplace Insights API provides the", "\"\"\" self._state_or_province = state_or_province def to_dict(self): \"\"\"Returns the model properties", "The postal code (or zip code in US) where the", "county(self): \"\"\"Gets the county of this ItemLocation. 
# noqa: E501", "will mask postal/zip codes in slightly different ways, but an", "code in US) where the item is located.<br /> <br", ")) else: result[attr] = value if issubclass(ItemLocation, dict): for key,", "noqa: E501 :rtype: str \"\"\" return self._county @county.setter def county(self,", "= None self.discriminator = None if address_line1 is not None:", "None: self.postal_code = postal_code if state_or_province is not None: self.state_or_province", "# noqa: E501 :rtype: str \"\"\" return self._state_or_province @state_or_province.setter def", "the address_line1 of this ItemLocation. The first line of the", "x, value )) elif hasattr(value, \"to_dict\"): result[attr] = value.to_dict() elif", "the county of this ItemLocation. The county in which the", "\"\"\"NOTE: This class is auto generated by the swagger code", "# noqa: E501 :param postal_code: The postal_code of this ItemLocation.", "country: The country of this ItemLocation. # noqa: E501 :type:", "noqa: E501 :param postal_code: The postal_code of this ItemLocation. #", "= address_line1 if address_line2 is not None: self.address_line2 = address_line2", "country if county is not None: self.county = county if", "January 2020, the displayed postal code will be masked to", "E501 :type: str \"\"\" self._country = country @property def county(self):", "of this ItemLocation. # noqa: E501 :rtype: str \"\"\" return", "zip code in US) where the item is located.<br />", "result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], \"to_dict\")", "\"\"\"Sets the address_line1 of this ItemLocation. The first line of", "code that indicates the country in which the item is", "eBay by keyword, GTIN, category, and product and returns the", "is not None: self.state_or_province = state_or_province @property def address_line1(self): \"\"\"Gets", "noqa: E501 :return: The address_line1 of this ItemLocation. # noqa:", "street address. 
This field may contain such values as an", "keyword, GTIN, category, and product and returns the of sales", "documentation</a> # noqa: E501 :return: The country of this ItemLocation.", "(or zip code in US) where the item is located.<br", "item, value.items() )) else: result[attr] = value if issubclass(ItemLocation, dict):", "countries will mask postal/zip codes in slightly different ways, but", "postal/zip codes in slightly different ways, but an example would", "properties as a dict\"\"\" result = {} for attr, _", "edit the class manually. \"\"\" \"\"\" Attributes: swagger_types (dict): The", "would be <code>951**</code>.</span> # noqa: E501 :return: The postal_code of", "country @property def county(self): \"\"\"Gets the county of this ItemLocation.", "in slightly different ways, but an example would be <code>951**</code>.</span>", "E501 :return: The state_or_province of this ItemLocation. # noqa: E501", "'country', 'county': 'county', 'postal_code': 'postalCode', 'state_or_province': 'stateOrProvince' } def __init__(self,", "address_line2 of this ItemLocation. # noqa: E501 The second line", "E501 The second line of the street address. This field", "\"\"\" return self._postal_code @postal_code.setter def postal_code(self, postal_code): \"\"\"Sets the postal_code", "\"\"\"Gets the address_line1 of this ItemLocation. # noqa: E501 The", "located. # noqa: E501 :return: The city of this ItemLocation.", "would be <code>951**</code>.</span> # noqa: E501 :param postal_code: The postal_code", "\"to_dict\") else x, value )) elif hasattr(value, \"to_dict\"): result[attr] =", "item is located. # noqa: E501 :param county: The county", "may contain such values as an apartment or suite number.", "displayed postal code will be masked to all users. Different", "'city', 'country': 'country', 'county': 'county', 'postal_code': 'postalCode', 'state_or_province': 'stateOrProvince' }", "postal_code): \"\"\"Sets the postal_code of this ItemLocation. 
The postal code", ":rtype: str \"\"\" return self._county @county.setter def county(self, county): \"\"\"Sets", "isinstance(other, ItemLocation): return False return self.__dict__ == other.__dict__ def __ne__(self,", "'str', 'city': 'str', 'country': 'str', 'county': 'str', 'postal_code': 'str', 'state_or_province':", "item is located. For implementation help, refer to <a href='https://developer.ebay.com/api-docs/buy/marketplace_insights/types/ba:CountryCodeEnum'>eBay", "ItemLocation. # noqa: E501 The second line of the street", "but an example would be <code>951**</code>.</span> # noqa: E501 :return:", "attribute name and the value is attribute type. attribute_map (dict):", "and product and returns the of sales history of those", "None self._country = None self._county = None self._postal_code = None", ":rtype: str \"\"\" return self._address_line1 @address_line1.setter def address_line1(self, address_line1): \"\"\"Sets", "None self._county = None self._postal_code = None self._state_or_province = None", "if city is not None: self.city = city if country", "in which the item is located. # noqa: E501 :return:", "city is not None: self.city = city if country is", "this ItemLocation. # noqa: E501 The first line of the", "country of this ItemLocation. The two-letter <a href=\\\"https://www.iso.org/iso-3166-country-codes.html\\\">ISO 3166</a> standard", "version: v1_beta.2.2 Generated by: https://github.com/swagger-api/swagger-codegen.git \"\"\" import pprint import re", "noqa: E501 :return: The city of this ItemLocation. # noqa:", "= city @property def country(self): \"\"\"Gets the country of this", "def state_or_province(self, state_or_province): \"\"\"Sets the state_or_province of this ItemLocation. The", ":param country: The country of this ItemLocation. 
# noqa: E501", "elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict())", "'str', 'country': 'str', 'county': 'str', 'postal_code': 'str', 'state_or_province': 'str' }", "different ways, but an example would be <code>951**</code>.</span> # noqa:", "the state_or_province of this ItemLocation. The state or province in", "the model properties as a dict\"\"\" result = {} for", "# noqa: E501 :rtype: str \"\"\" return self._country @country.setter def", "apartment or suite number. # noqa: E501 :param address_line2: The", "postal code (or zip code in US) where the item", "\"\"\"Gets the state_or_province of this ItemLocation. # noqa: E501 The", "the model\"\"\" return pprint.pformat(self.to_dict()) def __repr__(self): \"\"\"For `print` and `pprint`\"\"\"", "'stateOrProvince' } def __init__(self, address_line1=None, address_line2=None, city=None, country=None, county=None, postal_code=None,", "ItemLocation. # noqa: E501 :rtype: str \"\"\" return self._postal_code @postal_code.setter", "noqa: E501 :param county: The county of this ItemLocation. #", "partners-icon\\\" title=\\\"Limited Release\\\" alt=\\\"Limited Release\\\" />(Limited Release)</a> The Marketplace Insights", "this ItemLocation. # noqa: E501 :type: str \"\"\" self._city =", "The second line of the street address. This field may", "The state_or_province of this ItemLocation. # noqa: E501 :type: str", "E501 :param country: The country of this ItemLocation. # noqa:", "x.to_dict() if hasattr(x, \"to_dict\") else x, value )) elif hasattr(value,", "None: self.city = city if country is not None: self.country", "Attributes: swagger_types (dict): The key is attribute name and the", "be <code>951**</code>.</span> # noqa: E501 :param postal_code: The postal_code of", "# noqa: E501 :return: The state_or_province of this ItemLocation. #", "of this ItemLocation. # noqa: E501 :type: str \"\"\" self._county", "this ItemLocation. 
The two-letter <a href=\\\"https://www.iso.org/iso-3166-country-codes.html\\\">ISO 3166</a> standard code that", "'county': 'str', 'postal_code': 'str', 'state_or_province': 'str' } attribute_map = {", "\"\"\"Sets the postal_code of this ItemLocation. The postal code (or", "by: https://github.com/swagger-api/swagger-codegen.git \"\"\" import pprint import re # noqa: F401", "# noqa: E501 :param county: The county of this ItemLocation.", "= value if issubclass(ItemLocation, dict): for key, value in self.items():", "is auto generated by the swagger code generator program. Do", "the country in which the item is located. For implementation", "value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0],", "which the item is located. # noqa: E501 :param state_or_province:", "ItemLocation): return False return self.__dict__ == other.__dict__ def __ne__(self, other):", "if hasattr(item[1], \"to_dict\") else item, value.items() )) else: result[attr] =", "F401 import six class ItemLocation(object): \"\"\"NOTE: This class is auto", "def address_line1(self): \"\"\"Gets the address_line1 of this ItemLocation. # noqa:", "self._country = None self._county = None self._postal_code = None self._state_or_province", "self.address_line1 = address_line1 if address_line2 is not None: self.address_line2 =", "of this ItemLocation. # noqa: E501 The second line of", "this ItemLocation. # noqa: E501 :type: str \"\"\" self._address_line1 =", "\"\"\" import pprint import re # noqa: F401 import six", "noqa: E501 :type: str \"\"\" self._address_line2 = address_line2 @property def", "a model defined in Swagger\"\"\" # noqa: E501 self._address_line1 =", "dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], \"to_dict\") else item,", "this ItemLocation. The city in which the item is located.", "v1_beta.2.2 Generated by: https://github.com/swagger-api/swagger-codegen.git \"\"\" import pprint import re #", "the item is located. 
For implementation help, refer to <a", "the item is located. # noqa: E501 :return: The city", "API documentation</a> # noqa: E501 :param country: The country of", "of sales history of those items. # noqa: E501 OpenAPI", "county=None, postal_code=None, state_or_province=None): # noqa: E501 \"\"\"ItemLocation - a model", "of this ItemLocation. # noqa: E501 :type: str \"\"\" self._address_line1", "of this ItemLocation. The first line of the street address.", "\"\"\" return self._city @city.setter def city(self, city): \"\"\"Sets the city", "E501 :type: str \"\"\" self._address_line2 = address_line2 @property def city(self):", "False return self.__dict__ == other.__dict__ def __ne__(self, other): \"\"\"Returns true", "str \"\"\" self._address_line2 = address_line2 @property def city(self): \"\"\"Gets the", "an apartment or suite number. # noqa: E501 :return: The", "in which the item is located. # noqa: E501 :param", "to search for sold items on eBay by keyword, GTIN,", "# noqa: E501 The county in which the item is", "@property def address_line1(self): \"\"\"Gets the address_line1 of this ItemLocation. #", "@property def county(self): \"\"\"Gets the county of this ItemLocation. #", "def county(self): \"\"\"Gets the county of this ItemLocation. # noqa:", "E501 The city in which the item is located. #", "= state_or_province @property def address_line1(self): \"\"\"Gets the address_line1 of this", "state_or_province(self, state_or_province): \"\"\"Sets the state_or_province of this ItemLocation. The state", "E501 :type: str \"\"\" self._address_line1 = address_line1 @property def address_line2(self):", "None: self.address_line2 = address_line2 if city is not None: self.city", "The first line of the street address. # noqa: E501", "value is attribute type. 
attribute_map (dict): The key is attribute", "return self.to_str() def __eq__(self, other): \"\"\"Returns true if both objects", "x: x.to_dict() if hasattr(x, \"to_dict\") else x, value )) elif", "state_or_province @property def address_line1(self): \"\"\"Gets the address_line1 of this ItemLocation.", "noqa: E501 The second line of the street address. This", "self._address_line1 @address_line1.setter def address_line1(self, address_line1): \"\"\"Sets the address_line1 of this", "noqa: E501 :rtype: str \"\"\" return self._state_or_province @state_or_province.setter def state_or_province(self,", "# noqa: E501 \"\"\"ItemLocation - a model defined in Swagger\"\"\"", "self._address_line2 = None self._city = None self._country = None self._county", "this ItemLocation. # noqa: E501 :type: str \"\"\" self._address_line2 =", "this ItemLocation. The postal code (or zip code in US)", "return self._city @city.setter def city(self, city): \"\"\"Sets the city of", "and the value is attribute type. attribute_map (dict): The key", "href=\\\"https://www.iso.org/iso-3166-country-codes.html\\\">ISO 3166</a> standard code that indicates the country in which", "str \"\"\" return self._city @city.setter def city(self, city): \"\"\"Sets the", "Release\\\" alt=\\\"Limited Release\\\" />(Limited Release)</a> The Marketplace Insights API provides", "value is json key in definition. \"\"\" swagger_types = {", "US) where the item is located.<br /> <br /><span class=\\\"tablenote\\\">", "lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], \"to_dict\") else item, value.items()", "'str', 'county': 'str', 'postal_code': 'str', 'state_or_province': 'str' } attribute_map =", "in late January 2020, the displayed postal code will be", "is located. For implementation help, refer to <a href='https://developer.ebay.com/api-docs/buy/marketplace_insights/types/ba:CountryCodeEnum'>eBay API", "The address_line2 of this ItemLocation. 
# noqa: E501 :rtype: str", "def county(self, county): \"\"\"Sets the county of this ItemLocation. The", "E501 The state or province in which the item is", "such values as an apartment or suite number. # noqa:", "code will be masked to all users. Different countries will", "return self._address_line1 @address_line1.setter def address_line1(self, address_line1): \"\"\"Sets the address_line1 of", "None: self.state_or_province = state_or_province @property def address_line1(self): \"\"\"Gets the address_line1", ":param city: The city of this ItemLocation. # noqa: E501", "This field may contain such values as an apartment or", "noqa: E501 :param state_or_province: The state_or_province of this ItemLocation. #", "= value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item:", "E501 :return: The address_line2 of this ItemLocation. # noqa: E501", "if state_or_province is not None: self.state_or_province = state_or_province @property def", "noqa: E501 :rtype: str \"\"\" return self._city @city.setter def city(self,", "def city(self, city): \"\"\"Sets the city of this ItemLocation. The", "'address_line2': 'addressLine2', 'city': 'city', 'country': 'country', 'county': 'county', 'postal_code': 'postalCode',", "E501 \"\"\"ItemLocation - a model defined in Swagger\"\"\" # noqa:", "line of the street address. # noqa: E501 :return: The", "ItemLocation. # noqa: E501 The county in which the item", "return self.__dict__ == other.__dict__ def __ne__(self, other): \"\"\"Returns true if", "def country(self, country): \"\"\"Sets the country of this ItemLocation. The", "dict): for key, value in self.items(): result[key] = value return", "country): \"\"\"Sets the country of this ItemLocation. 
The two-letter <a", "__init__(self, address_line1=None, address_line2=None, city=None, country=None, county=None, postal_code=None, state_or_province=None): # noqa:", "= { 'address_line1': 'addressLine1', 'address_line2': 'addressLine2', 'city': 'city', 'country': 'country',", "The country of this ItemLocation. # noqa: E501 :rtype: str", "of this ItemLocation. # noqa: E501 :type: str \"\"\" self._country", "the county of this ItemLocation. # noqa: E501 The county", "noqa: E501 :param address_line1: The address_line1 of this ItemLocation. #", "of this ItemLocation. The state or province in which the", "sold items on eBay by keyword, GTIN, category, and product", "second line of the street address. This field may contain", "which the item is located. # noqa: E501 :return: The", "def address_line2(self, address_line2): \"\"\"Sets the address_line2 of this ItemLocation. The", "Note: </b>Beginning in late January 2020, the displayed postal code", "None: self.country = country if county is not None: self.county", "noqa: E501 :type: str \"\"\" self._country = country @property def", "postal_code: The postal_code of this ItemLocation. # noqa: E501 :type:", "state_or_province is not None: self.state_or_province = state_or_province @property def address_line1(self):", "value.items() )) else: result[attr] = value if issubclass(ItemLocation, dict): for", "# noqa: E501 :rtype: str \"\"\" return self._address_line1 @address_line1.setter def", "noqa: E501 :param country: The country of this ItemLocation. #", "of this ItemLocation. The county in which the item is", "contain such values as an apartment or suite number. #", "the class manually. \"\"\" \"\"\" Attributes: swagger_types (dict): The key", "def city(self): \"\"\"Gets the city of this ItemLocation. # noqa:", "model defined in Swagger\"\"\" # noqa: E501 self._address_line1 = None", "the swagger code generator program. Do not edit the class", "product and returns the of sales history of those items.", "ItemLocation. 
# noqa: E501 :type: str \"\"\" self._address_line1 = address_line1", "# noqa: E501 :return: The county of this ItemLocation. #", "suite number. # noqa: E501 :return: The address_line2 of this", "equal\"\"\" if not isinstance(other, ItemLocation): return False return self.__dict__ ==", "as a dict\"\"\" result = {} for attr, _ in", "self.state_or_province = state_or_province @property def address_line1(self): \"\"\"Gets the address_line1 of", "address_line2 if city is not None: self.city = city if", "this ItemLocation. # noqa: E501 :rtype: str \"\"\" return self._country", "E501 :return: The county of this ItemLocation. # noqa: E501", "history of those items. # noqa: E501 OpenAPI spec version:", "postal_code if state_or_province is not None: self.state_or_province = state_or_province @property", "# noqa: E501 :type: str \"\"\" self._country = country @property", "located. # noqa: E501 :param state_or_province: The state_or_province of this", "country=None, county=None, postal_code=None, state_or_province=None): # noqa: E501 \"\"\"ItemLocation - a", "This class is auto generated by the swagger code generator", "noqa: E501 :rtype: str \"\"\" return self._postal_code @postal_code.setter def postal_code(self,", "generated by the swagger code generator program. Do not edit", "ItemLocation. # noqa: E501 The postal code (or zip code", "isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if", "str \"\"\" return self._address_line1 @address_line1.setter def address_line1(self, address_line1): \"\"\"Sets the", "this ItemLocation. # noqa: E501 :rtype: str \"\"\" return self._state_or_province", "of this ItemLocation. The second line of the street address.", "API documentation</a> # noqa: E501 :return: The country of this", "users. 
Different countries will mask postal/zip codes in slightly different", "pprint import re # noqa: F401 import six class ItemLocation(object):", "'county': 'county', 'postal_code': 'postalCode', 'state_or_province': 'stateOrProvince' } def __init__(self, address_line1=None,", "self._city = city @property def country(self): \"\"\"Gets the country of", "by the swagger code generator program. Do not edit the", "address_line2: The address_line2 of this ItemLocation. # noqa: E501 :type:", "noqa: E501 :param city: The city of this ItemLocation. #", "address_line1=None, address_line2=None, city=None, country=None, county=None, postal_code=None, state_or_province=None): # noqa: E501", "= county @property def postal_code(self): \"\"\"Gets the postal_code of this", "will be masked to all users. Different countries will mask", "this ItemLocation. # noqa: E501 :type: str \"\"\" self._postal_code =", "E501 :type: str \"\"\" self._state_or_province = state_or_province def to_dict(self): \"\"\"Returns", "result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, \"to_dict\") else", "json key in definition. 
\"\"\" swagger_types = { 'address_line1': 'str',", "<img src=\\\"/cms/img/docs/partners-api.svg\\\" class=\\\"legend-icon partners-icon\\\" title=\\\"Limited Release\\\" alt=\\\"Limited Release\\\" />(Limited Release)</a>", "noqa: E501 The two-letter <a href=\\\"https://www.iso.org/iso-3166-country-codes.html\\\">ISO 3166</a> standard code that", "attribute_map (dict): The key is attribute name and the value", "str \"\"\" return self._state_or_province @state_or_province.setter def state_or_province(self, state_or_province): \"\"\"Sets the", "elif hasattr(value, \"to_dict\"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr]", "E501 :rtype: str \"\"\" return self._city @city.setter def city(self, city):", "hasattr(x, \"to_dict\") else x, value )) elif hasattr(value, \"to_dict\"): result[attr]", "is not None: self.postal_code = postal_code if state_or_province is not", "item is located. # noqa: E501 :param state_or_province: The state_or_province", "# noqa: F401 import six class ItemLocation(object): \"\"\"NOTE: This class", "None self._city = None self._country = None self._county = None", "self.city = city if country is not None: self.country =", "county if postal_code is not None: self.postal_code = postal_code if", "attribute type. attribute_map (dict): The key is attribute name and", "the item is located. # noqa: E501 :return: The county", "noqa: E501 The postal code (or zip code in US)", "sales history of those items. # noqa: E501 OpenAPI spec", "the string representation of the model\"\"\" return pprint.pformat(self.to_dict()) def __repr__(self):", "\"\"\" self._county = county @property def postal_code(self): \"\"\"Gets the postal_code", "item is located. # noqa: E501 :return: The state_or_province of", "city of this ItemLocation. 
# noqa: E501 :rtype: str \"\"\"", "true if both objects are not equal\"\"\" return not self", "@state_or_province.setter def state_or_province(self, state_or_province): \"\"\"Sets the state_or_province of this ItemLocation.", "address_line1 of this ItemLocation. # noqa: E501 :rtype: str \"\"\"", "return self._state_or_province @state_or_province.setter def state_or_province(self, state_or_province): \"\"\"Sets the state_or_province of", "_ in six.iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list):", "to all users. Different countries will mask postal/zip codes in", "\"\"\"Sets the county of this ItemLocation. The county in which", "if address_line2 is not None: self.address_line2 = address_line2 if city", "Swagger\"\"\" # noqa: E501 self._address_line1 = None self._address_line2 = None", "ItemLocation. # noqa: E501 The first line of the street", "E501 :type: str \"\"\" self._city = city @property def country(self):", "# noqa: E501 :param country: The country of this ItemLocation.", "ItemLocation. # noqa: E501 The city in which the item", "dict\"\"\" result = {} for attr, _ in six.iteritems(self.swagger_types): value", "is not None: self.country = country if county is not", "be masked to all users. Different countries will mask postal/zip", "= postal_code if state_or_province is not None: self.state_or_province = state_or_province", "city in which the item is located. # noqa: E501", "= None self._state_or_province = None self.discriminator = None if address_line1", "attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict()", "not None: self.city = city if country is not None:", ":return: The postal_code of this ItemLocation. # noqa: E501 :rtype:", "six.iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] =", "county of this ItemLocation. 
# noqa: E501 The county in", "</b>Beginning in late January 2020, the displayed postal code will", "the state_or_province of this ItemLocation. # noqa: E501 The state", "is located. # noqa: E501 :return: The county of this", "is attribute name and the value is attribute type. attribute_map", "\"\"\"Sets the state_or_province of this ItemLocation. The state or province", "Do not edit the class manually. \"\"\" \"\"\" Attributes: swagger_types", "@postal_code.setter def postal_code(self, postal_code): \"\"\"Sets the postal_code of this ItemLocation.", "but an example would be <code>951**</code>.</span> # noqa: E501 :param", "ItemLocation. # noqa: E501 :type: str \"\"\" self._county = county", "# noqa: E501 The second line of the street address.", "ItemLocation. The postal code (or zip code in US) where", "utf-8 \"\"\" Marketplace Insights API <a href=\\\"https://developer.ebay.com/api-docs/static/versioning.html#limited\\\" target=\\\"_blank\\\"> <img src=\\\"/cms/img/docs/partners-api.svg\\\"", "\"\"\"Gets the postal_code of this ItemLocation. # noqa: E501 The", "'postal_code': 'str', 'state_or_province': 'str' } attribute_map = { 'address_line1': 'addressLine1',", "class manually. \"\"\" \"\"\" Attributes: swagger_types (dict): The key is", "is not None: self.city = city if country is not", "# noqa: E501 :type: str \"\"\" self._address_line1 = address_line1 @property", "this ItemLocation. 
# noqa: E501 :rtype: str \"\"\" return self._postal_code", "self._postal_code = None self._state_or_province = None self.discriminator = None if", "result def to_str(self): \"\"\"Returns the string representation of the model\"\"\"", "pprint.pformat(self.to_dict()) def __repr__(self): \"\"\"For `print` and `pprint`\"\"\" return self.to_str() def", "Release\\\" />(Limited Release)</a> The Marketplace Insights API provides the ability", "value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map(", "\"\"\" return self._address_line1 @address_line1.setter def address_line1(self, address_line1): \"\"\"Sets the address_line1", "to_dict(self): \"\"\"Returns the model properties as a dict\"\"\" result =", "@property def address_line2(self): \"\"\"Gets the address_line2 of this ItemLocation. #", "<a href='https://developer.ebay.com/api-docs/buy/marketplace_insights/types/ba:CountryCodeEnum'>eBay API documentation</a> # noqa: E501 :return: The country", ":type: str \"\"\" self._county = county @property def postal_code(self): \"\"\"Gets", "str \"\"\" self._city = city @property def country(self): \"\"\"Gets the", "city @property def country(self): \"\"\"Gets the country of this ItemLocation.", "address. # noqa: E501 :param address_line1: The address_line1 of this", "county(self, county): \"\"\"Sets the county of this ItemLocation. The county", "is not None: self.address_line2 = address_line2 if city is not", "self.country = country if county is not None: self.county =", "attr, _ in six.iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value,", "refer to <a href='https://developer.ebay.com/api-docs/buy/marketplace_insights/types/ba:CountryCodeEnum'>eBay API documentation</a> # noqa: E501 :return:", ":type: str \"\"\" self._country = country @property def county(self): \"\"\"Gets", "is located. 
# noqa: E501 :param city: The city of", "E501 OpenAPI spec version: v1_beta.2.2 Generated by: https://github.com/swagger-api/swagger-codegen.git \"\"\" import", "is attribute type. attribute_map (dict): The key is attribute name", "} attribute_map = { 'address_line1': 'addressLine1', 'address_line2': 'addressLine2', 'city': 'city',", "noqa: E501 :type: str \"\"\" self._postal_code = postal_code @property def", "county @property def postal_code(self): \"\"\"Gets the postal_code of this ItemLocation.", "class ItemLocation(object): \"\"\"NOTE: This class is auto generated by the" ]
[ "100 data.sort(key=lambda x: x[1]/x[2], reverse=True) profit=0 ans=[] i=0 while i<n:", "[40,50,30,10,10,40,30] pro = [30,20,20,25,5,35,15] n = len(wt) data = [", "for i in range(n) ] bag = 100 data.sort(key=lambda x:", "reverse=True) profit=0 ans=[] i=0 while i<n: if data[i][2]<=bag: bag-=data[i][2] ans.append(data[i][0])", "# Fractional Knapsack wt = [40,50,30,10,10,40,30] pro = [30,20,20,25,5,35,15] n", "bag-=data[i][2] ans.append(data[i][0]) profit+=data[i][1] i+=1 else: break if i<n: ans.append(data[i][0]) profit", "i=0 while i<n: if data[i][2]<=bag: bag-=data[i][2] ans.append(data[i][0]) profit+=data[i][1] i+=1 else:", "x[1]/x[2], reverse=True) profit=0 ans=[] i=0 while i<n: if data[i][2]<=bag: bag-=data[i][2]", "data[i][2]<=bag: bag-=data[i][2] ans.append(data[i][0]) profit+=data[i][1] i+=1 else: break if i<n: ans.append(data[i][0])", "[30,20,20,25,5,35,15] n = len(wt) data = [ (i,pro[i],wt[i]) for i", "data = [ (i,pro[i],wt[i]) for i in range(n) ] bag", "ans=[] i=0 while i<n: if data[i][2]<=bag: bag-=data[i][2] ans.append(data[i][0]) profit+=data[i][1] i+=1", "i<n: if data[i][2]<=bag: bag-=data[i][2] ans.append(data[i][0]) profit+=data[i][1] i+=1 else: break if", "profit+=data[i][1] i+=1 else: break if i<n: ans.append(data[i][0]) profit += (bag*data[i][1])/data[i][2]", "(i,pro[i],wt[i]) for i in range(n) ] bag = 100 data.sort(key=lambda", "profit=0 ans=[] i=0 while i<n: if data[i][2]<=bag: bag-=data[i][2] ans.append(data[i][0]) profit+=data[i][1]", "n = len(wt) data = [ (i,pro[i],wt[i]) for i in", "bag = 100 data.sort(key=lambda x: x[1]/x[2], reverse=True) profit=0 ans=[] i=0", "i+=1 else: break if i<n: ans.append(data[i][0]) profit += (bag*data[i][1])/data[i][2] print(profit,ans)", "= [40,50,30,10,10,40,30] pro = [30,20,20,25,5,35,15] n = len(wt) data =", "= 100 data.sort(key=lambda x: x[1]/x[2], reverse=True) profit=0 ans=[] i=0 while", "] bag = 100 data.sort(key=lambda x: x[1]/x[2], reverse=True) profit=0 ans=[]", "data.sort(key=lambda x: x[1]/x[2], 
reverse=True) profit=0 ans=[] i=0 while i<n: if", "i in range(n) ] bag = 100 data.sort(key=lambda x: x[1]/x[2],", "ans.append(data[i][0]) profit+=data[i][1] i+=1 else: break if i<n: ans.append(data[i][0]) profit +=", "pro = [30,20,20,25,5,35,15] n = len(wt) data = [ (i,pro[i],wt[i])", "= [ (i,pro[i],wt[i]) for i in range(n) ] bag =", "if data[i][2]<=bag: bag-=data[i][2] ans.append(data[i][0]) profit+=data[i][1] i+=1 else: break if i<n:", "[ (i,pro[i],wt[i]) for i in range(n) ] bag = 100", "while i<n: if data[i][2]<=bag: bag-=data[i][2] ans.append(data[i][0]) profit+=data[i][1] i+=1 else: break", "len(wt) data = [ (i,pro[i],wt[i]) for i in range(n) ]", "<filename>fractionalKnapsack.py # Fractional Knapsack wt = [40,50,30,10,10,40,30] pro = [30,20,20,25,5,35,15]", "in range(n) ] bag = 100 data.sort(key=lambda x: x[1]/x[2], reverse=True)", "Knapsack wt = [40,50,30,10,10,40,30] pro = [30,20,20,25,5,35,15] n = len(wt)", "wt = [40,50,30,10,10,40,30] pro = [30,20,20,25,5,35,15] n = len(wt) data", "= [30,20,20,25,5,35,15] n = len(wt) data = [ (i,pro[i],wt[i]) for", "range(n) ] bag = 100 data.sort(key=lambda x: x[1]/x[2], reverse=True) profit=0", "Fractional Knapsack wt = [40,50,30,10,10,40,30] pro = [30,20,20,25,5,35,15] n =", "x: x[1]/x[2], reverse=True) profit=0 ans=[] i=0 while i<n: if data[i][2]<=bag:", "= len(wt) data = [ (i,pro[i],wt[i]) for i in range(n)" ]
[ "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "'avx512') _emit_function(fe, avx512_kmask_op, 'avx512_maskop') _emit_function(fe, avx_isa_sets, 'avx') _emit_function(fe, sse_isa_sets, 'sse')", "in SSE are # \"SSE-opcodes\" with memop operands. One can", "and not re.search('X87',ii.isa_set) and not re.search('MWAIT',ii.isa_set)): sse_isa_sets.add(ii.isa_set) fe = agi.open_file('xed-classifiers.c')", "MMX instr in SSE are # \"SSE-opcodes\" with memop operands.", "> 0: switch.add('return 1;') switch.add_default(['return 0;'], do_break=False) switch.finish() fo.emit_file_emitter(fe) def", "avx512_isa_sets.add(ii.isa_set) if re.search('KOP',ii.isa_set): avx512_kmask_op.add(ii.isa_set) elif re.search('AVX',ii.isa_set) or ii.isa_set in ['F16C',", "# # Licensed under the Apache License, Version 2.0 (the", "compliance with the License. # You may obtain a copy", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "2.0 (the \"License\"); # you may not use this file", "agreed to in writing, software # distributed under the License", "file except in compliance with the License. # You may", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "Unless required by applicable law or agreed to in writing,", "= set([]) avx_isa_sets = set([]) avx512_isa_sets = set([]) avx512_kmask_op =", "= set([]) avx512_kmask_op = set([]) for generator in agi.generator_list: for", "# Exclude MMX instructions that come in with SSE2 &", "switch.finish() fo.emit_file_emitter(fe) def work(agi): sse_isa_sets = set([]) avx_isa_sets = set([])", "#END_LEGAL from __future__ import print_function import re import genutil import", "distributed under the License is distributed on an \"AS IS\"", "SSE_PREFETCH operations; Those are # just memops. 
if (not re.search('MMX',ii.isa_set)", "or ii.isa_set in ['AES','PCLMULQDQ']: # Exclude MMX instructions that come", "from __future__ import print_function import re import genutil import codegen", "print_function import re import genutil import codegen def _emit_function(fe, isa_sets,", "import re import genutil import codegen def _emit_function(fe, isa_sets, name):", "import genutil import codegen def _emit_function(fe, isa_sets, name): fo =", "in generator.parser_output.instructions: if genutil.field_check(ii, 'iclass'): if re.search('AVX512',ii.isa_set): avx512_isa_sets.add(ii.isa_set) if re.search('KOP',ii.isa_set):", "code switch = codegen.c_switch_generator_t('isa_set', fo) isa_sets_sorted = sorted(isa_sets) for c", "SSE2 & # SSSE3. The several purely MMX instr in", "the specific language governing permissions and # limitations under the", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "xed_isa_set_enum_t isa_set = xed_decoded_inst_get_isa_set(d)') # FIXME: 2017-07-14 optimization: could use", "generator in agi.generator_list: for ii in generator.parser_output.instructions: if genutil.field_check(ii, 'iclass'):", "SSE are # \"SSE-opcodes\" with memop operands. One can look", "express or implied. # See the License for the specific", "applicable law or agreed to in writing, software # distributed", "use a static array for faster checking, smaller code switch", "c in isa_sets_sorted: switch.add_case('XED_ISA_SET_{}'.format(c.upper()),[],do_break=False) if len(isa_sets) > 0: switch.add('return 1;')", "except in compliance with the License. 
# You may obtain", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "elif re.search('SSE',ii.isa_set) or ii.isa_set in ['AES','PCLMULQDQ']: # Exclude MMX instructions", "isa_sets_sorted = sorted(isa_sets) for c in isa_sets_sorted: switch.add_case('XED_ISA_SET_{}'.format(c.upper()),[],do_break=False) if len(isa_sets)", "Intel Corporation # # Licensed under the Apache License, Version", "and # limitations under the License. # #END_LEGAL from __future__", "avx512_kmask_op, 'avx512_maskop') _emit_function(fe, avx_isa_sets, 'avx') _emit_function(fe, sse_isa_sets, 'sse') fe.close() return", "governing permissions and # limitations under the License. # #END_LEGAL", "Licensed under the Apache License, Version 2.0 (the \"License\"); #", "checking, smaller code switch = codegen.c_switch_generator_t('isa_set', fo) isa_sets_sorted = sorted(isa_sets)", "xed_decoded_inst_get_isa_set(d)') # FIXME: 2017-07-14 optimization: could use a static array", "for faster checking, smaller code switch = codegen.c_switch_generator_t('isa_set', fo) isa_sets_sorted", "The several purely MMX instr in SSE are # \"SSE-opcodes\"", "not use this file except in compliance with the License.", "generator.parser_output.instructions: if genutil.field_check(ii, 'iclass'): if re.search('AVX512',ii.isa_set): avx512_isa_sets.add(ii.isa_set) if re.search('KOP',ii.isa_set): avx512_kmask_op.add(ii.isa_set)", "__future__ import print_function import re import genutil import codegen def", "= sorted(isa_sets) for c in isa_sets_sorted: switch.add_case('XED_ISA_SET_{}'.format(c.upper()),[],do_break=False) if len(isa_sets) >", "re.search('AVX512',ii.isa_set): avx512_isa_sets.add(ii.isa_set) if re.search('KOP',ii.isa_set): avx512_kmask_op.add(ii.isa_set) elif re.search('AVX',ii.isa_set) or ii.isa_set in", "#Copyright (c) 2019 Intel Corporation # # Licensed under the", "# # Also exclude the SSE_PREFETCH operations; Those are #", "writing, software # distributed under the License is 
distributed on", "several purely MMX instr in SSE are # \"SSE-opcodes\" with", "import print_function import re import genutil import codegen def _emit_function(fe,", "in writing, software # distributed under the License is distributed", "ii.isa_set in ['F16C', 'FMA']: avx_isa_sets.add(ii.isa_set) elif re.search('SSE',ii.isa_set) or ii.isa_set in", "re.search('AVX',ii.isa_set) or ii.isa_set in ['F16C', 'FMA']: avx_isa_sets.add(ii.isa_set) elif re.search('SSE',ii.isa_set) or", "= xed_decoded_inst_get_isa_set(d)') # FIXME: 2017-07-14 optimization: could use a static", "_emit_function(fe, isa_sets, name): fo = codegen.function_object_t('xed_classify_{}'.format(name)) fo.add_arg('const xed_decoded_inst_t* d') fo.add_code_eol('", "you may not use this file except in compliance with", "# #Copyright (c) 2019 Intel Corporation # # Licensed under", "purely MMX instr in SSE are # \"SSE-opcodes\" with memop", "fe = agi.open_file('xed-classifiers.c') # xed_file_emitter_t _emit_function(fe, avx512_isa_sets, 'avx512') _emit_function(fe, avx512_kmask_op,", "# Licensed under the Apache License, Version 2.0 (the \"License\");", "and SSSE3MMX xed isa_sets. # # Also exclude the SSE_PREFETCH", "exclude the SSE_PREFETCH operations; Those are # just memops. 
if", "d') fo.add_code_eol(' const xed_isa_set_enum_t isa_set = xed_decoded_inst_get_isa_set(d)') # FIXME: 2017-07-14", "in ['AES','PCLMULQDQ']: # Exclude MMX instructions that come in with", "-*- #BEGIN_LEGAL # #Copyright (c) 2019 Intel Corporation # #", "avx512_isa_sets = set([]) avx512_kmask_op = set([]) for generator in agi.generator_list:", "re.search('X87',ii.isa_set) and not re.search('MWAIT',ii.isa_set)): sse_isa_sets.add(ii.isa_set) fe = agi.open_file('xed-classifiers.c') # xed_file_emitter_t", "agi.open_file('xed-classifiers.c') # xed_file_emitter_t _emit_function(fe, avx512_isa_sets, 'avx512') _emit_function(fe, avx512_kmask_op, 'avx512_maskop') _emit_function(fe,", "fo = codegen.function_object_t('xed_classify_{}'.format(name)) fo.add_arg('const xed_decoded_inst_t* d') fo.add_code_eol(' const xed_isa_set_enum_t isa_set", "= set([]) for generator in agi.generator_list: for ii in generator.parser_output.instructions:", "re.search('MMX',ii.isa_set) and not re.search('PREFETCH',ii.isa_set) and not re.search('X87',ii.isa_set) and not re.search('MWAIT',ii.isa_set)):", "use this file except in compliance with the License. #", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "-*- python -*- #BEGIN_LEGAL # #Copyright (c) 2019 Intel Corporation", "are # just memops. 
if (not re.search('MMX',ii.isa_set) and not re.search('PREFETCH',ii.isa_set)", "if re.search('AVX512',ii.isa_set): avx512_isa_sets.add(ii.isa_set) if re.search('KOP',ii.isa_set): avx512_kmask_op.add(ii.isa_set) elif re.search('AVX',ii.isa_set) or ii.isa_set", "# -*- python -*- #BEGIN_LEGAL # #Copyright (c) 2019 Intel", "elif re.search('AVX',ii.isa_set) or ii.isa_set in ['F16C', 'FMA']: avx_isa_sets.add(ii.isa_set) elif re.search('SSE',ii.isa_set)", "'iclass'): if re.search('AVX512',ii.isa_set): avx512_isa_sets.add(ii.isa_set) if re.search('KOP',ii.isa_set): avx512_kmask_op.add(ii.isa_set) elif re.search('AVX',ii.isa_set) or", "if re.search('KOP',ii.isa_set): avx512_kmask_op.add(ii.isa_set) elif re.search('AVX',ii.isa_set) or ii.isa_set in ['F16C', 'FMA']:", "SSSE3. The several purely MMX instr in SSE are #", "CONDITIONS OF ANY KIND, either express or implied. # See", "# just memops. if (not re.search('MMX',ii.isa_set) and not re.search('PREFETCH',ii.isa_set) and", "sse_isa_sets.add(ii.isa_set) fe = agi.open_file('xed-classifiers.c') # xed_file_emitter_t _emit_function(fe, avx512_isa_sets, 'avx512') _emit_function(fe,", "def work(agi): sse_isa_sets = set([]) avx_isa_sets = set([]) avx512_isa_sets =", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "or implied. # See the License for the specific language", "License is distributed on an \"AS IS\" BASIS, # WITHOUT", "look for # those with SSE2MMX and SSSE3MMX xed isa_sets.", "License. # You may obtain a copy of the License", "#BEGIN_LEGAL # #Copyright (c) 2019 Intel Corporation # # Licensed", "License, Version 2.0 (the \"License\"); # you may not use", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "\"SSE-opcodes\" with memop operands. 
One can look for # those", "# You may obtain a copy of the License at", "not re.search('PREFETCH',ii.isa_set) and not re.search('X87',ii.isa_set) and not re.search('MWAIT',ii.isa_set)): sse_isa_sets.add(ii.isa_set) fe", "KIND, either express or implied. # See the License for", "specific language governing permissions and # limitations under the License.", "for # those with SSE2MMX and SSSE3MMX xed isa_sets. #", "Also exclude the SSE_PREFETCH operations; Those are # just memops.", "# FIXME: 2017-07-14 optimization: could use a static array for", "can look for # those with SSE2MMX and SSSE3MMX xed", "xed isa_sets. # # Also exclude the SSE_PREFETCH operations; Those", "fo.emit_file_emitter(fe) def work(agi): sse_isa_sets = set([]) avx_isa_sets = set([]) avx512_isa_sets", "under the License is distributed on an \"AS IS\" BASIS,", "isa_sets, name): fo = codegen.function_object_t('xed_classify_{}'.format(name)) fo.add_arg('const xed_decoded_inst_t* d') fo.add_code_eol(' const", "or ii.isa_set in ['F16C', 'FMA']: avx_isa_sets.add(ii.isa_set) elif re.search('SSE',ii.isa_set) or ii.isa_set", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "instructions that come in with SSE2 & # SSSE3. 
The", "License for the specific language governing permissions and # limitations", "name): fo = codegen.function_object_t('xed_classify_{}'.format(name)) fo.add_arg('const xed_decoded_inst_t* d') fo.add_code_eol(' const xed_isa_set_enum_t", "isa_set = xed_decoded_inst_get_isa_set(d)') # FIXME: 2017-07-14 optimization: could use a", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "0: switch.add('return 1;') switch.add_default(['return 0;'], do_break=False) switch.finish() fo.emit_file_emitter(fe) def work(agi):", "work(agi): sse_isa_sets = set([]) avx_isa_sets = set([]) avx512_isa_sets = set([])", "codegen.c_switch_generator_t('isa_set', fo) isa_sets_sorted = sorted(isa_sets) for c in isa_sets_sorted: switch.add_case('XED_ISA_SET_{}'.format(c.upper()),[],do_break=False)", "operands. One can look for # those with SSE2MMX and", "genutil.field_check(ii, 'iclass'): if re.search('AVX512',ii.isa_set): avx512_isa_sets.add(ii.isa_set) if re.search('KOP',ii.isa_set): avx512_kmask_op.add(ii.isa_set) elif re.search('AVX',ii.isa_set)", "= codegen.function_object_t('xed_classify_{}'.format(name)) fo.add_arg('const xed_decoded_inst_t* d') fo.add_code_eol(' const xed_isa_set_enum_t isa_set =", "avx512_isa_sets, 'avx512') _emit_function(fe, avx512_kmask_op, 'avx512_maskop') _emit_function(fe, avx_isa_sets, 'avx') _emit_function(fe, sse_isa_sets,", "0;'], do_break=False) switch.finish() fo.emit_file_emitter(fe) def work(agi): sse_isa_sets = set([]) avx_isa_sets", "FIXME: 2017-07-14 optimization: could use a static array for faster", "the License for the specific language governing permissions and #", "and not re.search('MWAIT',ii.isa_set)): sse_isa_sets.add(ii.isa_set) fe = agi.open_file('xed-classifiers.c') # xed_file_emitter_t _emit_function(fe,", "SSE2MMX and SSSE3MMX xed isa_sets. 
# # Also exclude the", "(the \"License\"); # you may not use this file except", "in agi.generator_list: for ii in generator.parser_output.instructions: if genutil.field_check(ii, 'iclass'): if", "Apache License, Version 2.0 (the \"License\"); # you may not", "memops. if (not re.search('MMX',ii.isa_set) and not re.search('PREFETCH',ii.isa_set) and not re.search('X87',ii.isa_set)", "# you may not use this file except in compliance", "# limitations under the License. # #END_LEGAL from __future__ import", "'FMA']: avx_isa_sets.add(ii.isa_set) elif re.search('SSE',ii.isa_set) or ii.isa_set in ['AES','PCLMULQDQ']: # Exclude", "either express or implied. # See the License for the", "if (not re.search('MMX',ii.isa_set) and not re.search('PREFETCH',ii.isa_set) and not re.search('X87',ii.isa_set) and", "memop operands. One can look for # those with SSE2MMX", "come in with SSE2 & # SSSE3. The several purely", "with memop operands. One can look for # those with", "OR CONDITIONS OF ANY KIND, either express or implied. #", "genutil import codegen def _emit_function(fe, isa_sets, name): fo = codegen.function_object_t('xed_classify_{}'.format(name))", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "the License is distributed on an \"AS IS\" BASIS, #", "instr in SSE are # \"SSE-opcodes\" with memop operands. One", "array for faster checking, smaller code switch = codegen.c_switch_generator_t('isa_set', fo)", "avx512_kmask_op = set([]) for generator in agi.generator_list: for ii in", "in compliance with the License. # You may obtain a", "switch.add_default(['return 0;'], do_break=False) switch.finish() fo.emit_file_emitter(fe) def work(agi): sse_isa_sets = set([])", "do_break=False) switch.finish() fo.emit_file_emitter(fe) def work(agi): sse_isa_sets = set([]) avx_isa_sets =", "are # \"SSE-opcodes\" with memop operands. One can look for", "# those with SSE2MMX and SSSE3MMX xed isa_sets. 
# #", "software # distributed under the License is distributed on an", "re.search('PREFETCH',ii.isa_set) and not re.search('X87',ii.isa_set) and not re.search('MWAIT',ii.isa_set)): sse_isa_sets.add(ii.isa_set) fe =", "if genutil.field_check(ii, 'iclass'): if re.search('AVX512',ii.isa_set): avx512_isa_sets.add(ii.isa_set) if re.search('KOP',ii.isa_set): avx512_kmask_op.add(ii.isa_set) elif", "sse_isa_sets = set([]) avx_isa_sets = set([]) avx512_isa_sets = set([]) avx512_kmask_op", "# Also exclude the SSE_PREFETCH operations; Those are # just", "# xed_file_emitter_t _emit_function(fe, avx512_isa_sets, 'avx512') _emit_function(fe, avx512_kmask_op, 'avx512_maskop') _emit_function(fe, avx_isa_sets,", "# # Unless required by applicable law or agreed to", "optimization: could use a static array for faster checking, smaller", "the License. # #END_LEGAL from __future__ import print_function import re", "# #END_LEGAL from __future__ import print_function import re import genutil", "ii in generator.parser_output.instructions: if genutil.field_check(ii, 'iclass'): if re.search('AVX512',ii.isa_set): avx512_isa_sets.add(ii.isa_set) if", "not re.search('X87',ii.isa_set) and not re.search('MWAIT',ii.isa_set)): sse_isa_sets.add(ii.isa_set) fe = agi.open_file('xed-classifiers.c') #", "limitations under the License. 
# #END_LEGAL from __future__ import print_function", "set([]) for generator in agi.generator_list: for ii in generator.parser_output.instructions: if", "= agi.open_file('xed-classifiers.c') # xed_file_emitter_t _emit_function(fe, avx512_isa_sets, 'avx512') _emit_function(fe, avx512_kmask_op, 'avx512_maskop')", "fo.add_arg('const xed_decoded_inst_t* d') fo.add_code_eol(' const xed_isa_set_enum_t isa_set = xed_decoded_inst_get_isa_set(d)') #", "static array for faster checking, smaller code switch = codegen.c_switch_generator_t('isa_set',", "in isa_sets_sorted: switch.add_case('XED_ISA_SET_{}'.format(c.upper()),[],do_break=False) if len(isa_sets) > 0: switch.add('return 1;') switch.add_default(['return", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "re.search('SSE',ii.isa_set) or ii.isa_set in ['AES','PCLMULQDQ']: # Exclude MMX instructions that", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "Those are # just memops. if (not re.search('MMX',ii.isa_set) and not", "re.search('MWAIT',ii.isa_set)): sse_isa_sets.add(ii.isa_set) fe = agi.open_file('xed-classifiers.c') # xed_file_emitter_t _emit_function(fe, avx512_isa_sets, 'avx512')", "xed_file_emitter_t _emit_function(fe, avx512_isa_sets, 'avx512') _emit_function(fe, avx512_kmask_op, 'avx512_maskop') _emit_function(fe, avx_isa_sets, 'avx')", "for generator in agi.generator_list: for ii in generator.parser_output.instructions: if genutil.field_check(ii,", "python -*- #BEGIN_LEGAL # #Copyright (c) 2019 Intel Corporation #", "Version 2.0 (the \"License\"); # you may not use this", "re.search('KOP',ii.isa_set): avx512_kmask_op.add(ii.isa_set) elif re.search('AVX',ii.isa_set) or ii.isa_set in ['F16C', 'FMA']: avx_isa_sets.add(ii.isa_set)", "['AES','PCLMULQDQ']: # Exclude MMX instructions that come in with SSE2", "law or agreed to in writing, software # distributed under", "(not re.search('MMX',ii.isa_set) and not re.search('PREFETCH',ii.isa_set) and not 
re.search('X87',ii.isa_set) and not", "(c) 2019 Intel Corporation # # Licensed under the Apache", "just memops. if (not re.search('MMX',ii.isa_set) and not re.search('PREFETCH',ii.isa_set) and not", "switch.add('return 1;') switch.add_default(['return 0;'], do_break=False) switch.finish() fo.emit_file_emitter(fe) def work(agi): sse_isa_sets", "permissions and # limitations under the License. # #END_LEGAL from", "Exclude MMX instructions that come in with SSE2 & #", "fo.add_code_eol(' const xed_isa_set_enum_t isa_set = xed_decoded_inst_get_isa_set(d)') # FIXME: 2017-07-14 optimization:", "isa_sets_sorted: switch.add_case('XED_ISA_SET_{}'.format(c.upper()),[],do_break=False) if len(isa_sets) > 0: switch.add('return 1;') switch.add_default(['return 0;'],", "# \"SSE-opcodes\" with memop operands. One can look for #", "implied. # See the License for the specific language governing", "in with SSE2 & # SSSE3. The several purely MMX", "def _emit_function(fe, isa_sets, name): fo = codegen.function_object_t('xed_classify_{}'.format(name)) fo.add_arg('const xed_decoded_inst_t* d')", "those with SSE2MMX and SSSE3MMX xed isa_sets. # # Also", "could use a static array for faster checking, smaller code", "under the Apache License, Version 2.0 (the \"License\"); # you", "1;') switch.add_default(['return 0;'], do_break=False) switch.finish() fo.emit_file_emitter(fe) def work(agi): sse_isa_sets =", "\"License\"); # you may not use this file except in", "['F16C', 'FMA']: avx_isa_sets.add(ii.isa_set) elif re.search('SSE',ii.isa_set) or ii.isa_set in ['AES','PCLMULQDQ']: #", "isa_sets. 
# # Also exclude the SSE_PREFETCH operations; Those are", "python # -*- python -*- #BEGIN_LEGAL # #Copyright (c) 2019", "if len(isa_sets) > 0: switch.add('return 1;') switch.add_default(['return 0;'], do_break=False) switch.finish()", "set([]) avx512_kmask_op = set([]) for generator in agi.generator_list: for ii", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "= codegen.c_switch_generator_t('isa_set', fo) isa_sets_sorted = sorted(isa_sets) for c in isa_sets_sorted:", "fo) isa_sets_sorted = sorted(isa_sets) for c in isa_sets_sorted: switch.add_case('XED_ISA_SET_{}'.format(c.upper()),[],do_break=False) if", "_emit_function(fe, avx512_isa_sets, 'avx512') _emit_function(fe, avx512_kmask_op, 'avx512_maskop') _emit_function(fe, avx_isa_sets, 'avx') _emit_function(fe,", "xed_decoded_inst_t* d') fo.add_code_eol(' const xed_isa_set_enum_t isa_set = xed_decoded_inst_get_isa_set(d)') # FIXME:", "len(isa_sets) > 0: switch.add('return 1;') switch.add_default(['return 0;'], do_break=False) switch.finish() fo.emit_file_emitter(fe)", "by applicable law or agreed to in writing, software #", "# distributed under the License is distributed on an \"AS", "OF ANY KIND, either express or implied. # See the", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "2017-07-14 optimization: could use a static array for faster checking,", "set([]) avx_isa_sets = set([]) avx512_isa_sets = set([]) avx512_kmask_op = set([])", "set([]) avx512_isa_sets = set([]) avx512_kmask_op = set([]) for generator in", "may obtain a copy of the License at # #", "# Unless required by applicable law or agreed to in", "ANY KIND, either express or implied. 
# See the License", "See the License for the specific language governing permissions and", "re import genutil import codegen def _emit_function(fe, isa_sets, name): fo", "import codegen def _emit_function(fe, isa_sets, name): fo = codegen.function_object_t('xed_classify_{}'.format(name)) fo.add_arg('const", "codegen.function_object_t('xed_classify_{}'.format(name)) fo.add_arg('const xed_decoded_inst_t* d') fo.add_code_eol(' const xed_isa_set_enum_t isa_set = xed_decoded_inst_get_isa_set(d)')", "const xed_isa_set_enum_t isa_set = xed_decoded_inst_get_isa_set(d)') # FIXME: 2017-07-14 optimization: could", "MMX instructions that come in with SSE2 & # SSSE3.", "switch = codegen.c_switch_generator_t('isa_set', fo) isa_sets_sorted = sorted(isa_sets) for c in", "the License. # You may obtain a copy of the", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "for the specific language governing permissions and # limitations under", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "to in writing, software # distributed under the License is", "#!/usr/bin/env python # -*- python -*- #BEGIN_LEGAL # #Copyright (c)", "smaller code switch = codegen.c_switch_generator_t('isa_set', fo) isa_sets_sorted = sorted(isa_sets) for", "in ['F16C', 'FMA']: avx_isa_sets.add(ii.isa_set) elif re.search('SSE',ii.isa_set) or ii.isa_set in ['AES','PCLMULQDQ']:", "License. # #END_LEGAL from __future__ import print_function import re import", "& # SSSE3. The several purely MMX instr in SSE", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "# See the License for the specific language governing permissions", "faster checking, smaller code switch = codegen.c_switch_generator_t('isa_set', fo) isa_sets_sorted =", "sorted(isa_sets) for c in isa_sets_sorted: switch.add_case('XED_ISA_SET_{}'.format(c.upper()),[],do_break=False) if len(isa_sets) > 0:", "operations; Those are # just memops. 
if (not re.search('MMX',ii.isa_set) and", "You may obtain a copy of the License at #", "a static array for faster checking, smaller code switch =", "language governing permissions and # limitations under the License. #", "may not use this file except in compliance with the", "or agreed to in writing, software # distributed under the", "for ii in generator.parser_output.instructions: if genutil.field_check(ii, 'iclass'): if re.search('AVX512',ii.isa_set): avx512_isa_sets.add(ii.isa_set)", "agi.generator_list: for ii in generator.parser_output.instructions: if genutil.field_check(ii, 'iclass'): if re.search('AVX512',ii.isa_set):", "avx_isa_sets.add(ii.isa_set) elif re.search('SSE',ii.isa_set) or ii.isa_set in ['AES','PCLMULQDQ']: # Exclude MMX", "with SSE2 & # SSSE3. The several purely MMX instr", "One can look for # those with SSE2MMX and SSSE3MMX", "required by applicable law or agreed to in writing, software", "2019 Intel Corporation # # Licensed under the Apache License,", "= set([]) avx512_isa_sets = set([]) avx512_kmask_op = set([]) for generator", "that come in with SSE2 & # SSSE3. The several", "codegen def _emit_function(fe, isa_sets, name): fo = codegen.function_object_t('xed_classify_{}'.format(name)) fo.add_arg('const xed_decoded_inst_t*", "Corporation # # Licensed under the Apache License, Version 2.0", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "with the License. # You may obtain a copy of", "this file except in compliance with the License. # You", "the Apache License, Version 2.0 (the \"License\"); # you may", "with SSE2MMX and SSSE3MMX xed isa_sets. 
# # Also exclude", "switch.add_case('XED_ISA_SET_{}'.format(c.upper()),[],do_break=False) if len(isa_sets) > 0: switch.add('return 1;') switch.add_default(['return 0;'], do_break=False)", "_emit_function(fe, avx512_kmask_op, 'avx512_maskop') _emit_function(fe, avx_isa_sets, 'avx') _emit_function(fe, sse_isa_sets, 'sse') fe.close()", "not re.search('MWAIT',ii.isa_set)): sse_isa_sets.add(ii.isa_set) fe = agi.open_file('xed-classifiers.c') # xed_file_emitter_t _emit_function(fe, avx512_isa_sets,", "# SSSE3. The several purely MMX instr in SSE are", "under the License. # #END_LEGAL from __future__ import print_function import", "avx_isa_sets = set([]) avx512_isa_sets = set([]) avx512_kmask_op = set([]) for", "SSSE3MMX xed isa_sets. # # Also exclude the SSE_PREFETCH operations;", "avx512_kmask_op.add(ii.isa_set) elif re.search('AVX',ii.isa_set) or ii.isa_set in ['F16C', 'FMA']: avx_isa_sets.add(ii.isa_set) elif", "for c in isa_sets_sorted: switch.add_case('XED_ISA_SET_{}'.format(c.upper()),[],do_break=False) if len(isa_sets) > 0: switch.add('return", "ii.isa_set in ['AES','PCLMULQDQ']: # Exclude MMX instructions that come in", "the SSE_PREFETCH operations; Those are # just memops. if (not", "and not re.search('PREFETCH',ii.isa_set) and not re.search('X87',ii.isa_set) and not re.search('MWAIT',ii.isa_set)): sse_isa_sets.add(ii.isa_set)" ]
[ "deleted.' @pytest.mark.vcr() def test_target_groups_edit(client, new_target_group): target_group = new_target_group edited_name =", "'test_target_group_edit' edited_group = client.target_groups_api.edit(TargetListEditRequest(name=edited_name), target_group.id) assert isinstance(edited_group, TargetGroup), u'The `edit`", "`edit` method did not return type `TargetGroup`.' assert edited_group.id ==", "isinstance(details, TargetGroup), u'The `details` method did not return type `TargetGroup`.'", "= new_target_group edited_name = 'test_target_group_edit' edited_group = client.target_groups_api.edit(TargetListEditRequest(name=edited_name), target_group.id) assert", "`details` method did not return type `TargetGroup`.' assert details.id ==", "target_group.id, u'Expected the `details` response to match the requested target", "group to match the requested target group.' assert edited_group.name ==", "@pytest.mark.vcr() def test_target_groups_list(client): target_groups = client.target_groups_api.list() assert isinstance(target_groups, TargetGroupList), u'The", "u'The `create` method did not return type `TargetGroup`.' @pytest.mark.vcr() def", "== target_group.id, u'Expected the `details` response to match the requested", "@pytest.mark.vcr() def test_target_groups_edit(client, new_target_group): target_group = new_target_group edited_name = 'test_target_group_edit'", "match the requested target group.' assert edited_group.name == edited_name, u'Expected", "`TargetGroup`.' assert edited_group.id == target_group.id, u'Expected the edited target group", "not return type `TargetGroup`.' 
assert edited_group.id == target_group.id, u'Expected the", "test_target_groups_create(new_target_group): assert isinstance(new_target_group, TargetGroup), u'The `create` method did not return", "test_target_groups_delete(client, new_target_group): assert client.target_groups_api.delete(new_target_group.id), u'The target group was not deleted.'", "assert details.id == target_group.id, u'Expected the `details` response to match", "the `details` response to match the requested target group.' @pytest.mark.vcr()", "u'Expected a list of type `TargetGroup`.' @pytest.mark.vcr() def test_target_groups_delete(client, new_target_group):", "from tenable_io.api.target_groups import TargetListEditRequest from tenable_io.api.models import TargetGroup, TargetGroupList @pytest.mark.vcr()", "response to match the requested target group.' @pytest.mark.vcr() def test_target_groups_list(client):", "edited_name = 'test_target_group_edit' edited_group = client.target_groups_api.edit(TargetListEditRequest(name=edited_name), target_group.id) assert isinstance(edited_group, TargetGroup),", "a list of type `TargetGroup`.' @pytest.mark.vcr() def test_target_groups_delete(client, new_target_group): assert", "did not return type `TargetGroup`.' for group in target_groups.target_groups: assert", "of type `TargetGroup`.' @pytest.mark.vcr() def test_target_groups_delete(client, new_target_group): assert client.target_groups_api.delete(new_target_group.id), u'The", "group.' 
assert edited_group.name == edited_name, u'Expected the name to be", "import TargetGroup, TargetGroupList @pytest.mark.vcr() def test_target_groups_create(new_target_group): assert isinstance(new_target_group, TargetGroup), u'The", "isinstance(new_target_group, TargetGroup), u'The `create` method did not return type `TargetGroup`.'", "@pytest.mark.vcr() def test_target_groups_create(new_target_group): assert isinstance(new_target_group, TargetGroup), u'The `create` method did", "TargetGroup), u'The `details` method did not return type `TargetGroup`.' assert", "not return type `TargetGroup`.' assert details.id == target_group.id, u'Expected the", "TargetListEditRequest from tenable_io.api.models import TargetGroup, TargetGroupList @pytest.mark.vcr() def test_target_groups_create(new_target_group): assert", "the requested target group.' assert edited_group.name == edited_name, u'Expected the", "new_target_group edited_name = 'test_target_group_edit' edited_group = client.target_groups_api.edit(TargetListEditRequest(name=edited_name), target_group.id) assert isinstance(edited_group,", "assert edited_group.name == edited_name, u'Expected the name to be updated.'", "= new_target_group details = client.target_groups_api.details(target_group.id) assert isinstance(details, TargetGroup), u'The `details`", "isinstance(target_groups, TargetGroupList), u'The `details` method did not return type `TargetGroup`.'", "details = client.target_groups_api.details(target_group.id) assert isinstance(details, TargetGroup), u'The `details` method did", "assert isinstance(new_target_group, TargetGroup), u'The `create` method did not return type", "target group was not deleted.' @pytest.mark.vcr() def test_target_groups_edit(client, new_target_group): target_group", "assert isinstance(edited_group, TargetGroup), u'The `edit` method did not return type", "return type `TargetGroup`.' assert edited_group.id == target_group.id, u'Expected the edited", "target group.' 
assert edited_group.name == edited_name, u'Expected the name to", "def test_target_groups_details(client, new_target_group): target_group = new_target_group details = client.target_groups_api.details(target_group.id) assert", "return type `TargetGroup`.' assert details.id == target_group.id, u'Expected the `details`", "was not deleted.' @pytest.mark.vcr() def test_target_groups_edit(client, new_target_group): target_group = new_target_group", "method did not return type `TargetGroup`.' assert edited_group.id == target_group.id,", "match the requested target group.' @pytest.mark.vcr() def test_target_groups_list(client): target_groups =", "assert isinstance(group, TargetGroup), u'Expected a list of type `TargetGroup`.' @pytest.mark.vcr()", "edited target group to match the requested target group.' assert", "did not return type `TargetGroup`.' assert details.id == target_group.id, u'Expected", "for group in target_groups.target_groups: assert isinstance(group, TargetGroup), u'Expected a list", "client.target_groups_api.delete(new_target_group.id), u'The target group was not deleted.' @pytest.mark.vcr() def test_target_groups_edit(client,", "target group to match the requested target group.' assert edited_group.name", "did not return type `TargetGroup`.' @pytest.mark.vcr() def test_target_groups_details(client, new_target_group): target_group", "target_groups.target_groups: assert isinstance(group, TargetGroup), u'Expected a list of type `TargetGroup`.'", "new_target_group): assert client.target_groups_api.delete(new_target_group.id), u'The target group was not deleted.' @pytest.mark.vcr()", "u'The `edit` method did not return type `TargetGroup`.' assert edited_group.id", "not deleted.' @pytest.mark.vcr() def test_target_groups_edit(client, new_target_group): target_group = new_target_group edited_name", "list of type `TargetGroup`.' 
@pytest.mark.vcr()
def test_target_groups_delete(client, new_target_group):
    """Delete the created group and verify the API reports success."""
    deleted = client.target_groups_api.delete(new_target_group.id)
    assert deleted, u'The target group was not deleted.'
@pytest.mark.vcr()
def test_target_groups_edit(client, new_target_group):
    """Rename the created group via `edit` and verify type, identity, and new name."""
    new_name = 'test_target_group_edit'
    edit_request = TargetListEditRequest(name=new_name)
    result = client.target_groups_api.edit(edit_request, new_target_group.id)
    assert isinstance(result, TargetGroup), u'The `edit` method did not return type `TargetGroup`.'
    assert result.id == new_target_group.id, u'Expected the edited target group to match the requested target group.'
    assert result.name == new_name, u'Expected the name to be updated.'
@pytest.mark.vcr()
def test_target_groups_list(client):
    """List all target groups and verify the container and element types.

    Fix: the original assertion message was copy-pasted from the `details`
    test and claimed the wrong method (`details`) and wrong type
    (`TargetGroup`); it now names the `list` method and `TargetGroupList`.
    """
    target_groups = client.target_groups_api.list()
    assert isinstance(target_groups, TargetGroupList), u'The `list` method did not return type `TargetGroupList`.'
    for group in target_groups.target_groups:
        assert isinstance(group, TargetGroup), u'Expected a list of type `TargetGroup`.'
[ "= torch.sqrt(CQT_real.pow(2)+CQT_imag.pow(2)+1e-8) return CQT elif output_format=='Complex': return torch.stack((CQT_real,CQT_imag),-1) elif output_format=='Phase':", "value is 'hann' pad_mode : str The padding method. Default", "function to get back another half if onesided: X =", "time)) if return_extras: return pred_stft, pred_mel.detach(), losses return pred_stft def", "of Mel filter banks. The filter banks maps the n_fft", "(*self.wcos.shape,), (*self.cqt_kernels_real.shape,) ) class CQT1992v2(torch.nn.Module): \"\"\"This function is to calculate", "= torch.nn.Parameter(wcos, requires_grad=trainable_kernels) self.register_parameter('wsin', wsin) self.register_parameter('wcos', wcos) else: self.register_buffer('wsin', wsin)", "time() # Create the window function and prepare the shape", "= torch.sqrt(CQT_real.pow(2)+CQT_imag.pow(2)) else: CQT = torch.sqrt(CQT_real.pow(2)+CQT_imag.pow(2)+1e-8) return CQT elif output_format=='Complex':", "= nn.ReflectionPad1d(self.n_fft//2) def forward(self,x,output_format=None): \"\"\" Convert a batch of waveforms", "intended waveform length. By default, ``length=None``, which will remove ``n_fft//2``", "(pp. 1-4), Oct. 2013. Parameters ---------- n_fft : int The", "self.padding = nn.ReflectionPad1d(self.n_fft//2) def forward(self,x,output_format=None): \"\"\" Convert a batch of", "trainable_CQT: cqt_kernels_real = torch.nn.Parameter(cqt_kernels_real, requires_grad=trainable_kernels) cqt_kernels_imag = torch.nn.Parameter(cqt_kernels_imag, requires_grad=trainable_kernels) self.register_parameter('cqt_kernels_real',", "for the positive and negative signs # ifft = e^(+2\\pi*j)*X", "the output. refresh_win : bool Recalculating the window sum square.", "self.n_bins = n_bins self.earlydownsample = earlydownsample # We will activate", "setting ``refresh_win=False``. 
Else please keep ``refresh_win=True`` \"\"\" if (hasattr(self, 'kernel_sin_inv')", "= CQT/self.n_fft*torch.sqrt(self.lenghts.view(-1,1,1)) else: CQT = CQT*torch.sqrt(self.lenghts.view(-1,1,1)) # Normalizing the output", "bool Normalization for the CQT result. basis_norm : int Normalization", "'Complex' will return the STFT result in complex number, shape", ": int The number of Mel filter banks. The filter", "pad_mode self.n_fft = n_fft self.power = power self.trainable_mel = trainable_mel", "# broadcast dimensions to support 2D convolution X_real_bc = X_real.unsqueeze(1)", "= n_fft self.power = power self.trainable_mel = trainable_mel self.trainable_STFT =", "{} if mel_inversion_params: mel_inversion_params = {**default_mel_inversion_params, **mel_inversion_params} if stft_inversion_params: stft_inversion_params", "\"\"\" x = broadcast_dim(x) if self.center: if self.pad_mode == 'constant':", "audio. It is used to calucate the correct ``fmin`` and", "Creating window function for stft and istft later self.w =", "= STFT(n_fft=n_fft, freq_bins=None, hop_length=hop_length, window=window, freq_scale='no', center=center, pad_mode=pad_mode, sr=sr, trainable=trainable_STFT,", "mel_basis = torch.tensor(mel_basis) if verbose==True: print(\"STFT filter created, time used", "is using the resampling method proposed in [1]. Instead of", "print(\"STFT kernels created, time used = {:.4f} seconds\".format(time()-start)) else: pass", "ParameterError('amin must be strictly positive') amin = torch.tensor([amin]) ref =", "refresh_win==True: self.w_sum = torch_window_sumsquare(self.window_mask.flatten(), X.shape[2], self.stride, self.n_fft).flatten() self.nonzero_indices = (self.w_sum>1e-10)", "of (batch, freq_bins, timesteps, 2)\" # If the input spectrogram", "= time() # Creating kernel for mel spectrogram start =", "V = Vc[:, :, :, 0] * W_r - Vc[:,", "returns a tensor of spectrograms. 
``shape = (num_samples, freq_bins,time_steps)`` if", "if trainable: cqt_kernels_real = torch.nn.Parameter(cqt_kernels_real, requires_grad=trainable_kernels) cqt_kernels_imag = torch.nn.Parameter(cqt_kernels_imag, requires_grad=trainable_kernels)", "normalization ### --------------------------- Spectrogram Classes ---------------------------### class STFT(torch.nn.Module): \"\"\"This function", "!= True): raise NameError(\"Please activate the iSTFT module by setting", "= n_bins self.earlydownsample = earlydownsample # We will activate early", "way, the inverse kernel and the forward kernel do not", "- self.top_db) return log_spec def _dct(self, x, norm=None): ''' Refer", "start and the end of the output. refresh_win : bool", "DFT(torch.nn.Module): \"\"\" Experimental feature before `torch.fft` was made avaliable. The", "self.norm = norm # Now norm is used to normalize", "start = time() # Creating kernel for mel spectrogram start", "pass if trainable_mel: # Making everything nn.Parameter, so that this", "nn.Parameter, so that the model can be used with nn.Parallel", "kernelLength=256, transitionBandwidth=0.001) ) # Broadcast the tensor to the shape", "self.kernel_sin, stride=(1,1)) # compute real and imag part. signal lies", "functions to the Fourier kernels window_mask = torch.tensor(window_mask) wsin =", "= torch.tensor(kernel_sin, dtype=torch.float).unsqueeze(-1) kernel_cos = torch.tensor(kernel_cos, dtype=torch.float).unsqueeze(-1) # Decide if", "created, time used = {:.4f} seconds\".format(time()-start)) # Calculate num of", "if self.trainable==False: self.register_buffer('wsin', wsin) self.register_buffer('wcos', wcos) if self.trainable==True: wsin =", "band_center = 0.5, kernelLength=256, transitionBandwidth=0.001 ) ) # Broadcast the", "will be updated during model training. Default value is ``False``", "value is ``None`` which is equivalent to ``n_fft//4``. Please make", "hop (or stride) size. 
Default value is ``None`` which is", "shape = ``(num_samples, freq_bins,time_steps, 2)``; ``Phase`` will return the phase", "If center==True, the STFT window will be put in the", "``None`` which is equivalent to ``n_fft//4``. window : str The", "self.n_fft = n_fft self.win_length = win_length self.n_iter = n_iter self.center", "value if ``True``. Please make sure the value is the", "cqt_kernels_real) self.register_buffer('cqt_kernels_imag', cqt_kernels_imag) # If center==True, the STFT window will", "import * sz_float = 4 # size of a float", "output. If your input spectrograms X are of the same", "``(num_samples, freq_bins,time_steps, 2)`` if ``output_format='Complex' or 'Phase'``; Examples -------- >>>", "time() kernel_sin, kernel_cos, self.bins2freq, _, window = create_fourier_kernels(self.kernel_width, window='ones', freq_scale='no')", "imag/self.n_fft) class iSTFT(torch.nn.Module): \"\"\"This class is to convert spectrograms back", "algorithm. Therefore, we can reuse the code from the 1992", "trainable_CQT : bool Determine if the frequency domain CQT kernel", "STFT(torch.nn.Module): \"\"\"This function is to calculate the short-time Fourier transform", "which coresponds to the note C0. fmax : float The", "= torch.nn.Parameter(cqt_kernels_real, requires_grad=trainable_kernels) cqt_kernels_imag = torch.nn.Parameter(cqt_kernels_imag, requires_grad=trainable_kernels) self.register_parameter('cqt_kernels_real', cqt_kernels_real) self.register_parameter('cqt_kernels_imag',", "or not. Default is ``False`` norm : int Normalization for", "is equivalent to the next lower octave. The kernel creation", "// 4) self.n_fft = n_fft self.win_length = win_length self.stride =", "frequency for the lowest Mel filter bank. fmax : int", "to waveforms. 
It only works for the complex value spectrograms.", "n_fft=2048, n_mels=128, hop_length=512, window='hann', center=True, pad_mode='reflect', power=2.0, htk=False, fmin=0.0, fmax=None,", "2)``; ``Phase`` will return the phase of the STFT reuslt,", "will be automatically broadcast to the right shape \"\"\" x", "torch.sqrt(spec+1e-8) # prevent Nan gradient when sqrt(0) due to output=0", "the start and the end of the output. If your", "and the Mel filter banks will be updated during model", "filter created, time used = {:.4f} seconds\".format(time()-start)) else: pass if", "CQT = CQT/self.kernel_width*torch.sqrt(self.lenghts.view(-1,1,1)) else: CQT = CQT*torch.sqrt(self.lenghts.view(-1,1,1)) if output_format=='Magnitude': #", "padding method. Default value is 'reflect'. htk : bool When", "early downsampling later if possible self.trainable = trainable self.output_format =", "filter bank. fmax : int The ending frequency for the", "for possible windowing functions. The default value is 'hann' pad_mode", "of a constant Q transform.” (1992). This function is to", "downsampling filter created, \\ time used = {:.4f} seconds\".format(time()-start)) else:", "the same as the forward STFT. window : str The", "same as the forward STFT. 
freq_scale : 'linear', 'log', or", "= torch.tensor(fft_basis.real.astype(np.float32)) cqt_kernels_imag = torch.tensor(fft_basis.imag.astype(np.float32)) if verbose==True: print(\"CQT kernels created,", "/ freqs) lenghts = torch.tensor(lenghts).float() self.register_buffer('lenghts', lenghts) self.basis = basis", "the phase of the STFT reuslt, shape = ``(num_samples, freq_bins,time_steps,", "early_downsample_filter) if verbose==True: print(\"Early downsampling filter created, \\ time used", "Kernel size={}, iSTFT={}, trainable={}'.format( self.n_fft, (*self.wsin.shape,), self.iSTFT, self.trainable ) class", "STFT by using conv1d # remove redundant parts spec_real =", "self.cqt_kernels_imag), (fourier_real, fourier_imag)) CQT = torch.stack((CQT_real,-CQT_imag),-1) if self.norm: CQT =", "n_filters, bins_per_octave, norm=basis_norm, topbin_check=False) # This is for the normalization", "= time() sr, self.hop_length, self.downsample_factor, early_downsample_filter, \\ self.earlydownsample = get_early_downsample_params(sr,", "= n_iter self.center = center self.pad_mode = pad_mode self.momentum =", "bin {}Hz has exceeded the Nyquist frequency, \\ please reduce", "= trainable_mel self.trainable_STFT = trainable_STFT self.verbose = verbose # Preparing", "is the same as the forward STFT. 
sr : int", "real = real.squeeze(-2)*self.window_mask # Normalize the amplitude with n_fft real", "self.n_fft, self.hop_length, win_length=self.win_length, window=self.w, pad_mode=self.pad_mode) # Phase update rule angles[:,:,:]", "padding(x_real) # x_imag = padding(x_imag) # Watch out for the", "spec_layer = Spectrogram.iSTFT() >>> specs = spec_layer(x) \"\"\" def __init__(self,", "AssertionError(\"Signal length shorter than reflect padding length (n_fft // 2).\")", "sr, self.hop_length, self.downsample_factor, early_downsample_filter, \\ self.earlydownsample = get_early_downsample_params(sr, hop_length, fmax_t,", "Create filter windows for stft kernel_sin, kernel_cos, self.bins2freq, self.bin_list, window_mask", "early downsampling factor is to downsample the input audio to", "n_mfcc # attributes that will be used for _power_to_db if", "following shapes.\\n 1. ``(len_audio)``\\n 2. ``(num_audio, len_audio)``\\n 3. ``(num_audio, 1,", "the STFT results with a gigantic CQT kernel covering the", "same as the forward STFT. fmin : int The starting", "start = time() # Create the window function and prepare", "= torch.cat((kernel_cos, kernel_cos[1:-1].flip(0)), 0) if iSTFT: self.register_buffer('kernel_sin_inv', kernel_sin_inv.unsqueeze(-1)) self.register_buffer('kernel_cos_inv', kernel_cos_inv.unsqueeze(-1))", "computational speed. \"\"\" if refresh_win==None: refresh_win=self.refresh_win assert X.dim()==4 , \"Inverse", "Experimental feature before `torch.fft` was made avaliable. The inverse function", "the same output length of the original waveform, please set", "Examples -------- >>> spec_layer = Spectrogram.MFCC() >>> mfcc = spec_layer(x)", "so that it can be broadcasted batch_wise_max = log_spec.flatten(1).max(1)[0].unsqueeze(1).unsqueeze(1) log_spec", "nn.Parallel # self.kernel_sin = torch.nn.Parameter(self.kernel_sin, requires_grad=self.trainable) # self.kernel_cos = torch.nn.Parameter(self.kernel_cos,", "center of the STFT kernel. Default value if ``True``. 
pad_mode", ": float The frequency for the highest CQT bin. Default", "type of spectrogram to be return. Can be either ``Magnitude``", "__init__(self, sr=22050, n_mfcc=20, norm='ortho', verbose=True, ref=1.0, amin=1e-10, top_db=80.0, **kwargs): super().__init__()", "as ``torch.nn.Module``. This alogrithm uses the resampling method proposed in", "trainable_mel : bool Determine if the Mel filter banks are", "sr / freqs) lenghts = torch.tensor(lenghts).float() self.register_buffer('lenghts', lenghts) self.basis =", "The output_format can also be changed during the ``forward`` method.", "print(\"Creating STFT kernels ...\", end='\\r') start = time() kernel_sin, kernel_cos,", "+ spec_imag.pow(2) if self.trainable==True: return torch.sqrt(spec+1e-8) # prevent Nan gradient", "``trainable=True`` and ``freq_scale!='no'``, there is no guarantee that the inverse", ">>> specs = spec_layer(x) \"\"\" def __init__(self, sr=22050, hop_length=512, fmin=220,", "self.pad_amount:self.pad_amount + length] else: real = real[:, :length] return real", "only support type-II DCT at the moment. Input signal should", "if iSTFT: self.register_buffer('kernel_sin_inv', kernel_sin_inv.unsqueeze(-1)) self.register_buffer('kernel_cos_inv', kernel_cos_inv.unsqueeze(-1)) # Making all these", "proposed in [1]. 
I slightly modify it so that it", "= stft_inversion_params or {} if mel_inversion_params: mel_inversion_params = {**default_mel_inversion_params, **mel_inversion_params}", "dtype=torch.float) kernel_cos = torch.tensor(kernel_cos, dtype=torch.float) # In this way, the", ": bool Determine if the CQT kernels are trainable or", "self.n_fft = n_fft self.win_length = win_length self.stride = hop_length self.center", "extra_repr(self) -> str: return 'Mel filter banks size = {},", "fourier_imag)) CQT = torch.stack((CQT_real,-CQT_imag),-1) if self.norm: CQT = CQT/self.kernel_width*torch.sqrt(self.lenghts.view(-1,1,1)) else:", "the iSTFT kernel, if ``True``, the time index is the", "calculating the correct frequency. hop_length : int The hop (or", "fold import numpy as np from time import time from", "-------- >>> spec_layer = Spectrogram.STFT() >>> specs = spec_layer(x) \"\"\"", "have the magnitude spectrograms, please use :func:`~nnAudio.Spectrogram.Griffin_Lim`. Parameters ---------- onesided", "not need to be windowed window_mask = torch.tensor(window_mask).unsqueeze(0).unsqueeze(-1) # kernel_sin", "np from time import time from nnAudio.librosa_functions import * from", "octave. Default is 12. trainable_STFT : bool Determine if the", "pass filter created, time used = {:.4f} seconds\".format(time()-start)) # Caluate", "momentum=0.99, device='cpu'): super().__init__() self.n_fft = n_fft self.win_length = win_length self.n_iter", "= conv1d(x, self.wcos, stride=self.hop_length) fourier_imag = conv1d(x, self.wsin, stride=self.hop_length) #", "pad_mode self.n_bins = n_bins self.output_format = output_format self.earlydownsample = earlydownsample", "layer. 
Default value is 'cpu' \"\"\" def __init__(self, n_fft, n_iter=32,", "pad_mode self.output_format = output_format # creating kernels for CQT Q", "fft_basis = fft(basis)[:,:self.n_fft//2+1] # Convert CQT kenral from time domain", "elif output_format=='Complex': return torch.stack((spec_real,-spec_imag), -1) # Remember the minus sign", "used = {:.4f} seconds\".format(time()-start)) # creating kernels for stft #", "Griffin_Lim(torch.nn.Module): \"\"\" Converting Magnitude spectrograms back to waveforms based on", "sr=22050, hop_length=512, fmin=32.70, fmax=None, n_bins=84, bins_per_octave=12, norm=True, basis_norm=1, window='hann', pad_mode='reflect',", "Calculate the top bin frequency fmax_t = self.fmin_t*2**((bins_per_octave-1)/bins_per_octave) else: #", ":length] return real def extra_repr(self) -> str: return 'n_fft={}, Fourier", "required. if self.pad_mode == 'constant': self.padding = nn.ConstantPad1d(self.n_fft//2, 0) elif", "C0. fmax : float The frequency for the highest CQT", "self.m_mfcc = n_mfcc # attributes that will be used for", "swapping back the time axis and freq axis def forward(self,", "automatically broadcast to the right shape \"\"\" x = self.melspec_layer(x)", "spectrograms, please use :func:`~nnAudio.Spectrogram.Griffin_Lim`. Parameters ---------- onesided : bool If", "basis_norm=1, window='hann', pad_mode='reflect', earlydownsample=True, trainable=False, output_format='Magnitude', verbose=True): super().__init__() self.norm =", "raise AssertionError(\"Signal length shorter than reflect padding length (n_fft //", "V.permute(0,2,1) # swapping back the time axis and freq axis", "trainable_CQT=False, bins_per_octave=12, output_format='Complex', norm=1, window='hann', center=True, pad_mode='reflect'): super().__init__() # norm", "kernel. Default value if ``True``. 
Please make sure the value", "x, output_format=None): \"\"\" Convert a batch of waveforms to CQT", "---------- onesided : bool If your spectrograms only have ``n_fft//2+1``", "= torch.tensor(window_mask).unsqueeze(0).unsqueeze(-1) # kernel_sin and kernel_cos have the shape (freq_bins,", "If your spectrograms only have ``n_fft//2+1`` frequency bins, please use", "optimization\") if loss_threshold and loss < loss_threshold: if verbose: print(f\"Target", "min(bins_per_octave, n_bins) self.n_octaves = int(np.ceil(float(n_bins) / bins_per_octave)) if verbose==True: print(\"num_octave", "start = time() cqt_kernels, self.kernel_width, lenghts = create_cqt_kernels(Q, sr, fmin,", "Phase rand_phase = torch.randn(*S.shape, device=self.device) angles = torch.empty((*S.shape,2), device=self.device) angles[:,", "iSTFT kernel. Default value if ``True``. Please make sure the", "octave. Default is 12. norm : int Normalization for the", "# Please don't use the following classes # class DFT(torch.nn.Module):", "from the 1992 alogrithm [2] [1] Schörkhuber, Christian. “CONSTANT-Q TRANSFORM", "4 # size of a float epsilon = 10e-8 #", "\"\"\" def __init__(self, n_fft=2048, win_length=None, freq_bins=None, hop_length=None, window='hann', freq_scale='no', center=True,", "be automatically broadcast to the right shape output_format : str", "Convert CQT kenral from time domain to freq domain #", "``True``. pad_mode : str The padding method. Default value is", "the code from the 1992 alogrithm [2] [1] Schörkhuber, Christian.", "We set the frequency range in the CQT filter instead", "final CQT result by dividing n_fft # basis_norm is for", "removes -0.0 elements, which leads to error in calculating phase", "Removing unwanted bottom bins # print(\"downsample_factor = \",self.downsample_factor) # print(CQT.shape)", "back to waveforms. 
It only works for the complex value", "kernels from numpy arrays to torch tensors wsin = torch.tensor(kernel_sin", "stft_inversion_params: stft_inversion_params = {**default_stft_inversion_params, **stft_inversion_params} recon_stft = self.to_stft(melspec, **mel_inversion_params) recon_audio", ": str Determine the return type. 'Magnitude' will return the", "trainable=trainable_STFT, output_format=\"Magnitude\", verbose=verbose, **kwargs) # Create filter windows for stft", "kernel for mel spectrogram start = time() mel_basis = mel(sr,", "spectrogram : torch.tensor It returns a tensor of spectrograms. ``shape", "correct inverse. If trainability is not required, it is recommended", "len_audio)``\\n 3. ``(num_audio, 1, len_audio)`` It will be automatically broadcast", "int The hop (or stride) size. Default value is ``None``", "# Getting CQT Amplitude CQT = torch.sqrt(CQT_real.pow(2)+CQT_imag.pow(2)) return CQT*torch.sqrt(self.lenghts.view(-1,1)) class", "= trainable start = time() # Create filter windows for", "rand_phase) angles[:,:,:,1] = torch.sin(2 * np.pi * rand_phase) # Initializing", "as the forward STFT. sr : int The sampling rate", "setting `iSTFT=True` if you want to use `inverse`\") assert X.dim()==4", "It will be automatically broadcast to the right shape \"\"\"", "make it a torch tensor if verbose==True: print(\"Creating low pass", "is the same as the forward STFT. freq_scale : 'linear',", "scale is quasi-logarithmic. When ``True`` is used, the Mel scale", "cepstral coefficients (MFCCs) of the input signal. This algorithm first", "output_format self.earlydownsample = earlydownsample # TODO: activate early downsampling later", "The ending frequency for the highest frequency bin. If freq_scale", "or (hasattr(self, 'kernel_cos_inv') != True): raise NameError(\"Please activate the iSTFT", "verbose=verbose, **kwargs) self.m_mfcc = n_mfcc # attributes that will be", "the spacing between each frequency bin. 
When `linear` or `log`", "self.stride = hop_length self.center = center self.pad_amount = self.n_fft //", "spec_layer = Spectrogram.MelSpectrogram() >>> specs = spec_layer(x) \"\"\" def __init__(self,", "window='hann', center=True, pad_mode='reflect', momentum=0.99, device='cpu'): super().__init__() self.n_fft = n_fft self.win_length", "fmax=None, n_bins=84, bins_per_octave=12, norm=True, basis_norm=1, window='hann', pad_mode='reflect', trainable_STFT=False, trainable_CQT=False, output_format='Complex',", "size for the STFT. Default value is 2048 n_mels :", "as the :func:`~nnAudio.Spectrogram.iSTFT` class, which is to convert spectrograms back", "used = {:.4f} seconds\".format(time()-start)) else: pass def forward(self, X, onesided=False,", "padding = nn.ConstantPad1d(self.pad_amount, 0) elif self.pad_mode == 'reflect': if self.num_samples", "the final MFCCs. Therefore, the Mel spectrogram part can be", "= pad_mode self.norm = norm self.output_format = output_format # creating", "number of timesteps, you can increase the speed by setting", "self.n_octaves = int(np.ceil(float(n_bins) / bins_per_octave)) if verbose==True: print(\"num_octave = \",", "torch.nn.Parameter(window_mask, requires_grad=trainable_window) self.register_parameter('window_mask', window_mask) else: self.register_buffer('window_mask', window_mask) if verbose==True: print(\"iSTFT", "self.register_buffer('kernel_sin_inv', kernel_sin_inv.unsqueeze(-1)) self.register_buffer('kernel_cos_inv', kernel_cos_inv.unsqueeze(-1)) # Making all these variables nn.Parameter,", "= torch.matmul(self.mel_basis, spec) return melspec def extra_repr(self) -> str: return", "using ``trainable_mel`` and ``trainable_STFT``. It only support type-II DCT at", "module or not. By default, it is False to save", "print(\"Creating low pass filter ...\", end='\\r') start = time() lowpass_filter", "requires_grad=self.trainable) # Applying window functions to the Fourier kernels window_mask", "str The windowing function for STFT. 
It uses ``scipy.signal.get_window``, please", "1/(2**(1/bins_per_octave)-1) # Creating lowpass filter and make it a torch", "is to calculate the Melspectrogram of the input signal. Input", "0) elif self.pad_mode == 'reflect': padding = nn.ReflectionPad1d(self.n_fft//2) x =", "Normalizing the output with the downsampling factor, 2**(self.n_octaves-1) # is", "self.hop_length CQT = get_cqt_complex(x, self.cqt_kernels_real, self.cqt_kernels_imag, hop, self.padding) # Getting", "loss_threshold: if verbose: print(f\"Target error of {loss_threshold} reached. Stopping optimization.\")", "for Griffin-Lim. The default value is ``32`` hop_length : int", "padding(x) # CQT CQT_real = conv1d(x, self.cqt_kernels_real, stride=self.hop_length) * \\", "get_cqt_complex(x_down, self.cqt_kernels_real, self.cqt_kernels_imag, hop, self.padding) CQT = torch.cat((CQT1, CQT),1) CQT", "window='hann', center=True, pad_mode='reflect', trainable=False, output_format='Magnitude', verbose=True): super().__init__() # norm arg", "losses = [] for i in range(max_steps): optimizer.zero_grad() pred_mel =", "int Number of bins per octave. Default is 12. trainable_STFT", "CQT filter instead of here. if verbose==True: print(\"Creating STFT kernels", "= torch.tensor(cqt_kernels.imag).unsqueeze(1) if trainable: cqt_kernels_real = torch.nn.Parameter(cqt_kernels_real, requires_grad=trainable_kernels) cqt_kernels_imag =", "``(num_audio, len_audio)``\\n 3. 
``(num_audio, 1, len_audio)`` It will be automatically", "= nn.ConstantPad1d(self.kernel_width//2, 0) elif self.pad_mode == 'reflect': padding = nn.ReflectionPad1d(self.kernel_width//2)", "CQT*self.downsample_factor # Normalize again to get same result as librosa", "pad_mode self.momentum = momentum self.device = device if win_length==None: self.win_length=n_fft", "using Griffin-Lim.\" if onesided: X = extend_fbins(X) # extend freq", "# wav2spec conversion rebuilt = torch.stft(inverse, self.n_fft, self.hop_length, win_length=self.win_length, window=self.w,", "# Remember the minus sign for imaginary part elif output_format=='Phase':", "Workshop on Applications of Signal Processing to Audio and Acoustics", "= torch.tensor(wcos) if verbose==True: print(\"STFT kernels created, time used =", "hop = self.hop_length CQT = get_cqt_complex(x, self.wcos, self.wsin, hop, self.padding)", "torch.tensor(kernel_sin, dtype=torch.float) kernel_cos = torch.tensor(kernel_cos, dtype=torch.float) # In this way,", "melspec, mel_inversion_params=None, stft_inversion_params=None): default_mel_inversion_params = {} default_stft_inversion_params = {} mel_inversion_params", "else use ``onesided=False`` To make sure the inverse STFT has", "-------- >>> spec_layer = Spectrogram.CQT2010v2() >>> specs = spec_layer(x) \"\"\"", "time used = {:.4f} seconds\".format(time()-start)) else: pass def forward(self, x,", "is not functioning self.hop_length = hop_length self.center = center self.pad_mode", "def loss_fn(pred, target): pred = pred.unsqueeze(1) if pred.ndim == 3", ": bool If your spectrograms only have ``n_fft//2+1`` frequency bins,", "By default, it is False to save GPU memory. fmin", "win_length==None: self.win_length=n_fft else: self.win_length=win_length if hop_length==None: self.hop_length = n_fft//4 else:", "iSTFT module or not. By default, it is False to", "the forward STFT. center : bool Putting the iSTFT keneral", "bool If ``True``, it shows layer information. 
If ``False``, it", "return the phase of the STFT reuslt, shape = ``(num_samples,", "print(\"STFT kernels created, time used = {:.4f} seconds\".format(time()-start)) def forward(self,", "that it runs faster than the original 1992 algorithm, that", "requires_grad=trainable_kernels) kernel_cos = torch.nn.Parameter(kernel_cos, requires_grad=trainable_kernels) self.register_parameter('kernel_sin', kernel_sin) self.register_parameter('kernel_cos', kernel_cos) else:", "the correct frequency. hop_length : int The hop (or stride)", "real = real[:, self.pad_amount:-self.pad_amount] else: if self.center: real = real[:,", "X_imag.unsqueeze(1) a1 = conv2d(X_real_bc, self.kernel_cos_inv, stride=(1,1)) b2 = conv2d(X_imag_bc, self.kernel_sin_inv,", "basis # These cqt_kernel is already in the frequency domain", "* np.pi * rand_phase) # Initializing the rebuilt magnitude spectrogram", "\", self.n_octaves) # Calculate the lowest frequency bin for the", "kernels created, time used = {:.4f} seconds\".format(time()-start)) # print(\"Getting cqt", "# print(f'win_length={self.win_length}\\tw={self.w.shape}') inverse = torch.istft(S.unsqueeze(-1) * angles, self.n_fft, self.hop_length, win_length=self.win_length,", "torch.tensor(wsin) wcos = torch.tensor(wcos) if verbose==True: print(\"STFT kernels created, time", "with fixed number of timesteps, you can increase the speed", "def inverse(self,x_real,x_imag): \"\"\" Convert a batch of waveforms to CQT", "the frequency domain cqt_kernels_real = torch.tensor(fft_basis.real.astype(np.float32)) cqt_kernels_imag = torch.tensor(fft_basis.imag.astype(np.float32)) if", "@ melspec).clamp(eps) pred_stft = nn.Parameter(pred_stft, requires_grad=True) sgd_kwargs[\"lr\"] = sgd_kwargs[\"lr\"] *", "seconds\".format(time()-start)) def forward(self, x, output_format=None): \"\"\" Convert a batch of", "sr, self.fmin_t, n_filters, bins_per_octave, norm=basis_norm, topbin_check=False) # This is for", "``inverse`` method under the ``STFT`` class to save GPU/RAM 
memory.", "CQT = get_cqt_complex(x, self.wcos, self.wsin, hop, self.padding) # Getting the", "the bin spacing can be controlled by ``fmin`` and ``fmax``.", "# extend freq X_real, X_imag = X[:, :, :, 0],", "the STFT kernel, if ``True``, the time index is the", "# normalizing the phase # Using the final phase to", "(1 + self.momentum)) * tprev[:,:,:] # Phase normalization angles =", "(2 * N) W_r = torch.cos(k) W_i = torch.sin(k) V", "output_format can also be changed during the ``forward`` method. verbose", "create_cqt_kernels(Q, sr, self.fmin_t, n_filters, bins_per_octave, norm=basis_norm, topbin_check=False) # This is", "the phase # Using the final phase to reconstruct the", "-> str: return 'n_fft={}, Fourier Kernel size={}, iSTFT={}, trainable={}'.format( self.n_fft,", "spectrogram to be return. Can be either ``Magnitude`` or ``Complex``", "``refresh_win=None`` to increase computational speed. \"\"\" if refresh_win==None: refresh_win=self.refresh_win assert", "will also be caluclated and the STFT kernels will be", "you have the magnitude spectrograms, please use :func:`~nnAudio.Spectrogram.Griffin_Lim`. The parameters", "required, it is recommended to use the ``inverse`` method under", "self.pad_amount = self.n_fft // 2 self.window = window self.win_length =", "before `torch.fft` was made avaliable. The inverse function only works", "float epsilon = 10e-8 # fudge factor for normalization ###", "bool To activate the iSTFT module or not. By default,", "# norm arg is not functioning self.hop_length = hop_length self.center", "freq_scale='no') wsin = kernel_sin * window wcos = kernel_cos *", "not. If ``True``, the gradients for STFT kernels will also", "# if self.pad_mode == 'constant': # padding = nn.ConstantPad1d(self.n_fft//2, 0)", "'Phase' will return the phase of the STFT reuslt, shape", "= pad_mode self.n_fft = n_fft self.power = power self.trainable_mel =", "updated during model training. Default value is ``False``. 
trainable_window :", ": bool Putting the STFT keneral at the center of", "frequency for the highest Mel filter bank. trainable_mel : bool", "\"\\nIf you have a magnitude spectrogram, please consider using Griffin-Lim.\"", "if self.pad_mode == 'constant': self.padding = nn.ConstantPad1d(self.n_fft//2, 0) elif self.pad_mode", "the minus sign for imaginary part elif output_format=='Phase': return torch.atan2(-spec_imag+0.0,spec_real)", "STFT reuslt, shape = ``(num_samples, freq_bins,time_steps, 2)``. The complex number", "if self.trainable==False: # Getting CQT Amplitude CQT = torch.sqrt(CQT_real.pow(2)+CQT_imag.pow(2)) else:", "batch of waveforms to spectrograms. Parameters ---------- x : torch", "the Fourier kernels window_mask = torch.tensor(window_mask) wsin = kernel_sin *", "_, n_freq = mel_basis.shape melspec = melspec.detach().view(-1, n_mels, time) if", "method. Default value is 'reflect'. htk : bool When ``False``", "is 'hann'. freq_scale : 'linear', 'log', or 'no' Determine the", "``(num_audio, 1, len_audio)`` The correct shape will be inferred autommatically", "frequency. hop_length : int The hop (or stride) size. Default", "is same as the normalization used in librosa. 
window :", "number, shape = ``(num_samples, freq_bins,time_steps, 2)``; ``Phase`` will return the", "super().__init__() # norm arg is not functioning self.trainable = trainable", "kernels ...\", end='\\r') start = time() basis, self.n_fft, lenghts =", "need to deal with the filter and other tensors def", "window='hann', freq_scale='no', center=True, pad_mode='reflect', iSTFT=False, fmin=50, fmax=6000, sr=22050, trainable=False, output_format=\"Complex\",", "hop_length=hop_length, window=window, freq_scale='no', center=center, pad_mode=pad_mode, sr=sr, trainable=trainable_STFT, output_format=\"Magnitude\", verbose=verbose, **kwargs)", "verbose==True: print(\"Low pass filter created, time used = {:.4f} seconds\".format(time()-start))", "torch.tensor(create_lowpass_filter( band_center = 0.5, kernelLength=256, transitionBandwidth=0.001 ) ) # Broadcast", ": torch tensor Input signal should be in either of", "print(\"downsample_factor = \",self.downsample_factor) # print(CQT.shape) # print(self.lenghts.view(-1,1).shape) # Normalizing the", "melspec.shape batch_size, n_mels, time = shape[0], shape[-2], shape[-1] _, n_freq", "``fmin`` and ``fmax``. Setting the correct sampling rate is very", "self.register_buffer('cqt_kernels_real', cqt_kernels_real) self.register_buffer('cqt_kernels_imag', cqt_kernels_imag) if verbose==True: print(\"CQT kernels created, time", "for the input audio. 
It is used to calculate the", "and without early downsampling are more or less the same", "* rand_phase) # Initializing the rebuilt magnitude spectrogram rebuilt =", "CQT = CQT[:,-self.n_bins:,:] # Removing unwanted bottom bins # print(\"downsample_factor", "wsin = torch.tensor(wsin) wcos = torch.tensor(wcos) if verbose==True: print(\"STFT kernels", "center=True, pad_mode='reflect', fmin=50, fmax=6000, sr=22050): super().__init__() self.stride = hop_length self.center", "output_format=='Magnitude': if self.trainable==False: # Getting CQT Amplitude return torch.sqrt(CQT.pow(2).sum(-1)) else:", "* sz_float = 4 # size of a float epsilon", "bins per octave. Default is 12. norm : int Normalization", "norm, window, fmax) self.register_buffer('lenghts', lenghts) cqt_kernels_real = torch.tensor(cqt_kernels.real).unsqueeze(1) cqt_kernels_imag =", "refresh_win=self.refresh_win assert X.dim()==4 , \"Inverse iSTFT only works for complex", "stride) size. Default value is 512. fmin : float The", "= torch.rfft(v, 1, onesided=False) # TODO: Can make the W_r", "self.norm = norm self.output_format = output_format # creating kernels for", "= torch.sin(torch.atan2(CQT[:,:,:,1],CQT[:,:,:,0])) return torch.stack((phase_real,phase_imag), -1) class CQT(CQT1992v2): \"\"\"An abbreviation for", "angles[:, :,:,0] = torch.cos(2 * np.pi * rand_phase) angles[:,:,:,1] =", "'no' is used, the bin will start at 0Hz and", "self.pad_amount: raise AssertionError(\"Signal length shorter than reflect padding length (n_fft", "for i in range(max_steps): optimizer.zero_grad() pred_mel = mel_basis @ pred_stft", "window='hann', center=True, pad_mode='reflect', power=2.0, htk=False, fmin=0.0, fmax=None, norm=1, trainable_mel=False, trainable_STFT=False,", "``torch.nn.Module``. Parameters ---------- n_fft : int The window size. Default", "self.pad_mode = pad_mode self.norm = norm self.output_format = output_format #", "Christian. “CONSTANT-Q TRANSFORM TOOLBOX FOR MUSIC PROCESSING.” (2010). 
[2] Brown,", "hop_length = int(win_length // 4) self.output_format = output_format self.trainable =", "``True``. Please make sure the value is the same as", "create_cqt_kernels(Q, sr, fmin, n_bins, bins_per_octave, norm, window, fmax) self.register_buffer('lenghts', lenghts)", "these variables nn.Parameter, so that the model can be used", "this argument does nothing. Please make sure the value is", "nn.ConstantPad1d(self.n_fft//2, 0) elif self.pad_mode == 'reflect': padding = nn.ReflectionPad1d(self.n_fft//2) x", "if output_format=='Magnitude': if self.trainable==False: # Getting CQT Amplitude return torch.sqrt(CQT.pow(2).sum(-1))", "melspec = melspec.detach().view(-1, n_mels, time) if random_start: pred_stft_shape = (batch_size,", "batch of waveforms to MFCC. Parameters ---------- x : torch", "output_format='Complex', earlydownsample=True, verbose=True): super().__init__() self.norm = norm # Now norm", "be either ``Magnitude`` or ``Complex`` or ``Phase``. Default value is", "is for the normalization in the end freqs = fmin", "= torch.tensor(wsin, dtype=torch.float) self.wcos = torch.tensor(wcos, dtype=torch.float) def forward(self,x): \"\"\"", "trainable=False, output_format='Magnitude', verbose=True): super().__init__() # norm arg is not functioning", "= extend_fbins(X) # extend freq X_real, X_imag = X[:, :,", "factor, 2**(self.n_octaves-1) # is make it same mag as 1992", "earlydownsample=True, trainable=False, output_format='Magnitude', verbose=True): super().__init__() self.norm = norm # Now", "length (n_fft // 2).\") padding = nn.ReflectionPad1d(self.pad_amount) x = padding(x)", "# Removing unwanted top bins if self.norm: CQT = CQT/self.n_fft*torch.sqrt(self.lenghts.view(-1,1,1))", "verbose==True: print(\"Early downsampling filter created, \\ time used = {:.4f}", "= conv2d(X_real_bc, self.kernel_cos_inv, stride=(1,1)) b2 = conv2d(X_imag_bc, self.kernel_sin_inv, stride=(1,1)) #", "filter created, time used = {:.4f} seconds\".format(time()-start)) # Caluate 
num", "banks size = {}, trainable_mel={}'.format( (*self.mel_basis.shape,), self.trainable_mel, self.trainable_STFT ) def", "{}'.format( (self.n_mfcc) ) class CQT1992(torch.nn.Module): \"\"\" This alogrithm uses the", "functioning self.hop_length = hop_length self.center = center self.pad_mode = pad_mode", "the n_bins'.format(fmax_t)) if self.earlydownsample == True: # Do early downsampling", "to error in calculating phase def inverse(self, X, onesided=True, length=None,", "trainable_STFT==True: # self.wsin = torch.nn.Parameter(self.wsin) # self.wcos = torch.nn.Parameter(self.wcos) def", "int(win_length // 4) self.n_fft = n_fft self.win_length = win_length self.stride", "Prepare the right shape to do inverse # if self.center:", "code from the 1992 alogrithm [2] [1] <NAME>. “CONSTANT-Q TRANSFORM", "the downsampled input is equivalent to the next lower octave.", "have different time steps if hasattr(self, 'w_sum')==False or refresh_win==True: self.w_sum", "Examples -------- >>> spec_layer = Spectrogram.CQT1992v2() >>> specs = spec_layer(x)", "to_stft(self, melspec, max_steps=1000, loss_threshold=1e-8, grad_threshold=1e-7, random_start=False, sgd_kwargs=None, eps=1e-12, return_extras=False, verbose=None):", "filter ...\", end='\\r') start = time() lowpass_filter = torch.tensor(create_lowpass_filter( band_center", "is ``None``, therefore the higest CQT bin is inferred from", "please reduce the n_bins'.format(fmax_t)) if self.earlydownsample == True: # Do", "spectrograms. Parameters ---------- x_real : torch tensor Real part of", "self.mel_basis = torch.nn.Parameter(self.mel_basis) # if trainable_STFT==True: # self.wsin = torch.nn.Parameter(self.wsin)", "= padding(x_imag) # Watch out for the positive and negative", "\"\"\"An abbreviation for :func:`~nnAudio.Spectrogram.CQT1992v2`. Please refer to the :func:`~nnAudio.Spectrogram.CQT1992v2` documentation\"\"\"", "therefore the higest CQT bin is inferred from the ``n_bins``", "the return type. 
``Magnitude`` will return the magnitude of the", "wcos = torch.nn.Parameter(wcos, requires_grad=trainable_kernels) self.register_parameter('wsin', wsin) self.register_parameter('wcos', wcos) else: self.register_buffer('wsin',", "= {}, trainable_mel={}'.format( (*self.mel_basis.shape,), self.trainable_mel, self.trainable_STFT ) def to_stft(self, melspec,", "If you have an input with fixed number of timesteps,", "Default value is ``False``. trainable_STFT : bool Determine if the", "# Doing STFT by using conv1d # remove redundant parts", "if verbose==True: print(\"Creating low pass filter ...\", end='\\r') start =", "classes # class DFT(torch.nn.Module): \"\"\" Experimental feature before `torch.fft` was", "algorithm,” IEEE Workshop on Applications of Signal Processing to Audio", "'reflect': if self.num_samples < self.pad_amount: raise AssertionError(\"Signal length shorter than", "self.verbose # SGD arguments default_sgd_kwargs = dict(lr=1e3, momentum=0.9) if sgd_kwargs:", "the method proposed in [1]. I slightly modify it so", "basis self.hop_length = hop_length self.pad_mode = pad_mode self.n_bins = n_bins", "n_fft, n_mels, fmin, fmax, htk=htk, norm=norm) mel_basis = torch.tensor(mel_basis) if", "I slightly modify it so that it runs faster than", "transform (STFT) of the input signal. Input signal should be", "which means ``n_fft//2+1`` bins Please make sure the value is", "bank. fmax : int The ending frequency for the highest", "\\ please reduce the n_bins'.format(fmax_t)) if self.earlydownsample == True: #", "Determine the return type. ``Magnitude`` will return the magnitude of", "# The section below is for developing purpose # Please", "filter windows for stft wsin, wcos, self.bins2freq = create_fourier_kernels(n_fft=n_fft, freq_bins=n_fft,", "torch.tensor It returns a tensor of spectrograms. shape = ``(num_samples,", "activate early downsampling later if possible # This will be", "support type-II DCT at the moment. 
Input signal should be", "to initialize this layer. Default value is 'cpu' \"\"\" def", "= torch.cos(torch.atan2(CQT_imag,CQT_real)) phase_imag = torch.sin(torch.atan2(CQT_imag,CQT_real)) return torch.stack((phase_real,phase_imag), -1) def extra_repr(self)", "we keep downsampling the input audio by a factor of", "for the original implmentation. ''' x = x.permute(0,2,1) # make", "freq_bins=None, hop_length=512, window='hann', freq_scale='no', center=True, pad_mode='reflect', fmin=50, fmax=6000, sr=22050): super().__init__()", "self.wsin, hop, self.padding) # Getting the top octave CQT x_down", "it shows layer information. If ``False``, it suppresses all prints", "can be made trainable using ``trainable_mel`` and ``trainable_STFT``. It only", "GPU memory. fmin : int The starting frequency for the", "in the CQT filter instead of here. if verbose==True: print(\"Creating", "{:.4f} seconds\".format(time()-start)) # print(\"Getting cqt kernel done, n_fft = \",self.n_fft)", ": torch tensor Imaginary part of the signal. \"\"\" x_real", "True if verbose==True: print(\"Creating early downsampling filter ...\", end='\\r') start", "\"\"\" Converting Magnitude spectrograms back to waveforms based on the", "suppresses all prints Returns ------- spectrogram : torch.tensor It returns", "{**default_stft_inversion_params, **stft_inversion_params} recon_stft = self.to_stft(melspec, **mel_inversion_params) recon_audio = self.stft.inverse(recon_stft, **stft_inversion_params)", "trainable start = time() # Create filter windows for stft", "pred_stft loss = loss_fn(pred_mel, melspec) losses.append(loss.item()) loss.backward() optimizer.step() # Check", "bins_per_octave, norm, window, fmax) self.register_buffer('lenghts', lenghts) cqt_kernels = fft(cqt_kernels)[:,:self.kernel_width//2+1] print(\"CQT", "used = {:.4f} seconds\".format(time()-start)) else: self.downsample_factor=1. 
# Preparing CQT kernels", "can reuse the code from the 1992 alogrithm [2] [1]", "Default value is ``False`` output_format : str Control the spectrogram", "the beginning of the iSTFT kernel, if ``True``, the time", "CQT kernel is trainable or not. Default is ``False`` norm", "torch.sin(torch.atan2(CQT_imag,CQT_real)) return torch.stack((phase_real,phase_imag), -1) def forward_manual(self,x): \"\"\" Method for debugging", "float The frequency for the highest CQT bin. Default is", "spec_real = conv1d(x, self.wcos, stride=self.stride) # Doing STFT by using", "as the 1992 algorithm. Therefore, we can reuse the code", "0.50, kernelLength=256, transitionBandwidth=0.001) ) # Broadcast the tensor to the", "...\", end='\\r') start = time() # print(\"Q = {}, fmin_t", "with the small CQT kernel. Everytime the input audio is", "is ``no``, this argument does nothing. fmax : int The", "to freq domain # These cqt_kernel is already in the", "fmin=50, fmax=6000, sr=22050): super().__init__() self.stride = hop_length self.center = center", "kernels created, time used = {:.4f} seconds\".format(time()-start)) def forward(self, x,", "norm=1, window='hann', center=True, pad_mode='reflect'): super().__init__() # norm arg is not", "x_imag.transpose_(1,2) # Prepare the right shape to do inverse #", "= (batch_size, n_freq, time) pred_stft = torch.zeros(*pred_stft_shape, dtype=torch.float32, device=mel_basis.device).normal_().clamp_(eps) else:", ":, 0] /= np.sqrt(N) * 2 V[:, :, 1:] /=", "v = torch.cat([x[:, :, ::2], x[:, :, 1::2].flip([2])], dim=2) Vc", "of the following shapes.\\n 1. ``(len_audio)``\\n 2. 
``(num_audio, len_audio)``\\n 3.", "self.nonzero_indices = (self.w_sum>1e-10) else: pass real[:, self.nonzero_indices] = real[:,self.nonzero_indices].div(self.w_sum[self.nonzero_indices]) #", "# Prepare the right shape to do inverse x_imag.transpose_(1,2) #", "with n_fft real /= (self.n_fft) # Overlap and Add algorithm", "n_fft if hop_length==None: hop_length = int(win_length // 4) self.n_fft =", "and pred_stft.grad.max() < grad_threshold: if verbose: print(f\"Target max gradient of", "def inverse(self, X, onesided=True, length=None, refresh_win=True): \"\"\" This function is", "# We set the frequency range in the CQT filter", "def __init__(self, sr=22050, n_fft=2048, n_mels=128, hop_length=512, window='hann', center=True, pad_mode='reflect', power=2.0,", "autommatically if the input follows these 3 shapes. Most of", "result, shape = ``(num_samples, freq_bins,time_steps)``; ``Complex`` will return the STFT", "used = {:.4f} seconds\".format(time()-start)) def forward(self, x, output_format=None): \"\"\" Convert", "recon_audio class MFCC(torch.nn.Module): \"\"\"This function is to calculate the Mel-frequency", "= time() kernel_sin, kernel_cos, self.bins2freq, _, window = create_fourier_kernels(self.kernel_width, window='ones',", "extra_repr(self) -> str: return 'n_mfcc = {}'.format( (self.n_mfcc) ) class", "n_octaves determines how many resampling requires for the CQT n_filters", "= melspec.shape batch_size, n_mels, time = shape[0], shape[-2], shape[-1] _,", "end='\\r') start = time() sr, self.hop_length, self.downsample_factor, early_downsample_filter, \\ self.earlydownsample", "fmax=None, n_bins=84, bins_per_octave=12, norm=True, basis_norm=1, window='hann', pad_mode='reflect', earlydownsample=True, trainable=False, output_format='Magnitude',", "original implmentation. 
''' log_spec = 10.0 * torch.log10(torch.max(S, self.amin)) log_spec", "if loss_threshold and loss < loss_threshold: if verbose: print(f\"Target error", "self.wsin = torch.tensor(wsin, dtype=torch.float) self.wcos = torch.tensor(wcos, dtype=torch.float) def forward(self,x):", "output type, either ``Magnitude``, ``Complex``, or ``Phase``. The output_format can", "the center of the time-step or not. If ``False``, the", "magnitude spectrograms, please use :func:`~nnAudio.Spectrogram.Griffin_Lim`. Parameters ---------- onesided : bool", "is not required, it is recommended to use the ``inverse``", "if ``output_format='Magnitude'``; shape = ``(num_samples, freq_bins,time_steps, 2)`` if ``output_format='Complex' or", "cqt_kernels_imag = torch.nn.Parameter(cqt_kernels_imag, requires_grad=trainable_kernels) self.register_parameter('cqt_kernels_real', cqt_kernels_real) self.register_parameter('cqt_kernels_imag', cqt_kernels_imag) else: self.register_buffer('cqt_kernels_real',", "to the next lower octave. The kernel creation process is", "self.mel_basis.detach() shape = melspec.shape batch_size, n_mels, time = shape[0], shape[-2],", "region where freq < 40Hz. 
\"\"\" def __init__(self, sr=22050, hop_length=512,", "cqt_kernels_imag = torch.tensor(cqt_kernels.imag).unsqueeze(1) if trainable: cqt_kernels_real = torch.nn.Parameter(cqt_kernels_real, requires_grad=trainable_kernels) cqt_kernels_imag", "# Convert CQT kenral from time domain to freq domain", "# Making all these variables nn.Parameter, so that the model", "dtype=torch.float).unsqueeze(-1) # Decide if the Fourier kernels are trainable if", "hop, self.padding) # Getting the top octave CQT x_down =", "# make freq the last axis, since dct applies to", "kernel_sin, kernel_cos, self.bins2freq, _, window = create_fourier_kernels(self.kernel_width, window='ones', freq_scale='no') #", "print(\"remainder = \", remainder) if remainder==0: # Calculate the top", "Calculate the top bin frequency fmax_t = self.fmin_t*2**((remainder-1)/bins_per_octave) self.fmin_t =", "“CONSTANT-Q TRANSFORM TOOLBOX FOR MUSIC PROCESSING.” (2010). [2] Brown, <NAME>.", "optimizer = torch.optim.SGD([pred_stft], **sgd_kwargs) losses = [] for i in", "If ``False``, it suppresses all prints. device : str Choose", "spec_real.pow(2) + spec_imag.pow(2) if self.trainable==True: return torch.sqrt(spec+1e-8) # prevent Nan", "the correct sampling rate is very important for calculating the", "When ``False`` is used, the Mel scale is quasi-logarithmic. When", "spectrograms only have ``n_fft//2+1`` frequency bins, please use ``onesided=True``, else", "__init__(self, n_fft=2048, freq_bins=None, hop_length=512, window='hann', freq_scale='no', center=True, pad_mode='reflect', fmin=50, fmax=6000,", "the center of the CQT kernel. Default value if ``True``.", "make sure the inverse STFT has the same output length", "-0.0 elements, which leads to error in calculating phase def", "size. Default value is 512. window : str The windowing", "waveforms to CQT spectrograms. 
Parameters ---------- x_real : torch tensor", "def forward(self,x): \"\"\" Convert a batch of waveforms to spectrums.", "# x_imag = padding(x_imag) # Watch out for the positive", "torch.tensor It returns a tensor of MFCCs. shape = ``(num_samples,", "= time() mel_basis = mel(sr, n_fft, n_mels, fmin, fmax, htk=htk,", "TRANSFORM TOOLBOX FOR MUSIC PROCESSING.” (2010). [2] Brown, <NAME>. and", "# self.kernel_sin = torch.nn.Parameter(self.kernel_sin, requires_grad=self.trainable) # self.kernel_cos = torch.nn.Parameter(self.kernel_cos, requires_grad=self.trainable)", "if (hasattr(self, 'kernel_sin_inv') != True) or (hasattr(self, 'kernel_cos_inv') != True):", "torch.tensor(window_mask) wsin = kernel_sin * window_mask wcos = kernel_cos *", "number of Mel filter banks. The filter banks maps the", "wsin) self.register_parameter('wcos', wcos) else: self.register_buffer('wsin', wsin) self.register_buffer('wcos', wcos) if trainable_CQT:", "(or stride) size. Default value is 512. window : str", "self.n_fft, _ = create_cqt_kernels(Q, sr, self.fmin_t, n_filters, bins_per_octave, norm=basis_norm, topbin_check=False)", "created, time used = {:.4f} seconds\".format(time()-start)) # creating kernels for", "except in the very low frequency region where freq <", "all prints. device : str Choose which device to initialize", "for a more computational and memory efficient version. [1] Brown,", "of convoluting the STFT results with a gigantic CQT kernel", "fmax) self.register_buffer('lenghts', lenghts) cqt_kernels_real = torch.tensor(cqt_kernels.real).unsqueeze(1) cqt_kernels_imag = torch.tensor(cqt_kernels.imag).unsqueeze(1) if", "is 'hann' pad_mode : str The padding method. Default value", "conv1d(x, self.cqt_kernels_imag, stride=self.hop_length) # Getting CQT Amplitude CQT = torch.sqrt(CQT_real.pow(2)+CQT_imag.pow(2))", "created, time used = {:.4f} seconds\".format(time()-start)) def forward(self, x, output_format=None):", "not. 
If ``True``, the gradients for CQT kernels will also", "controlled by ``fmin`` and ``fmax``. If 'no' is used, the", "self.n_mfcc = n_mfcc def _power_to_db(self, S): ''' Refer to https://librosa.github.io/librosa/_modules/librosa/core/spectrum.html#power_to_db", "sr=sr, verbose=False) window_mask = get_window(window,int(win_length), fftbins=True) # For inverse, the", "banks will also be calculated and the Mel filter banks", "torch.tensor(lenghts).float() self.register_buffer('lenghts', lenghts) self.basis=basis fft_basis = fft(basis)[:,:self.n_fft//2+1] # Convert CQT", "fmax : int The ending frequency for the highest Mel", "for stft # self.cqt_kernels_real*=lenghts.unsqueeze(1)/self.kernel_width # Trying to normalize as librosa", "pred_stft_shape = (batch_size, n_freq, time) pred_stft = torch.zeros(*pred_stft_shape, dtype=torch.float32, device=mel_basis.device).normal_().clamp_(eps)", "torch.sqrt(self.lenghts.view(-1,1)) if output_format=='Magnitude': if self.trainable==False: # Getting CQT Amplitude CQT", "and ``fmax``. If 'no' is used, the bin will start", "# Creating lowpass filter and make it a torch tensor", "raise ParameterError('amin must be strictly positive') amin = torch.tensor([amin]) ref", "self.wsin, hop, self.padding) CQT = torch.cat((CQT1, CQT),1) CQT = CQT[:,-self.n_bins:,:]", "Broadcast the tensor to the shape that fits conv1d self.register_buffer('lowpass_filter',", "shape ``(batch, n_fft//2+1, timesteps)`` \"\"\" assert S.dim()==3 , \"Please make", "torch.nn.Parameter(cqt_kernels_imag, requires_grad=trainable_kernels) self.register_parameter('cqt_kernels_real', cqt_kernels_real) self.register_parameter('cqt_kernels_imag', cqt_kernels_imag) else: self.register_buffer('cqt_kernels_real', cqt_kernels_real) self.register_buffer('cqt_kernels_imag',", "of {grad_threshold} reached. 
Stopping optimization.\") break pred_stft = pred_stft.detach().clamp(eps) **", "center self.pad_mode = pad_mode self.momentum = momentum self.device = device", "create_fourier_kernels(n_fft, win_length=win_length, freq_bins=freq_bins, window=window, freq_scale=freq_scale, fmin=fmin, fmax=fmax, sr=sr, verbose=verbose) kernel_sin", "W_r = torch.cos(k) W_i = torch.sin(k) V = Vc[:, :,", "self.top_db is not None: if self.top_db < 0: raise ParameterError('top_db", ": int The number of iterations for Griffin-Lim. The default", "k = - torch.arange(N, dtype=x.dtype, device=x.device)[None, :] * np.pi /", "(batch, freq_bins, timesteps, 2).\"\\ \"\\nIf you have a magnitude spectrogram,", "version. [1] Brown, <NAME>. and <NAME>. “An efficient algorithm for", "x = padding(x) spec_imag = conv1d(x, self.wsin, stride=self.stride) spec_real =", "cqt_kernel is already in the frequency domain cqt_kernels_real = torch.tensor(fft_basis.real.astype(np.float32))", "return log_spec def _dct(self, x, norm=None): ''' Refer to https://github.com/zh217/torch-dct", ">>> spec_layer = Spectrogram.CQT1992v2() >>> specs = spec_layer(x) \"\"\" def", ">>> specs = spec_layer(x) \"\"\" def __init__(self, n_fft=2048, win_length=None, freq_bins=None,", "window, fmax) self.register_buffer('lenghts', lenghts) cqt_kernels_real = torch.tensor(cqt_kernels.real).unsqueeze(1) cqt_kernels_imag = torch.tensor(cqt_kernels.imag).unsqueeze(1)", "= - torch.arange(N, dtype=x.dtype, device=x.device)[None, :] * np.pi / (2", "It returns a batch of waveforms. 
Examples -------- >>> spec_layer", "n_bins) self.n_octaves = int(np.ceil(float(n_bins) / bins_per_octave)) if verbose==True: print(\"num_octave =", "audio by a factor of 2 to convoluting it with", "self.hop_length, self.downsample_factor, early_downsample_filter, \\ self.earlydownsample = get_early_downsample_params(sr, hop_length, fmax_t, Q,", "verbose==True: print(\"num_octave = \", self.n_octaves) # Calculate the lowest frequency", "sure the inverse STFT has the same output length of", "mfcc = spec_layer(x) \"\"\" def __init__(self, sr=22050, n_mfcc=20, norm='ortho', verbose=True,", "real and imag part. signal lies in the real part", "batch of waveforms to CQT spectrograms. Parameters ---------- x :", "the end of the output. refresh_win : bool Recalculating the", "kernel_sin_inv.unsqueeze(-1)) self.register_buffer('kernel_cos_inv', kernel_cos_inv.unsqueeze(-1)) # Making all these variables nn.Parameter, so", "inverse(self,x_real,x_imag): \"\"\" Convert a batch of waveforms to CQT spectrograms.", "conversion # print(f'win_length={self.win_length}\\tw={self.w.shape}') inverse = torch.istft(S.unsqueeze(-1) * angles, self.n_fft, self.hop_length,", "self.bins2freq, self.bin_list, window_mask = create_fourier_kernels(n_fft, win_length=win_length, freq_bins=freq_bins, window=window, freq_scale=freq_scale, fmin=fmin,", ") class CQT2010(torch.nn.Module): \"\"\" This algorithm is using the resampling", "self.register_buffer('kernel_cos', kernel_cos) # Decide if the window function is trainable", "-------- >>> spec_layer = Spectrogram.MFCC() >>> mfcc = spec_layer(x) \"\"\"", "for possible windowing functions. The default value is 'hann'. 
center", "= 10e-8 # fudge factor for normalization ### --------------------------- Spectrogram", "CQT = torch.sqrt(CQT_real.pow(2)+CQT_imag.pow(2)+1e-8) return CQT elif output_format=='Complex': return torch.stack((CQT_real,CQT_imag),-1) elif", "2 self.window = window self.win_length = win_length self.iSTFT = iSTFT", "= real[:, :length] return real def extra_repr(self) -> str: return", "to the right shape \"\"\" x = self.melspec_layer(x) x =", "modify it so that it runs faster than the original", "< 0: raise ParameterError('top_db must be non-negative') # make the", "W_i if norm == 'ortho': V[:, :, 0] /= np.sqrt(N)", "_ = create_cqt_kernels(Q, sr, self.fmin_t, n_filters, bins_per_octave, norm=basis_norm, topbin_check=False) #", "verbose==True: print(\"STFT kernels created, time used = {:.4f} seconds\".format(time()-start)) else:", "If trainability is not required, it is recommended to use", "be calculated and the Mel filter banks will be updated", "The window size for the STFT. Default value is 2048", "cqt_kernels_imag) # If center==True, the STFT window will be put", "https://github.com/zh217/torch-dct for the original implmentation. ''' x = x.permute(0,2,1) #", "a tensor of spectrograms. ``shape = (num_samples, freq_bins,time_steps)`` if ``output_format='Magnitude'``;", "torch.cat((CQT1, CQT),1) CQT = CQT[:,-self.n_bins:,:] # Removing unwanted top bins", "= create_fourier_kernels(n_fft, win_length=win_length, freq_bins=freq_bins, window=window, freq_scale=freq_scale, fmin=fmin, fmax=fmax, sr=sr, verbose=verbose)", "octave. 
The kernel creation process is still same as the", "hop_length=512, fmin=220, fmax=None, n_bins=84, trainable_STFT=False, trainable_CQT=False, bins_per_octave=12, output_format='Complex', norm=1, window='hann',", "nn.ReflectionPad1d(self.n_fft//2) def forward(self,x, output_format=None): \"\"\" Convert a batch of waveforms", "will be inferred autommatically if the input follows these 3", "If you have the magnitude spectrograms, please use :func:`~nnAudio.Spectrogram.Griffin_Lim`. Parameters", "cqt_kernels_imag = torch.tensor(cqt_kernels.imag.astype(np.float32)) if trainable_STFT: wsin = torch.nn.Parameter(wsin, requires_grad=trainable_kernels) wcos", "= self.mel_basis.detach() shape = melspec.shape batch_size, n_mels, time = shape[0],", "= conv1d(x_imag, self.wcos, stride=self.stride) b2 = conv1d(x_imag, self.wsin, stride=self.stride) imag", "shape for batch-wise-time-wise multiplication # Create filter windows for inverse", "function is to calculate the Melspectrogram of the input signal.", "``refresh_win=False``. Else please keep ``refresh_win=True`` \"\"\" if (hasattr(self, 'kernel_sin_inv') !=", "def forward_manual(self,x): \"\"\" Method for debugging \"\"\" x = broadcast_dim(x)", "samples from the start and the end of the output.", "n_mels, time) if random_start: pred_stft_shape = (batch_size, n_freq, time) pred_stft", "def __init__(self, n_fft=2048, win_length=None, freq_bins=None, hop_length=None, window='hann', freq_scale='no', center=True, pad_mode='reflect',", "of MFCCs. shape = ``(num_samples, n_mfcc, time_steps)``. Examples -------- >>>", "= torch.nn.Parameter(self.kernel_cos, requires_grad=self.trainable) # Applying window functions to the Fourier", "not functioning self.trainable = trainable self.hop_length = hop_length self.center =", "Signal Processing to Audio and Acoustics (pp. 1-4), Oct. 
2013.", "= 1/(2**(1/bins_per_octave)-1) if verbose==True: print(\"Creating CQT kernels ...\", end='\\r') start", "if not loss.isfinite(): raise OverflowError(\"Overflow encountered in Mel -> STFT", "freq_scale is ``no``, this argument does nothing. fmax : int", "max_steps=1000, loss_threshold=1e-8, grad_threshold=1e-7, random_start=False, sgd_kwargs=None, eps=1e-12, return_extras=False, verbose=None): \"\"\" Best-attempt", "the highest Mel filter bank. trainable_mel : bool Determine if", "to get same result as librosa CQT = CQT*torch.sqrt(self.lenghts.view(-1,1,1)) if", "= momentum self.device = device if win_length==None: self.win_length=n_fft else: self.win_length=win_length", "The frequency for the highest CQT bin. Default is ``None``,", "timesteps)\" # Initializing Random Phase rand_phase = torch.randn(*S.shape, device=self.device) angles", "fmin=fmin, fmax=fmax, sr=sr, verbose=verbose) kernel_sin = torch.tensor(kernel_sin, dtype=torch.float) kernel_cos =", "n_bins'.format(fmax_t)) if self.earlydownsample == True: # Do early downsampling if", "torch.nn.Parameter(self.kernel_cos, requires_grad=self.trainable) # Applying window functions to the Fourier kernels", "# Preparing CQT kernels if verbose==True: print(\"Creating CQT kernels ...\",", "is ``None``, which means ``n_fft//2+1`` bins Please make sure the", "is ``None`` which is equivalent to ``n_fft//4``. Please make sure", "``output_format='Complex' or 'Phase'``; Examples -------- >>> spec_layer = Spectrogram.STFT() >>>", "torch.sin(k) V = Vc[:, :, :, 0] * W_r -", "result in complex number, shape = ``(num_samples, freq_bins, time_steps, 2)``;", "kernel_sin) self.register_buffer('kernel_cos', kernel_cos) # Decide if the window function is", "to create this window once to save time # Unless", "seconds\".format(time()-start)) print(\"Mel filter created, time used = {:.4f} seconds\".format(time()-start)) else:", "this argument does nothing. 
fmax : int The ending frequency", "self.wcos = torch.nn.Parameter(self.wcos) def forward(self, x): \"\"\" Convert a batch", "input signal. This algorithm first extracts Mel spectrograms from the", "self.trainable_STFT ) def to_stft(self, melspec, max_steps=1000, loss_threshold=1e-8, grad_threshold=1e-7, random_start=False, sgd_kwargs=None,", "torch.tensor( # create_lowpass_filter( # band_center = 0.50, # kernelLength=256, #", "we make a small CQT kernel covering only the top", "wcos = torch.nn.Parameter(wcos, requires_grad=self.trainable) self.register_parameter('wsin', wsin) self.register_parameter('wcos', wcos) # Prepare", "n_bins self.earlydownsample = earlydownsample # We will activate early downsampling", "-------- >>> spec_layer = Spectrogram.MelSpectrogram() >>> specs = spec_layer(x) \"\"\"", "gigantic CQT kernel covering the full frequency spectrum, we make", "the type of spectrogram to be return. Can be either", "``Magnitude`` will return the magnitude of the STFT result, shape", "self.pad_mode = pad_mode self.output_format = output_format # creating kernels for", "self.n_fft = n_fft self.freq_bins = freq_bins self.trainable = trainable self.pad_amount", "sr=sr, trainable=trainable_STFT, output_format=\"Magnitude\", verbose=verbose, **kwargs) # Create filter windows for", "-> STFT optimization\") if loss_threshold and loss < loss_threshold: if", "implmentation. ''' x = x.permute(0,2,1) # make freq the last", "frequency. 
n_mfcc : int The number of Mel-frequency cepstral coefficients", "the audio clips, then the discrete cosine transform is calcuated", "correct shape will be inferred autommatically if the input follows", "basis, self.n_fft, lenghts = create_cqt_kernels(Q, sr, self.fmin_t, n_filters, bins_per_octave, norm=basis_norm,", "torch.sqrt(CQT.pow(2).sum(-1)) else: return torch.sqrt(CQT.pow(2).sum(-1)+1e-8) elif output_format=='Complex': return CQT elif output_format=='Phase':", "= spec_layer(x) \"\"\" def __init__(self, sr=22050, hop_length=512, fmin=32.70, fmax=None, n_bins=84,", "start and the end of the output. If your input", "a factor of 2 to convoluting it with the small", "developing purpose # Please don't use the following classes #", "Create filter windows for stft start = time() # Creating", "kernel_cos_inv = torch.cat((kernel_cos, kernel_cos[1:-1].flip(0)), 0) if iSTFT: self.register_buffer('kernel_sin_inv', kernel_sin_inv.unsqueeze(-1)) self.register_buffer('kernel_cos_inv',", "phase_real = torch.cos(torch.atan2(CQT[:,:,:,1],CQT[:,:,:,0])) phase_imag = torch.sin(torch.atan2(CQT[:,:,:,1],CQT[:,:,:,0])) return torch.stack((phase_real,phase_imag), -1) class", "0.5, kernelLength=256, transitionBandwidth=0.001 ) ) # Broadcast the tensor to", "filter created, time used = {:.4f} seconds\".format(time()-start)) print(\"Mel filter created,", "from librosa.griffinlim. [1] <NAME>., <NAME>., & <NAME>. “A fast Griffin-Lim", "the signal. \"\"\" x_real = broadcast_dim(x_real) x_imag = broadcast_dim(x_imag) x_real.transpose_(1,2)", ": str Control the spectrogram output type, either ``Magnitude``, ``Complex``,", "kernels created, time used = {:.4f} seconds\".format(time()-start)) # creating kernels", "time() basis, self.n_fft, lenghts = create_cqt_kernels(Q, sr, self.fmin_t, n_filters, bins_per_octave,", "norm : string The default value is 'ortho'. 
Normalization for", "= torch.nn.Parameter(wsin, requires_grad=trainable_kernels) wcos = torch.nn.Parameter(wcos, requires_grad=trainable_kernels) self.register_parameter('wsin', wsin) self.register_parameter('wcos',", "and other tensors def __init__(self, sr=22050, hop_length=512, fmin=32.70, fmax=None, n_bins=84,", "verbose=True): super().__init__() # norm arg is not functioning self.trainable =", "layer information. If ``False``, it suppresses all prints device :", "is in the shape of (batch, freq_bins, timesteps, 2)\" #", "trainable or not. If ``True``, the gradients for CQT kernels", "hop_length : int The hop (or stride) size. Default value", "for stft kernel_sin, kernel_cos, self.bins2freq, self.bin_list, window_mask = create_fourier_kernels(n_fft, win_length=win_length,", "def forward(self, x, output_format=None): \"\"\" Convert a batch of waveforms", "print(\"Getting cqt kernel done, n_fft = \",self.n_fft) # If center==True,", "please consider using Griffin-Lim.\" if onesided: X = extend_fbins(X) #", "creating kernels for stft # self.cqt_kernels_real*=lenghts.unsqueeze(1)/self.kernel_width # Trying to normalize", "if self.pad_mode == 'constant': padding = nn.ConstantPad1d(self.pad_amount, 0) elif self.pad_mode", "of a constant Q transform.” (1992). Parameters ---------- sr :", "* torch.log10(torch.max(S, self.amin)) log_spec -= 10.0 * torch.log10(torch.max(self.amin, self.ref)) if", ":, 0] * W_r - Vc[:, :, :, 1] *", "time index is the center of the CQT kernel. Default", "self.n_bins = n_bins self.output_format = output_format self.earlydownsample = earlydownsample #", "When ``trainable=True`` and ``freq_scale!='no'``, there is no guarantee that the", "L2 normalization. Default is ``1``, which is same as the", "STFT. momentum : float The momentum for the update rule.", "a batch of magnitude spectrograms to waveforms. 
Parameters ---------- S", "# In this way, the inverse kernel and the forward", "the input spectrogram contains only half of the n_fft #", "all prints Returns ------- spectrogram : torch.tensor It returns a", "as log_spec so that it can be broadcasted batch_wise_max =", "ifft = e^(+2\\pi*j)*X # ifft(X_real) = (a1, a2) # ifft(X_imag)*1j", "wav2spec conversion rebuilt = torch.stft(inverse, self.n_fft, self.hop_length, win_length=self.win_length, window=self.w, pad_mode=self.pad_mode)", "= ``(num_samples, freq_bins, time_steps)``; 'Complex' will return the STFT result", "shape = ``(num_samples, freq_bins, time_steps, 2)``; 'Phase' will return the", "also be caluclated and the STFT kernels will be updated", "trainable : bool Determine if the STFT kenrels are trainable", "if return_extras: return pred_stft, pred_mel.detach(), losses return pred_stft def inverse(self,", "stride=self.stride) spec_real = conv1d(x, self.wcos, stride=self.stride) # Doing STFT by", "(hasattr(self, 'kernel_sin_inv') != True) or (hasattr(self, 'kernel_cos_inv') != True): raise", "top bin frequency fmax_t = self.fmin_t*2**((remainder-1)/bins_per_octave) self.fmin_t = fmax_t/2**(1-1/bins_per_octave) #", "input audio by a factor of 2 to convoluting it", "Default is ``False`` trainable_CQT : bool Determine if the frequency", "very important for calculating the correct frequency. n_fft : int", "norm=1, window='hann', center=True, pad_mode='reflect', trainable=False, output_format='Magnitude', verbose=True): super().__init__() # norm", "mel_basis = mel(sr, n_fft, n_mels, fmin, fmax, htk=htk, norm=norm) mel_basis", "waveform, please set `length` as your intended waveform length. 
By", "``True``, the gradients for Mel filter banks will also be", "CQT_real, CQT_imag = complex_mul((self.cqt_kernels_real, self.cqt_kernels_imag), (fourier_real, fourier_imag)) CQT = torch.stack((CQT_real,-CQT_imag),-1)", "automatically broadcast to the right shape \"\"\" output_format = output_format", "n_iter self.center = center self.pad_mode = pad_mode self.momentum = momentum", "is the same as the forward STFT. momentum : float", "CQT kernels will also be caluclated and the CQT kernels", "of (batch, freq_bins, timesteps)\" # Initializing Random Phase rand_phase =", "normalize the final CQT result by dividing n_fft # basis_norm", "make sure your input is in the shape of (batch,", "as 1992 CQT = CQT*self.downsample_factor if output_format=='Magnitude': # Getting CQT", "= {:.4f} seconds\".format(time()-start)) else: pass if trainable_mel: # Making everything", "It returns a tensor of spectrograms. ``shape = (num_samples, freq_bins,time_steps)``", "return_extras: return pred_stft, pred_mel.detach(), losses return pred_stft def inverse(self, melspec,", "0) elif self.pad_mode == 'reflect': padding = nn.ReflectionPad1d(self.kernel_width//2) x =", "spec_real[:, :self.freq_bins, :] spec_imag = spec_imag[:, :self.freq_bins, :] if output_format=='Magnitude':", "is very important for calculating the correct frequency. n_fft :", "frequency bin. If freq_scale is ``no``, this argument does nothing.", "``Phase``. Default value is ``Complex``. 
\"\"\" output_format = output_format or", "spectrogram rebuilt = torch.zeros(*angles.shape, device=self.device) for _ in range(self.n_iter): tprev", "# spec2wav conversion # print(f'win_length={self.win_length}\\tw={self.w.shape}') inverse = torch.istft(S.unsqueeze(-1) * angles,", "self.wcos, stride=self.stride) return (real, -imag) def inverse(self,x_real,x_imag): \"\"\" Convert a", "the right shape to do inverse x_imag.transpose_(1,2) # Prepare the", "conv1d(x, self.cqt_kernels_real, stride=self.hop_length) CQT_imag = conv1d(x, self.cqt_kernels_imag, stride=self.hop_length) # Getting", "{} mel_inversion_params = mel_inversion_params or {} stft_inversion_params = stft_inversion_params or", "mel bins. Default value is 128. hop_length : int The", "the end of the output. If your input spectrograms X", "cqt_kernels_real = torch.tensor(basis.real.astype(np.float32)).unsqueeze(1) cqt_kernels_imag = torch.tensor(basis.imag.astype(np.float32)).unsqueeze(1) if trainable: cqt_kernels_real =", "**stft_inversion_params} recon_stft = self.to_stft(melspec, **mel_inversion_params) recon_audio = self.stft.inverse(recon_stft, **stft_inversion_params) return", "is the center of the iSTFT kernel. Default value if", "size = {}, CQT kernel size = {}'.format( (*self.wcos.shape,), (*self.cqt_kernels_real.shape,)", "very low frequency region where freq < 40Hz. Parameters ----------", "V[:, :, 0] /= np.sqrt(N) * 2 V[:, :, 1:]", "n_fft//4 else: self.hop_length = hop_length # Creating window function for", "else: self.register_buffer('kernel_sin', kernel_sin) self.register_buffer('kernel_cos', kernel_cos) # Decide if the window", "model training. 
Default value is ``False`` output_format : str Determine", "gradients for Mel filter banks will also be calculated and", "if self.trainable==True: return torch.sqrt(spec+1e-8) # prevent Nan gradient when sqrt(0)", "x_real = broadcast_dim(x_real) x_imag = broadcast_dim(x_imag) x_real.transpose_(1,2) # Prepare the", "Random Phase rand_phase = torch.randn(*S.shape, device=self.device) angles = torch.empty((*S.shape,2), device=self.device)", "``False``, it suppresses all prints. device : str Choose which", "librosa # self.cqt_kernels_imag*=lenghts.unsqueeze(1)/self.kernel_width print(\"Creating STFT kernels ...\", end='\\r') start =", "= torch.sin(2 * np.pi * rand_phase) # Initializing the rebuilt", "for the calculation of a constant Q transform.” (1992). This", "= self.n_fft // 2 self.window = window self.win_length = win_length", "``output_format='Magnitude'``; ``shape = (num_samples, freq_bins,time_steps, 2)`` if ``output_format='Complex' or 'Phase'``;", ": int The window size. Default value is 2048. 
freq_bins", "non-negative') # make the dim same as log_spec so that", "end='\\r') start = time() lowpass_filter = torch.tensor(create_lowpass_filter( band_center = 0.5,", "be strictly positive') amin = torch.tensor([amin]) ref = torch.abs(torch.tensor([ref])) self.register_buffer('amin',", "the rebuilt magnitude spectrogram rebuilt = torch.zeros(*angles.shape, device=self.device) for _", "X = extend_fbins(X) # extend freq X_real, X_imag = X[:,", "the frequency range in the CQT filter instead of here.", "window_mask if self.trainable==False: self.register_buffer('wsin', wsin) self.register_buffer('wcos', wcos) if self.trainable==True: wsin", "arg is not functioning self.hop_length = hop_length self.center = center", "``None`` n_bins : int The total numbers of CQT bins.", "**mel_inversion_params} if stft_inversion_params: stft_inversion_params = {**default_stft_inversion_params, **stft_inversion_params} recon_stft = self.to_stft(melspec,", "= conv1d(x, self.cqt_kernels_imag, stride=self.hop_length) # Getting CQT Amplitude CQT =", "the :func:`~nnAudio.Spectrogram.iSTFT` class, which is to convert spectrograms back to", "frequency domain cqt_kernels_real = torch.tensor(basis.real.astype(np.float32)).unsqueeze(1) cqt_kernels_imag = torch.tensor(basis.imag.astype(np.float32)).unsqueeze(1) if trainable:", "broadcast to the right shape \"\"\" x = self.melspec_layer(x) x", "(or stride) size. Default value is ``None`` which is equivalent", "part can be made trainable using ``trainable_mel`` and ``trainable_STFT``. It", "'constant': padding = nn.ConstantPad1d(self.pad_amount, 0) elif self.pad_mode == 'reflect': if", "is to calculate the short-time Fourier transform (STFT) of the", "the input audio by a factor of 2 to convoluting", "W_r - Vc[:, :, :, 1] * W_i if norm", "= get_window(window,int(win_length), fftbins=True) # For inverse, the Fourier kernels do", "1, onesided=False) # TODO: Can make the W_r and W_i", "correct frequency. 
hop_length : int The hop (or stride) size.", "so that it can be used later in inverse self.register_buffer('window_mask',", "time-step or not. If ``False``, the time index is the", "kernels if verbose==True: print(\"Creating CQT kernels ...\", end='\\r') start =", "early downsampling filter ...\", end='\\r') start = time() sr, self.hop_length,", "is ``False``. verbose : bool If ``True``, it shows layer", "frequency axis x_shape = x.shape N = x_shape[-1] v =", "created, time used = {:.4f} seconds\".format(time()-start)) # Caluate num of", "cqt_kernels_real = torch.nn.Parameter(cqt_kernels_real, requires_grad=trainable_kernels) cqt_kernels_imag = torch.nn.Parameter(cqt_kernels_imag, requires_grad=trainable_kernels) self.register_parameter('cqt_kernels_real', cqt_kernels_real)", "= rebuilt # Saving previous rebuilt magnitude spec # spec2wav", "== 'reflect': # padding = nn.ReflectionPad1d(self.n_fft//2) # x_real = padding(x_real)", "`length` as your intended waveform length. By default, ``length=None``, which", "= np.ceil(Q * sr / freqs) lenghts = torch.tensor(lenghts).float() self.register_buffer('lenghts',", "care. Parameters ---------- n_fft : int The window size. Default", "dtype=torch.float) # In this way, the inverse kernel and the", "torch.tensor([amin]) ref = torch.abs(torch.tensor([ref])) self.register_buffer('amin', amin) self.register_buffer('ref', ref) self.top_db =", "the n_fft to mel bins. Default value is 128. hop_length", "size. Default value is 512. 
fmin : float The frequency", "the middle, and paddings at the beginning # and ending", "self.norm: CQT = CQT/self.n_fft*torch.sqrt(self.lenghts.view(-1,1,1)) else: CQT = CQT*torch.sqrt(self.lenghts.view(-1,1,1)) # Normalizing", "cqt_kernels_imag = torch.tensor(basis.imag.astype(np.float32)).unsqueeze(1) if trainable: cqt_kernels_real = torch.nn.Parameter(cqt_kernels_real, requires_grad=trainable_kernels) cqt_kernels_imag", "determines how many resampling requires for the CQT n_filters =", "/ bins_per_octave)) # print(\"n_octaves = \", self.n_octaves) # Calculate the", "If ``False``, it suppresses all prints Returns ------- spectrogram :", "the right shape output_format : str Control the type of", "b1) a1 = conv1d(x_real, self.wcos, stride=self.stride) a2 = conv1d(x_real, self.wsin,", "real = a1 - b2 real = real.squeeze(-2)*self.window_mask # Normalize", "_dct(self, x, norm=None): ''' Refer to https://github.com/zh217/torch-dct for the original", "index is the center of the CQT kernel. Default value", "conv1d, conv2d, fold import numpy as np from time import", "...\", end='\\r') start = time() sr, self.hop_length, self.downsample_factor, early_downsample_filter, \\", "freqs = fmin * 2.0 ** (np.r_[0:n_bins] / np.float(bins_per_octave)) lenghts", "connect all the frames real = overlap_add(real, self.stride) # Prepare", "of waveforms to spectrums. Parameters ---------- x : torch tensor", "sqrt(0) due to output=0 else: return torch.sqrt(spec) elif output_format=='Complex': return", "and the forward kernel do not share the same memory...", "forward(self, X, onesided=False, length=None, refresh_win=None): \"\"\" If your spectrograms only", "use :func:`~nnAudio.Spectrogram.Griffin_Lim`. The parameters (e.g. 
n_fft, window) need to be", "mel(sr, n_fft, n_mels, fmin, fmax, htk=htk, norm=norm) mel_basis = torch.tensor(mel_basis)", "**stft_inversion_params) return recon_audio class MFCC(torch.nn.Module): \"\"\"This function is to calculate", "stride=self.stride) b2 = conv1d(x_imag, self.wsin, stride=self.stride) imag = a2+b1 real", "from the start and the end of the output. refresh_win", "sr, fmin, n_bins, bins_per_octave, norm, window, fmax) self.register_buffer('lenghts', lenghts) cqt_kernels", "increase the speed by setting ``refresh_win=False``. Else please keep ``refresh_win=True``", "self.pad_mode == 'reflect': padding = nn.ReflectionPad1d(self.kernel_width//2) x = padding(x) #", "center self.pad_amount = self.n_fft // 2 self.refresh_win = refresh_win start", "``1``, which is same as the normalization used in librosa.", "seconds\".format(time()-start)) else: pass if trainable_mel: # Making everything nn.Parameter, so", "= time() cqt_kernels, self.kernel_width, lenghts = create_cqt_kernels(Q, sr, fmin, n_bins,", "to ``n_fft//4``. window : str The windowing function for STFT.", "time to frequency domain transformation kernel for the input audio", "for stft start = time() # Creating kernel for mel", "frequency. n_fft : int The window size for the STFT.", "the STFT. Default value is 2048 n_mels : int The", "= self.melspec_layer(x) x = self._power_to_db(x) x = self._dct(x, norm='ortho')[:,:self.m_mfcc,:] return", "constant Q transform.” (1992). Early downsampling factor is to downsample", "gradients for CQT kernels will also be caluclated and the", "return torch.stack((phase_real,phase_imag), -1) def forward_manual(self,x): \"\"\" Method for debugging \"\"\"", "value is 'Magnitude'. 
verbose : bool If ``True``, it shows", "= a2+b1 real = a1-b2 return (real/self.n_fft, imag/self.n_fft) class iSTFT(torch.nn.Module):", "dim=2) Vc = torch.rfft(v, 1, onesided=False) # TODO: Can make", "target = target.unsqueeze(1) if target.ndim == 3 else target loss", "activate the iSTFT module by setting `iSTFT=True` if you want", "Fourier transform (STFT) of the input signal. Input signal should", "size = {}'.format( (*self.wcos.shape,), (*self.cqt_kernels_real.shape,) ) class CQT2010(torch.nn.Module): \"\"\" This", "Therefore, we can reuse the code from the 1992 alogrithm", "the same as the forward STFT. hop_length : int The", "= conv2d(X_imag_bc, self.kernel_sin_inv, stride=(1,1)) # compute real and imag part.", "if remainder==0: # Calculate the top bin frequency fmax_t =", "bool Determine if the time to frequency domain transformation kernel", "right shape to do inverse x_imag.transpose_(1,2) # Prepare the right", "2. ``(num_audio, len_audio)``\\n 3. ``(num_audio, 1, len_audio)`` The correct shape", "instead of here. if verbose==True: print(\"Creating STFT kernels ...\", end='\\r')", "self.register_parameter('cqt_kernels_imag', cqt_kernels_imag) else: self.register_buffer('cqt_kernels_real', cqt_kernels_real) self.register_buffer('cqt_kernels_imag', cqt_kernels_imag) # If center==True,", "top_db=80.0, **kwargs): super().__init__() self.melspec_layer = MelSpectrogram(sr=sr, verbose=verbose, **kwargs) self.m_mfcc =", "STFT kernel, if ``True``, the time index is the center", "need for center self.stft = STFT(n_fft=n_fft, freq_bins=None, hop_length=hop_length, window=window, freq_scale='no',", "length of the original waveform, please set `length` as your", "torch.cat((CQT1, CQT),1) CQT = CQT[:,-self.n_bins:,:] # Removing unwanted bottom bins", "torch.tensor(fft_basis.imag.astype(np.float32)) if verbose==True: print(\"CQT kernels created, time used = {:.4f}", "value is 'reflect'. 
inverse : bool To activate the iSTFT", "``Phase`` will return the phase of the STFT reuslt, shape", "transitionBandwidth=0.001) ) # Broadcast the tensor to the shape that", "bool Determine if the window function is trainable or not.", "\"\"\"This function is to calculate the short-time Fourier transform (STFT)", "octave. Default is 12. norm : bool Normalization for the", "low pass filter ...\", end='\\r') start = time() lowpass_filter =", "np.pi * rand_phase) # Initializing the rebuilt magnitude spectrogram rebuilt", "and make it a torch tensor if verbose==True: print(\"Creating low", "default, ``length=None``, which will remove ``n_fft//2`` samples from the start", "hop_length = int(win_length // 4) self.n_fft = n_fft self.win_length =", "Early downsampling factor is to downsample the input audio to", "self.win_length = win_length self.n_iter = n_iter self.center = center self.pad_mode", "verbose=verbose) kernel_sin = torch.tensor(kernel_sin, dtype=torch.float) kernel_cos = torch.tensor(kernel_cos, dtype=torch.float) #", "output_format=='Magnitude': spec = spec_real.pow(2) + spec_imag.pow(2) if self.trainable==True: return torch.sqrt(spec+1e-8)", "padding = nn.ReflectionPad1d(self.n_fft//2) # x_real = padding(x_real) # x_imag =", "self.cqt_kernels_imag, hop, self.padding) # Getting the top octave CQT x_down", "ValueError('The top bin {}Hz has exceeded the Nyquist frequency, \\", "automatically broadcast to the right shape \"\"\" x = broadcast_dim(x)", "= CQT*torch.sqrt(self.lenghts.view(-1,1,1)) # Normalizing the output with the downsampling factor,", "the center of the STFT kernel. Default value if ``True``.", "torch.zeros(*angles.shape, device=self.device) for _ in range(self.n_iter): tprev = rebuilt #", "``torch.nn.Module``, therefore, the usage is same as ``torch.nn.Module``. Parameters ----------", "the CQT result. basis_norm : int Normalization for the CQT", "str The padding method. Default value is 'reflect'. 
trainable :", "self.trainable_mel = trainable_mel self.trainable_STFT = trainable_STFT self.verbose = verbose #", "CQT = CQT/self.n_fft*torch.sqrt(self.lenghts.view(-1,1,1)) else: CQT = CQT*torch.sqrt(self.lenghts.view(-1,1,1)) # Normalizing the", "\"\"\" output_format = output_format or self.output_format x = broadcast_dim(x) if", "calculation of a constant Q transform.” (1992). early downsampling factor", "computational and memory efficient version. [1] Brown, <NAME>. and <NAME>.", "to initialize this layer. Default value is 'cpu'. Returns -------", "spectrogram classes \"\"\" # 0.2.0 import torch import torch.nn as", "freq_scale=freq_scale, fmin=fmin, fmax=fmax, sr=sr) self.wsin = torch.tensor(wsin, dtype=torch.float) self.wcos =", "you can increase the speed by setting ``refresh_win=False``. Else please", "of the iSTFT kernel. Default value if ``True``. Please make", "for the lowest CQT bin. Default is 32.70Hz, which coresponds", "imag part. signal lies in the real part real =", "or less the same except in the very low frequency", "freq X_real, X_imag = X[:, :, :, 0], X[:, :,", "\"\"\" output_format = output_format or self.output_format self.num_samples = x.shape[-1] x", "value is 2048. n_iter=32 : int The number of iterations", "``n_fft//2+1`` bins. hop_length : int The hop (or stride) size.", "``True``, the time index is the center of the STFT", "for the input audio is trainable or not. Default is", "time used = {:.4f} seconds\".format(time()-start)) # Caluate num of filter", "self.register_buffer('early_downsample_filter', early_downsample_filter) if verbose==True: print(\"Early downsampling filter created, \\ time", "loss verbose = verbose or self.verbose # SGD arguments default_sgd_kwargs", "# Create filter windows for inverse kernel_sin, kernel_cos, _, _,", "requires for the kernel # n_octaves determines how many resampling", "make the default setting same as librosa if win_length==None: win_length", "Griffin-Lim\"[1]. 
This Griffin Lim is a direct clone from librosa.griffinlim.", "the value is the same as the forward STFT. fmin", "it runs faster than the original 1992 algorithm, that is", "Determine if the time to frequency domain transformation kernel for", "The number of iterations for Griffin-Lim. The default value is", "Refer to https://librosa.github.io/librosa/_modules/librosa/core/spectrum.html#power_to_db for the original implmentation. ''' log_spec =", "Returns ------- spectrogram : torch.tensor It returns a batch of", "Determine if the Mel filter banks are trainable or not.", "b2 real = real.squeeze(-2)*self.window_mask # Normalize the amplitude with n_fft", "= Spectrogram.CQT1992v2() >>> specs = spec_layer(x) \"\"\" def __init__(self, sr=22050,", "CQT = CQT*torch.sqrt(self.lenghts.view(-1,1,1)) if output_format=='Magnitude': # Getting CQT Amplitude return", "print(\"Early downsampling filter created, \\ time used = {:.4f} seconds\".format(time()-start))", "torch.nn.functional import conv1d, conv2d, fold import numpy as np from", "= torch.sin(torch.atan2(CQT_imag,CQT_real)) return torch.stack((phase_real,phase_imag), -1) def extra_repr(self) -> str: return", "CQT kernel size. The result with and without early downsampling", "-imag) def inverse(self,x_real,x_imag): \"\"\" Convert a batch of waveforms to", "I call it version 2. [1] Brown, <NAME>. and <NAME>.", "Saving previous rebuilt magnitude spec # spec2wav conversion # print(f'win_length={self.win_length}\\tw={self.w.shape}')", "iSTFT: self.register_buffer('kernel_sin_inv', kernel_sin_inv.unsqueeze(-1)) self.register_buffer('kernel_cos_inv', kernel_cos_inv.unsqueeze(-1)) # Making all these variables", "bins. hop_length : int The hop (or stride) size. Default", "``STFT`` class to save GPU/RAM memory. When ``trainable=True`` and ``freq_scale!='no'``,", "or not. 
If ``False``, the time index is the beginning", "kernel_cos = torch.tensor(kernel_cos, dtype=torch.float) # In this way, the inverse", "spectrograms from the audio clips, then the discrete cosine transform", "shape = ``(num_samples, freq_bins,time_steps, 2)`` if ``output_format='Complex' or 'Phase'``; Examples", "spec = spec_real.pow(2) + spec_imag.pow(2) if self.trainable==True: return torch.sqrt(spec+1e-8) #", "spec # spec2wav conversion # print(f'win_length={self.win_length}\\tw={self.w.shape}') inverse = torch.istft(S.unsqueeze(-1) *", "conv1d(x_imag, self.wsin, stride=self.stride) imag = a2+b1 real = a1-b2 return", "end='\\r') start = time() # print(\"Q = {}, fmin_t =", "conv1d(x, self.wcos, stride=self.hop_length) fourier_imag = conv1d(x, self.wsin, stride=self.hop_length) # CQT", "Fourier Transform (STFT) # We set the frequency range in", "spectrum, we make a small CQT kernel covering only the", "windows for inverse kernel_sin, kernel_cos, _, _, window_mask = create_fourier_kernels(n_fft,", "the top octave kernel self.fmin_t = fmin*2**(self.n_octaves-1) remainder = n_bins", "return CQT elif output_format=='Phase': phase_real = torch.cos(torch.atan2(CQT_imag,CQT_real)) phase_imag = torch.sin(torch.atan2(CQT_imag,CQT_real))", "x = broadcast_dim(x) if self.center: if self.pad_mode == 'constant': padding", "to use `inverse`\") assert X.dim()==4 , \"Inverse iSTFT only works", "to calculate filter_cutoff and creating CQT kernels Q = 1/(2**(1/bins_per_octave)-1)", "cosine transform is calcuated to obtain the final MFCCs. Therefore,", "self.lowpass_filter = torch.tensor( # create_lowpass_filter( # band_center = 0.50, #", "the model can be used with nn.Parallel # self.kernel_sin =", "padding = nn.ConstantPad1d(self.n_fft//2, 0) # elif self.pad_mode == 'reflect': #", "if the Fourier kernels are trainable if trainable_kernels: # Making", "it with the small CQT kernel. 
Everytime the input audio", "self.melspec_layer = MelSpectrogram(sr=sr, verbose=verbose, **kwargs) self.m_mfcc = n_mfcc # attributes", "layer information. If ``False``, it suppresses all prints Returns -------", "S): \"\"\" Convert a batch of magnitude spectrograms to waveforms.", "import time from nnAudio.librosa_functions import * from nnAudio.utils import *", "will start at 0Hz and end at Nyquist frequency with", "the top bin frequency fmax_t = self.fmin_t*2**((bins_per_octave-1)/bins_per_octave) else: # Calculate", "in complex number, shape = ``(num_samples, freq_bins, time_steps, 2)``; 'Phase'", "return 'STFT kernel size = {}, CQT kernel size =", "if verbose==True: print(\"num_octave = \", self.n_octaves) # Calculate the lowest", "memory efficient version. [1] Brown, <NAME>. and <NAME>. “An efficient", "kernels are trainable or not. If ``True``, the gradients for", "Q = 1/(2**(1/bins_per_octave)-1) # Creating lowpass filter and make it", "torch.nn.Parameter(kernel_cos, requires_grad=trainable_kernels) self.register_parameter('kernel_sin', kernel_sin) self.register_parameter('kernel_cos', kernel_cos) else: self.register_buffer('kernel_sin', kernel_sin) self.register_buffer('kernel_cos',", "This algorithm first extracts Mel spectrograms from the audio clips,", "conv2d, fold import numpy as np from time import time", "pred = pred.unsqueeze(1) if pred.ndim == 3 else pred target", "torch.tensor It returns a batch of waveforms. Examples -------- >>>", "uses ``scipy.signal.get_window``, please refer to scipy documentation for possible windowing", "{:.4f} seconds\".format(time()-start)) print(\"Mel filter created, time used = {:.4f} seconds\".format(time()-start))", "``(len_audio)``\\n 2. ``(num_audio, len_audio)``\\n 3. ``(num_audio, 1, len_audio)`` The correct", "filter bank. 
trainable_mel : bool Determine if the Mel filter", "# Calculate num of filter requires for the kernel #", "function for stft and istft later self.w = torch.tensor(get_window(window, int(self.win_length),", "time used = {:.4f} seconds\".format(time()-start)) def forward(self,x, output_format=None): \"\"\" Convert", "1, len_audio)`` The correct shape will be inferred autommatically if", "= X_real.unsqueeze(1) X_imag_bc = X_imag.unsqueeze(1) a1 = conv2d(X_real_bc, self.kernel_cos_inv, stride=(1,1))", "'Mel filter banks size = {}, trainable_mel={}'.format( (*self.mel_basis.shape,), self.trainable_mel, self.trainable_STFT", "or 'Phase'``; Examples -------- >>> spec_layer = Spectrogram.CQT2010v2() >>> specs", "dtype=x.dtype, device=x.device)[None, :] * np.pi / (2 * N) W_r", "value is ``False``. fmin : int The starting frequency for", "The hop (or stride) size. Default value is 512. fmin", "window_mask = torch.nn.Parameter(window_mask, requires_grad=trainable_window) self.register_parameter('window_mask', window_mask) else: self.register_buffer('window_mask', window_mask) if", "update rule angles[:,:,:] = rebuilt[:,:,:] - (self.momentum / (1 +", "verbose==True: print(\"CQT kernels created, time used = {:.4f} seconds\".format(time()-start)) def", "Fourier Kernel size={}, iSTFT={}, trainable={}'.format( self.n_fft, (*self.wsin.shape,), self.iSTFT, self.trainable )", "requires_grad=self.trainable) self.register_parameter('wsin', wsin) self.register_parameter('wcos', wcos) # Prepare the shape of", "please refer to scipy documentation for possible windowing functions. 
The", "mel_basis) else: self.register_buffer('mel_basis', mel_basis) # if trainable_mel==True: # self.mel_basis =", "padding(x) # STFT fourier_real = conv1d(x, self.wcos, stride=self.hop_length) fourier_imag =", "``n_fft//2+1`` frequency bins, please use ``onesided=True``, else use ``onesided=False`` To", "= time() basis, self.n_fft, lenghts = create_cqt_kernels(Q, sr, self.fmin_t, n_filters,", "very important for calculating the correct frequency. n_mfcc : int", "to spectrograms. Parameters ---------- x : torch tensor Input signal", "fmin=32.70, fmax=None, n_bins=84, bins_per_octave=12, norm=1, window='hann', center=True, pad_mode='reflect', trainable=False, output_format='Magnitude',", "= torch.tensor(basis.real.astype(np.float32)).unsqueeze(1) cqt_kernels_imag = torch.tensor(basis.imag.astype(np.float32)).unsqueeze(1) if trainable: cqt_kernels_real = torch.nn.Parameter(cqt_kernels_real,", "if ``output_format='Magnitude'``; ``shape = (num_samples, freq_bins,time_steps, 2)`` if ``output_format='Complex' or", "forward STFT. fmin : int The starting frequency for the", "bins_per_octave : int Number of bins per octave. Default is", "trainable_STFT=False, verbose=True, **kwargs): super().__init__() self.stride = hop_length self.center = center", ": str Control the type of spectrogram to be return.", "Nyquist frequency with linear spacing. Please make sure the value", "padding(x_imag) # Watch out for the positive and negative signs", "the input signal. 
This algorithm first extracts Mel spectrograms from", "x = broadcast_dim(x) if self.earlydownsample==True: x = downsampling_by_n(x, self.early_downsample_filter, self.downsample_factor)", "**mel_inversion_params) recon_audio = self.stft.inverse(recon_stft, **stft_inversion_params) return recon_audio class MFCC(torch.nn.Module): \"\"\"This", "window=window, freq_scale=freq_scale, fmin=fmin, fmax=fmax, sr=sr) self.wsin = torch.tensor(wsin, dtype=torch.float) self.wcos", "str Control the spectrogram output type, either ``Magnitude``, ``Complex``, or", "loss.isfinite(): raise OverflowError(\"Overflow encountered in Mel -> STFT optimization\") if", "bool Putting the STFT keneral at the center of the", "start = time() # print(\"Q = {}, fmin_t = {},", "since dct applies to the frequency axis x_shape = x.shape", "self.center = center self.pad_mode = pad_mode self.norm = norm self.output_format", "= x # Preparing a new variable for downsampling for", "n_mels : int The number of Mel filter banks. The", "kernel covering the full frequency spectrum, we make a small", "octave. Then we keep downsampling the input audio by a", "= create_fourier_kernels(n_fft=n_fft, freq_bins=n_fft, window=window, freq_scale=freq_scale, fmin=fmin, fmax=fmax, sr=sr) self.wsin =", "downsampled, the CQT relative to the downsampled input is equivalent", "3 shapes. Most of the arguments follow the convention from", "torch.nn.Parameter(self.mel_basis) # if trainable_STFT==True: # self.wsin = torch.nn.Parameter(self.wsin) # self.wcos", "self.pad_mode == 'reflect': # padding = nn.ReflectionPad1d(self.n_fft//2) # x_real =", "break pred_stft = pred_stft.detach().clamp(eps) ** 0.5 pred_stft = pred_stft.view((*shape[:-2], n_freq,", "inverse = torch.istft(S.unsqueeze(-1) * angles, self.n_fft, self.hop_length, win_length=self.win_length, window=self.w, center=self.center)", "correct frequency. 
n_mfcc : int The number of Mel-frequency cepstral", "hop_length=None, win_length=None, window='hann', center=True, pad_mode='reflect', momentum=0.99, device='cpu'): super().__init__() self.n_fft =", "lenghts = create_cqt_kernels(Q, sr, fmin, n_bins, bins_per_octave, norm, window, fmax)", "``n_fft//4``. window : str The windowing function for STFT. It", "downsampling filter ...\", end='\\r') start = time() sr, self.hop_length, self.downsample_factor,", "torch.optim.SGD([pred_stft], **sgd_kwargs) losses = [] for i in range(max_steps): optimizer.zero_grad()", "CQT CQT_real = conv1d(x, self.cqt_kernels_real, stride=self.hop_length) CQT_imag = conv1d(x, self.cqt_kernels_imag,", "``False``. trainable_window : bool Determine if the window function is", "dividing n_fft # basis_norm is for normalizing basis self.hop_length =", "/= np.sqrt(N / 2) * 2 V = 2 *", "created, time used = {:.4f} seconds\".format(time()-start)) def forward(self,x, output_format=None): \"\"\"", "is downsampled, the CQT relative to the downsampled input is", "to MFCC. Parameters ---------- x : torch tensor Input signal", "per octave. Default is 12. norm : bool Normalization for", "value is the same as the forward STFT. sr :", "trainable_STFT: wsin = torch.nn.Parameter(wsin, requires_grad=trainable_kernels) wcos = torch.nn.Parameter(wcos, requires_grad=trainable_kernels) self.register_parameter('wsin',", "is not ``None``. bins_per_octave : int Number of bins per", "from nnAudio.utils import * sz_float = 4 # size of", "CQT1992(torch.nn.Module): \"\"\" This alogrithm uses the method proposed in [1].", "of {loss_threshold} reached. Stopping optimization.\") break if grad_threshold and pred_stft.grad.max()", "inverse kernel and the forward kernel do not share the", "for the calculation of a constant Q transform.” (1992). Early", "same as ``torch.nn.Module``. 
Parameters ---------- n_fft : int The window", "to the right shape output_format : str Control the type", "htk : bool When ``False`` is used, the Mel scale", "the bin will start at 0Hz and end at Nyquist", "_power_to_db if amin <= 0: raise ParameterError('amin must be strictly", "frequency fmax_t = self.fmin_t*2**((remainder-1)/bins_per_octave) self.fmin_t = fmax_t/2**(1-1/bins_per_octave) # Adjusting the", "CQT*self.downsample_factor if output_format=='Magnitude': # Getting CQT Amplitude return torch.sqrt(CQT.pow(2).sum(-1)) elif", "iSTFT(torch.nn.Module): \"\"\"This class is to convert spectrograms back to waveforms.", "int Normalization for the CQT kernels. ``1`` means L1 normalization,", "is in the shape of (batch, freq_bins, timesteps, 2).\"\\ \"\\nIf", "verbose=True, ref=1.0, amin=1e-10, top_db=80.0, **kwargs): super().__init__() self.melspec_layer = MelSpectrogram(sr=sr, verbose=verbose,", "if self.center: real = real[:, self.pad_amount:self.pad_amount + length] else: real", "if self.num_samples < self.pad_amount: raise AssertionError(\"Signal length shorter than reflect", "the STFT result in complex number, shape = ``(num_samples, freq_bins,time_steps,", "self.register_parameter('kernel_cos', kernel_cos) else: self.register_buffer('kernel_sin', kernel_sin) self.register_buffer('kernel_cos', kernel_cos) # Decide if", "self.center = center self.pad_mode = pad_mode self.output_format = output_format #", "information. If ``False``, it suppresses all prints device : str", "be caluclated and the STFT kernels will be updated during", "calucate the correct ``fmin`` and ``fmax``. Setting the correct sampling", "if ``True``. pad_mode : str The padding method. 
Default value", "'kernel_cos_inv') != True): raise NameError(\"Please activate the iSTFT module by", "previous rebuilt magnitude spec # spec2wav conversion # print(f'win_length={self.win_length}\\tw={self.w.shape}') inverse", "by a factor of 2 to convoluting it with the", "number, shape = ``(num_samples, freq_bins, time_steps, 2)``; 'Phase' will return", "Vc[:, :, :, 0] * W_r - Vc[:, :, :,", "self.trainable ) class MelSpectrogram(torch.nn.Module): \"\"\"This function is to calculate the", "the note C0. fmax : float The frequency for the", "Fourier kernels do not need to be windowed window_mask =", "self.top_db = top_db self.n_mfcc = n_mfcc def _power_to_db(self, S): '''", "kernel_cos_inv.unsqueeze(-1)) # Making all these variables nn.Parameter, so that the", "returns a batch of waveforms. Examples -------- >>> spec_layer =", "- target).pow(2).sum(-2).mean() return loss verbose = verbose or self.verbose #", "is same as ``torch.nn.Module``. This alogrithm uses the method proposed", "= Spectrogram.STFT() >>> specs = spec_layer(x) \"\"\" def __init__(self, n_fft=2048,", "2**(self.n_octaves-1) is make it # same mag as 1992 CQT", "value is ``0.99``. device : str Choose which device to", "Trying to normalize as librosa # self.cqt_kernels_imag*=lenghts.unsqueeze(1)/self.kernel_width print(\"Creating STFT kernels", "return V.permute(0,2,1) # swapping back the time axis and freq", "= trainable self.pad_amount = self.n_fft // 2 self.window = window", "print(\"Creating CQT kernels ...\", end='\\r') start = time() cqt_kernels, self.kernel_width,", "banks will be updated during model training. Default value is", "hop, self.padding) CQT = torch.cat((CQT1, CQT),1) CQT = CQT[:,-self.n_bins:,:] #", "STFT(n_fft=n_fft, freq_bins=None, hop_length=hop_length, window=window, freq_scale='no', center=center, pad_mode=pad_mode, sr=sr, trainable=trainable_STFT, output_format=\"Magnitude\",", "of bins per octave. Default is 12. 
norm : bool", "= spec_layer(x) \"\"\" def __init__(self, n_fft=2048, win_length=None, freq_bins=None, hop_length=None, window='hann',", "fmin=0.0, fmax=None, norm=1, trainable_mel=False, trainable_STFT=False, verbose=True, **kwargs): super().__init__() self.stride =", "tensor Spectrogram of the shape ``(batch, n_fft//2+1, timesteps)`` \"\"\" assert", "Q = 1/(2**(1/bins_per_octave)-1) print(\"Creating CQT kernels ...\", end='\\r') start =", "* window_mask if self.trainable==False: self.register_buffer('wsin', wsin) self.register_buffer('wcos', wcos) if self.trainable==True:", ": int The total numbers of CQT bins. Default is", "kernel creation process is still same as the 1992 algorithm.", "to be the same as the STFT in order to", "self.num_samples < self.pad_amount: raise AssertionError(\"Signal length shorter than reflect padding", "this model can support nn.DataParallel mel_basis = torch.nn.Parameter(mel_basis, requires_grad=trainable_mel) self.register_parameter('mel_basis',", "self.w = torch.tensor(get_window(window, int(self.win_length), fftbins=True), device=device).float() def forward(self, S): \"\"\"", "raise NameError(\"Please activate the iSTFT module by setting `iSTFT=True` if", "= self.hop_length CQT = get_cqt_complex(x, self.cqt_kernels_real, self.cqt_kernels_imag, hop, self.padding) #", "value is 'ortho'. Normalization for DCT basis **kwargs Other arguments", "self.trainable_mel, self.trainable_STFT ) def to_stft(self, melspec, max_steps=1000, loss_threshold=1e-8, grad_threshold=1e-7, random_start=False,", "with linear spacing. 
center : bool Putting the STFT keneral", "filter created, time used = {:.4f} seconds\".format(time()-start)) # Calculate num", "ifft(X_real) = (a1, a2) # ifft(X_imag)*1j = (b1, b2)*1j #", "-------- >>> spec_layer = Spectrogram.iSTFT() >>> specs = spec_layer(x) \"\"\"", "sr/2: raise ValueError('The top bin {}Hz has exceeded the Nyquist", "module by setting `iSTFT=True` if you want to use `inverse`\")", "downsampled, the CQT relative to the downsampled input is equavalent", "nn.ConstantPad1d(self.n_fft//2, 0) # elif self.pad_mode == 'reflect': # padding =", "If ``fmax`` is not ``None``, then the argument ``n_bins`` will", "right shape to do inverse # if self.center: # if", "as librosa CQT = CQT*torch.sqrt(self.lenghts.view(-1,1,1)) if output_format=='Magnitude': if self.trainable==False: #", "calculation of a constant Q transform.” (1992). This function is", ">>> mfcc = spec_layer(x) \"\"\" def __init__(self, sr=22050, n_mfcc=20, norm='ortho',", "= pred.unsqueeze(1) if pred.ndim == 3 else pred target =", "that the model can be used with nn.Parallel # self.kernel_sin", "torch.tensor(cqt_kernels.imag.astype(np.float32)) if trainable_STFT: wsin = torch.nn.Parameter(wsin, requires_grad=trainable_kernels) wcos = torch.nn.Parameter(wcos,", "cqt kernel done, n_fft = \",self.n_fft) # Preparing kernels for", "tensor is in the shape of (batch, freq_bins, timesteps, 2)\"", "window self.win_length = win_length self.iSTFT = iSTFT self.trainable = trainable", "index is the center of the STFT kernel. Default value", "= spec_real[:, :self.freq_bins, :] spec_imag = spec_imag[:, :self.freq_bins, :] if", "If ``False``, the time index is the beginning of the", "of spectrograms. shape = ``(num_samples, freq_bins,time_steps)``. 
Examples -------- >>> spec_layer", "= conv1d(x, self.wcos, stride=self.stride) # Doing STFT by using conv1d", "applies to the frequency axis x_shape = x.shape N =", "::2], x[:, :, 1::2].flip([2])], dim=2) Vc = torch.rfft(v, 1, onesided=False)", "be updated during model training. Default value is ``False``. verbose", "memory. When ``trainable=True`` and ``freq_scale!='no'``, there is no guarantee that", "of a constant Q transform.” (1992). early downsampling factor is", "the normalization used in librosa. window : str The windowing", "cqt_kernels_real) self.register_buffer('cqt_kernels_imag', cqt_kernels_imag) print(\"STFT kernels created, time used = {:.4f}", "filter windows for inverse kernel_sin, kernel_cos, _, _, window_mask =", "torch.rfft(v, 1, onesided=False) # TODO: Can make the W_r and", "automatically broadcast to the right shape output_format : str Control", "'reflect': padding = nn.ReflectionPad1d(self.kernel_width//2) x = padding(x) # STFT fourier_real", "batch of magnitude spectrograms to waveforms. Parameters ---------- S :", "has the same output length of the original waveform, please", "forward(self,x, output_format=None): \"\"\" Convert a batch of waveforms to CQT", "audio clips, then the discrete cosine transform is calcuated to", "this argument does nothing. sr : int The sampling rate", "window='ones', freq_scale='no') # Converting kernels from numpy arrays to torch", "sr : int The sampling rate for the input audio.", "length. By default, ``length=None``, which will remove ``n_fft//2`` samples from", "CQT1 = get_cqt_complex(x_down, self.wcos, self.wsin, hop, self.padding) CQT = torch.cat((CQT1,", "verbose=True): super().__init__() self.norm = norm # Now norm is used", "to CQT spectrograms. 
Parameters ---------- x_real : torch tensor Real", "print(f'win_length={self.win_length}\\tw={self.w.shape}') inverse = torch.istft(S.unsqueeze(-1) * angles, self.n_fft, self.hop_length, win_length=self.win_length, window=self.w,", "make it # same mag as 1992 CQT = CQT*self.downsample_factor", "per octave. Default is 12. trainable_STFT : bool Determine if", "spectrograms. ``shape = (num_samples, freq_bins,time_steps)`` if ``output_format='Magnitude'``; ``shape = (num_samples,", "# attributes that will be used for _power_to_db if amin", "CQT_imag = -conv1d(x, self.cqt_kernels_imag, stride=self.hop_length) * \\ torch.sqrt(self.lenghts.view(-1,1)) if output_format=='Magnitude':", "output_format=None): \"\"\" Convert a batch of waveforms to CQT spectrograms.", "bin is inferred from the ``n_bins`` and ``bins_per_octave``. If ``fmax``", "= trainable self.hop_length = hop_length self.center = center self.pad_mode =", "2048. n_iter=32 : int The number of iterations for Griffin-Lim.", ": float The frequency for the lowest CQT bin. Default", "spec_layer = Spectrogram.CQT2010v2() >>> specs = spec_layer(x) \"\"\" # To", "\"\"\" if (hasattr(self, 'kernel_sin_inv') != True) or (hasattr(self, 'kernel_cos_inv') !=", "amin = torch.tensor([amin]) ref = torch.abs(torch.tensor([ref])) self.register_buffer('amin', amin) self.register_buffer('ref', ref)", "start = time() # self.lowpass_filter = torch.tensor( # create_lowpass_filter( #", "time axis and freq axis def forward(self, x): \"\"\" Convert", "and the end of the output. refresh_win : bool Recalculating", "in [1]. Please refer to :func:`~nnAudio.Spectrogram.CQT1992v2` for a more computational", "share the same memory... kernel_sin_inv = torch.cat((kernel_sin, -kernel_sin[1:-1].flip(0)), 0) kernel_cos_inv", "It only support type-II DCT at the moment. Input signal", "same as the 1992 algorithm. 
Therefore, we can reuse the", "if trainable_CQT: cqt_kernels_real = torch.nn.Parameter(cqt_kernels_real, requires_grad=trainable_kernels) cqt_kernels_imag = torch.nn.Parameter(cqt_kernels_imag, requires_grad=trainable_kernels)", "be updated during model training. Default value is ``False``. trainable_window", "arguments follow the convention from librosa. This class inherits from", "== 'reflect': padding = nn.ReflectionPad1d(self.kernel_width//2) x = padding(x) # STFT", "Doing STFT by using conv1d # remove redundant parts spec_real", "The ending frequency for the highest Mel filter bank. trainable_mel", "[1]. I slightly modify it so that it runs faster", "import torch import torch.nn as nn from torch.nn.functional import conv1d,", "is ``0.99``. device : str Choose which device to initialize", "= fft(basis)[:,:self.n_fft//2+1] # Convert CQT kenral from time domain to", "the Fourier kernels are trainable if trainable_kernels: # Making all", "norm='ortho', verbose=True, ref=1.0, amin=1e-10, top_db=80.0, **kwargs): super().__init__() self.melspec_layer = MelSpectrogram(sr=sr,", "not ``None``. bins_per_octave : int Number of bins per octave.", "create this window once to save time # Unless the", "``False`` is used, the Mel scale is quasi-logarithmic. When ``True``", "to the right shape \"\"\" x = broadcast_dim(x) if self.center:", "function is to calculate the Mel-frequency cepstral coefficients (MFCCs) of", "n_fft # Create filter windows for stft wsin, wcos, self.bins2freq", "that the inverse is perfect, please use with extra care.", "inversion \"\"\" def loss_fn(pred, target): pred = pred.unsqueeze(1) if pred.ndim", "These cqt_kernel is already in the frequency domain cqt_kernels_real =", "of the arguments follow the convention from librosa. This class", "is ``no``, this argument does nothing. 
sr : int The", "# CQT CQT_real = conv1d(x, self.cqt_kernels_real, stride=self.hop_length) CQT_imag = conv1d(x,", "the magnitude spectrograms, please use :func:`~nnAudio.Spectrogram.Griffin_Lim`. Parameters ---------- onesided :", "MelSpectrogram(sr=sr, verbose=verbose, **kwargs) self.m_mfcc = n_mfcc # attributes that will", "The filter banks maps the n_fft to mel bins. Default", "self.register_parameter('mel_basis', mel_basis) else: self.register_buffer('mel_basis', mel_basis) # if trainable_mel==True: # self.mel_basis", "Parameters ---------- sr : int The sampling rate for the", "used, the bin will start at 0Hz and end at", "the correct frequency. trainable : bool Determine if the STFT", "Determine if the CQT kernels are trainable or not. If", "make a small CQT kernel covering only the top octave.", "octave kernel self.fmin_t = fmin*2**(self.n_octaves-1) remainder = n_bins % bins_per_octave", "= 1/(2**(1/bins_per_octave)-1) print(\"Creating CQT kernels ...\", end='\\r') start = time()", "phase_real = torch.cos(torch.atan2(CQT[:,:,:,1],CQT[:,:,:,0])) phase_imag = torch.sin(torch.atan2(CQT[:,:,:,1],CQT[:,:,:,0])) return torch.stack((phase_real,phase_imag), -1) def", "sr=22050, hop_length=512, fmin=32.70, fmax=None, n_bins=84, bins_per_octave=12, norm=1, window='hann', center=True, pad_mode='reflect',", "'cpu'. Returns ------- spectrogram : torch.tensor It returns a batch", "# Creating window function for stft and istft later self.w", "torch.stack((spec_real,-spec_imag), -1) # Remember the minus sign for imaginary part", "fmin : int The starting frequency for the lowest Mel", "default value is ``0.99``. device : str Choose which device", "Default value is ``False``. verbose : bool If ``True``, it", "1) to support 2D Conv kernel_sin = torch.tensor(kernel_sin, dtype=torch.float).unsqueeze(-1) kernel_cos", "shape of (batch, freq_bins, timesteps, 2)\" # If the input", "use ``onesided=True``, else use ``onesided=False`` To make sure the inverse", "return type. 
``Magnitude`` will return the magnitude of the STFT", "alogrithm uses the method proposed in [1]. I slightly modify", "# x_real = padding(x_real) # x_imag = padding(x_imag) # Watch", "// 2).\") padding = nn.ReflectionPad1d(self.pad_amount) x = padding(x) spec_imag =", "= create_cqt_kernels(Q, sr, self.fmin_t, n_filters, bins_per_octave, norm=basis_norm, topbin_check=False) # This", "for debugging \"\"\" x = broadcast_dim(x) if self.center: if self.pad_mode", "self.pad_mode == 'reflect': self.padding = nn.ReflectionPad1d(self.n_fft//2) def forward(self,x,output_format=None): \"\"\" Convert", "torch.cos(torch.atan2(CQT_imag,CQT_real)) phase_imag = torch.sin(torch.atan2(CQT_imag,CQT_real)) return torch.stack((phase_real,phase_imag), -1) def extra_repr(self) ->", "window='hann', freq_scale='no', center=True, pad_mode='reflect', fmin=50, fmax=6000, sr=22050): super().__init__() self.stride =", "x = x.permute(0,2,1) # make freq the last axis, since", "result, shape = ``(num_samples, freq_bins, time_steps)``; 'Complex' will return the", "extra_repr(self) -> str: return 'n_fft={}, Fourier Kernel size={}, iSTFT={}, trainable={}'.format(", "= X_imag.unsqueeze(1) a1 = conv2d(X_real_bc, self.kernel_cos, stride=(1,1)) b2 = conv2d(X_imag_bc,", "\"\"\" Convert a batch of magnitude spectrograms to waveforms. Parameters", "the Mel filter banks will be updated during model training.", "phase of the STFT reuslt, shape = ``(num_samples, freq_bins,time_steps, 2)``.", "lowpass_filter = torch.tensor(create_lowpass_filter( band_center = 0.5, kernelLength=256, transitionBandwidth=0.001 ) )", "Q transform.” (1992). Parameters ---------- sr : int The sampling", "freq_scale='no', center=True, fmin=50, fmax=6000, sr=22050, trainable_kernels=False, trainable_window=False, verbose=True, refresh_win=True): super().__init__()", "torch.nn.Parameter(self.wcos) def forward(self, x): \"\"\" Convert a batch of waveforms", "these 3 shapes. Most of the arguments follow the convention", "(e.g. 
n_fft, window) need to be the same as the", "_, _, window_mask = create_fourier_kernels(n_fft, win_length=win_length, freq_bins=n_fft, window=window, freq_scale=freq_scale, fmin=fmin,", "------- spectrogram : torch.tensor It returns a batch of waveforms.", "of the STFT result, shape = ``(num_samples, freq_bins, time_steps)``; 'Complex'", "len_audio)``\\n 3. ``(num_audio, 1, len_audio)`` The correct shape will be", "filter requires for the kernel # n_octaves determines how many", "kernel_cos have the shape (freq_bins, 1, n_fft, 1) to support", "the downsampled input is equavalent to the next lower octave.", "print(\"CQT kernels created, time used = {:.4f} seconds\".format(time()-start)) def forward(self,x,", "window_mask) if verbose==True: print(\"iSTFT kernels created, time used = {:.4f}", "torch.sqrt(CQT.pow(2).sum(-1)) elif output_format=='Complex': return CQT elif output_format=='Phase': phase_real = torch.cos(torch.atan2(CQT[:,:,:,1],CQT[:,:,:,0]))", "be used later in inverse self.register_buffer('window_mask', window_mask.unsqueeze(0).unsqueeze(-1)) if verbose==True: print(\"STFT", "final phase to reconstruct the waveforms inverse = torch.istft(S.unsqueeze(-1) *", "self.register_buffer('cqt_kernels_imag', cqt_kernels_imag) # If center==True, the STFT window will be", "for division # Only need to create this window once", "MFCC(torch.nn.Module): \"\"\"This function is to calculate the Mel-frequency cepstral coefficients", "- Vc[:, :, :, 1] * W_i if norm ==", "= win_length self.iSTFT = iSTFT self.trainable = trainable start =", "MelSpectrogram(torch.nn.Module): \"\"\"This function is to calculate the Melspectrogram of the", "used, the Mel scale is logarithmic. The default value is", "= pred_stft.detach().clamp(eps) ** 0.5 pred_stft = pred_stft.view((*shape[:-2], n_freq, time)) if", "CQT bin. 
Default is ``None``, therefore the higest CQT bin", "For inverse, the Fourier kernels do not need to be", "win_length==None: win_length = n_fft if hop_length==None: hop_length = int(win_length //", "CQT = CQT[:,-self.n_bins:,:] # Removing unwanted top bins if self.norm:", "``n_bins`` will be calculated automatically. Default is ``None`` n_bins :", "time index is the beginning of the STFT kernel, if", "spectrograms. If you have the magnitude spectrograms, please use :func:`~nnAudio.Spectrogram.Griffin_Lim`.", "= spec_layer(x) \"\"\" def __init__(self, sr=22050, n_mfcc=20, norm='ortho', verbose=True, ref=1.0,", "padding method. Default value is 'reflect'. inverse : bool To", "= hop_length self.pad_mode = pad_mode self.n_bins = n_bins self.output_format =", "= {}, fmin_t = {}, n_filters = {}\".format(Q, self.fmin_t, n_filters))", "The windowing function for iSTFT. It uses ``scipy.signal.get_window``, please refer", "phase def inverse(self, X, onesided=True, length=None, refresh_win=True): \"\"\" This function", "2)\" # If the input spectrogram contains only half of", "self.pad_mode == 'constant': padding = nn.ConstantPad1d(self.kernel_width//2, 0) elif self.pad_mode ==", "is to downsample the input audio to reduce the CQT", "shape to do inverse x_imag.transpose_(1,2) # Prepare the right shape", "fmax=fmax, sr=sr, verbose=False) window_mask = get_window(window,int(win_length), fftbins=True) # For inverse,", "(2010). [2] Brown, <NAME>. and <NAME>. “An efficient algorithm for", "frequency bin for the top octave kernel self.fmin_t = fmin*2**(self.n_octaves-1)", "= self.n_fft // 2 self.refresh_win = refresh_win start = time()", "if grad_threshold and pred_stft.grad.max() < grad_threshold: if verbose: print(f\"Target max", "will remove ``n_fft//2`` samples from the start and the end", "tensor to the shape that fits conv1d self.register_buffer('lowpass_filter', lowpass_filter[None,None,:]) if", "windowing functions. The default value is 'hann'. 
freq_scale : 'linear',", "from the start and the end of the output. If", "= \",self.n_fft) # If center==True, the STFT window will be", ":, 1::2].flip([2])], dim=2) Vc = torch.rfft(v, 1, onesided=False) # TODO:", "sgd_kwargs: default_sgd_kwargs.update(sgd_kwargs) sgd_kwargs = default_sgd_kwargs mel_basis = self.mel_basis.detach() shape =", "real = a1-b2 return (real/self.n_fft, imag/self.n_fft) class iSTFT(torch.nn.Module): \"\"\"This class", "= torch.zeros(*angles.shape, device=self.device) for _ in range(self.n_iter): tprev = rebuilt", "...\", end='\\r') start = time() basis, self.n_fft, lenghts = create_cqt_kernels(Q,", "verbose==True: print(\"STFT filter created, time used = {:.4f} seconds\".format(time()-start)) print(\"Mel", "to the frequency axis x_shape = x.shape N = x_shape[-1]", "n_bins=84, bins_per_octave=12, norm=1, window='hann', center=True, pad_mode='reflect', trainable=False, output_format='Magnitude', verbose=True): super().__init__()", "to Mel spectrograms. Parameters ---------- x : torch tensor Input", "the correct ``fmin`` and ``fmax``. Setting the correct sampling rate", "of a float epsilon = 10e-8 # fudge factor for", "= torch.tensor(create_lowpass_filter( band_center = 0.50, kernelLength=256, transitionBandwidth=0.001) ) # Broadcast", "``True``, the time index is the center of the CQT", "# To DO: # need to deal with the filter", "training. Default value is ``False``. trainable_window : bool Determine if", "= torch.nn.Parameter(self.mel_basis) # if trainable_STFT==True: # self.wsin = torch.nn.Parameter(self.wsin) #", "bins. Default is ``None``, which means ``n_fft//2+1`` bins Please make", "is same as ``torch.nn.Module``. 
This alogrithm uses the resampling method", "else use ``onesided=False`` length : int To make sure the", "positive and negative signs # ifft = e^(+2\\pi*j)*X # ifft(X_real)", "import numpy as np from time import time from nnAudio.librosa_functions", "shape (freq_bins, 1, n_fft, 1) to support 2D Conv kernel_sin", "tensors wsin = torch.tensor(kernel_sin * window) wcos = torch.tensor(kernel_cos *", "default value is ``32`` hop_length : int The hop (or", "time() mel_basis = mel(sr, n_fft, n_mels, fmin, fmax, htk=htk, norm=norm)", "{:.4f} seconds\".format(time()-start)) else: pass def forward(self, X, onesided=False, length=None, refresh_win=None):", "number is stored as ``(real, imag)`` in the last axis.", "= torch.tensor(cqt_kernels.real).unsqueeze(1) cqt_kernels_imag = torch.tensor(cqt_kernels.imag).unsqueeze(1) if trainable: cqt_kernels_real = torch.nn.Parameter(cqt_kernels_real,", "spec_layer(x) \"\"\" def __init__(self, n_fft=2048, win_length=None, freq_bins=None, hop_length=None, window='hann', freq_scale='no',", "cqt_kernels_imag) else: self.register_buffer('cqt_kernels_real', cqt_kernels_real) self.register_buffer('cqt_kernels_imag', cqt_kernels_imag) print(\"STFT kernels created, time", "freq_bins=n_fft, window=window, freq_scale=freq_scale, fmin=fmin, fmax=fmax, sr=sr) self.wsin = torch.tensor(wsin, dtype=torch.float)", "linear spacing. 
Please make sure the value is the same", "``output_format='Complex' or 'Phase'``; Examples -------- >>> spec_layer = Spectrogram.CQT1992v2() >>>", "wsin) self.register_parameter('wcos', wcos) # Prepare the shape of window mask", "x.permute(0,2,1) # make freq the last axis, since dct applies", "broadcast_dim(x) spec = self.stft(x, output_format='Magnitude')**self.power melspec = torch.matmul(self.mel_basis, spec) return", "stride=self.stride) imag = a2+b1 real = a1-b2 return (real/self.n_fft, imag/self.n_fft)", "again to get same result as librosa CQT = CQT*torch.sqrt(self.lenghts.view(-1,1,1))", "= padding(x) spec_imag = conv1d(x, self.wsin, stride=self.stride) spec_real = conv1d(x,", "== 'constant': padding = nn.ConstantPad1d(self.kernel_width//2, 0) elif self.pad_mode == 'reflect':", "time() sr, self.hop_length, self.downsample_factor, early_downsample_filter, \\ self.earlydownsample = get_early_downsample_params(sr, hop_length,", "value is ``False``. verbose : bool If ``True``, it shows", "the filter and other tensors def __init__(self, sr=22050, hop_length=512, fmin=32.70,", "= window self.win_length = win_length self.iSTFT = iSTFT self.trainable =", "default value is ``False``. fmin : int The starting frequency", "topbin_check=False) # This is for the normalization in the end", "# band_center = 0.50, # kernelLength=256, # transitionBandwidth=0.001)) lowpass_filter =", "return torch.stack((CQT_real,CQT_imag),-1) elif output_format=='Phase': phase_real = torch.cos(torch.atan2(CQT_imag,CQT_real)) phase_imag = torch.sin(torch.atan2(CQT_imag,CQT_real))", "(1992). This function is to calculate the CQT of the", "numpy as np from time import time from nnAudio.librosa_functions import", "n_fft self.win_length = win_length self.stride = hop_length self.center = center", "CQT kernel. Default value if ``True``. 
pad_mode : str The", "trainable_window : bool Determine if the window function is trainable", "real = conv1d(x, self.wcos, stride=self.stride) return (real, -imag) def inverse(self,x_real,x_imag):", "the shape of window mask so that it can be", "clone from librosa.griffinlim. [1] <NAME>., <NAME>., & <NAME>. “A fast", "the iSTFT module or not. By default, it is False", "= hop_length self.pad_mode = pad_mode self.n_bins = n_bins self.earlydownsample =", "STFT window will be put in the middle, and paddings", "torch.nn.Parameter(self.wsin) # self.wcos = torch.nn.Parameter(self.wcos) def forward(self, x): \"\"\" Convert", "right shape \"\"\" x = self.melspec_layer(x) x = self._power_to_db(x) x", "less the same except in the very low frequency region", "(b1, b2)*1j # = (-b2, b1) a1 = conv1d(x_real, self.wcos,", "= torch.nn.Parameter(kernel_cos, requires_grad=trainable_kernels) self.register_parameter('kernel_sin', kernel_sin) self.register_parameter('kernel_cos', kernel_cos) else: self.register_buffer('kernel_sin', kernel_sin)", "argument is True if verbose==True: print(\"Creating early downsampling filter ...\",", "seconds\".format(time()-start)) # Caluate num of filter requires for the kernel", "Determine if the window function is trainable or not. Default", "as the forward STFT. center : bool Putting the iSTFT", "center=True, pad_mode='reflect', iSTFT=False, fmin=50, fmax=6000, sr=22050, trainable=False, output_format=\"Complex\", verbose=True): super().__init__()", "use :func:`~nnAudio.Spectrogram.Griffin_Lim`. Parameters ---------- onesided : bool If your spectrograms", ": bool Putting the CQT keneral at the center of", "n_fft, 1) \"\"\" def __init__(self, n_fft=2048, freq_bins=None, hop_length=512, window='hann', freq_scale='no',", "trainable using ``trainable_mel`` and ``trainable_STFT``. 
It only supports type-II DCT
n_fft : int The", "hop_length, fmax_t, Q, self.n_octaves, verbose) self.register_buffer('early_downsample_filter', early_downsample_filter) if verbose==True: print(\"Early", "remainder = n_bins % bins_per_octave # print(\"remainder = \", remainder)", "def __init__(self, sr=22050, hop_length=512, fmin=32.70, fmax=None, n_bins=84, bins_per_octave=12, norm=True, basis_norm=1,", "requires_grad=trainable_window) self.register_parameter('window_mask', window_mask) else: self.register_buffer('window_mask', window_mask) if verbose==True: print(\"iSTFT kernels", "x = self.melspec_layer(x) x = self._power_to_db(x) x = self._dct(x, norm='ortho')[:,:self.m_mfcc,:]", "self.momentum = momentum self.device = device if win_length==None: self.win_length=n_fft else:", "self.register_buffer('window_mask', window_mask.unsqueeze(0).unsqueeze(-1)) if verbose==True: print(\"STFT kernels created, time used =", "Short-Time Fourier Transform (STFT) # We set the frequency range", "(*self.cqt_kernels_real.shape,) ) class CQT1992v2(torch.nn.Module): \"\"\"This function is to calculate the", "save time # Unless the input spectrograms have different time", "momentum : float The momentum for the update rule. The", "frequency fmax_t = self.fmin_t*2**((bins_per_octave-1)/bins_per_octave) else: # Calculate the top bin", "``None``. bins_per_octave : int Number of bins per octave. Default", "Amplitude return torch.sqrt(CQT.pow(2).sum(-1)) elif output_format=='Complex': return CQT elif output_format=='Phase': phase_real", "imag = conv1d(x, self.wsin, stride=self.stride) real = conv1d(x, self.wcos, stride=self.stride)", "where freq < 40Hz. Parameters ---------- sr : int The", "be used with nn.Parallel # self.kernel_sin = torch.nn.Parameter(self.kernel_sin, requires_grad=self.trainable) #", "\"Please make sure your input is in the shape of", "of the output. 
refresh_win : bool Recalculating the window sum", "the shape of (batch, freq_bins, timesteps, 2).\"\\ \"\\nIf you have", "for the calculation of a constant Q transform.” (1992). early", "= output_format or self.output_format x = broadcast_dim(x) if self.earlydownsample==True: x", "quasi-logarithmic. When ``True`` is used, the Mel scale is logarithmic.", "torch.sqrt(CQT_real.pow(2)+CQT_imag.pow(2)+1e-8) return CQT elif output_format=='Complex': return torch.stack((CQT_real,CQT_imag),-1) elif output_format=='Phase': phase_real", "freq_scale=freq_scale, fmin=fmin, fmax=fmax, sr=sr, verbose=False) window_mask = get_window(window,int(win_length), fftbins=True) #", "used = {:.4f} seconds\".format(time()-start)) # print(\"Getting cqt kernel done, n_fft", "self.wsin, stride=self.stride) b1 = conv1d(x_imag, self.wcos, stride=self.stride) b2 = conv1d(x_imag,", "torch.cat((kernel_sin, -kernel_sin[1:-1].flip(0)), 0) kernel_cos_inv = torch.cat((kernel_cos, kernel_cos[1:-1].flip(0)), 0) if iSTFT:", "(self.w_sum>1e-10) else: pass real[:, self.nonzero_indices] = real[:,self.nonzero_indices].div(self.w_sum[self.nonzero_indices]) # Remove padding", "int Number of bins per octave. Default is 12. norm", "= time() lowpass_filter = torch.tensor(create_lowpass_filter( band_center = 0.5, kernelLength=256, transitionBandwidth=0.001", "return 'n_fft={}, Fourier Kernel size={}, iSTFT={}, trainable={}'.format( self.n_fft, (*self.wsin.shape,), self.iSTFT,", "STFT optimization\") if loss_threshold and loss < loss_threshold: if verbose:", "spec_layer = Spectrogram.MFCC() >>> mfcc = spec_layer(x) \"\"\" def __init__(self,", "number,\" \\ \"make sure our tensor is in the shape", "STFT kernels will also be caluclated and the STFT kernels", "coefficients norm : string The default value is 'ortho'. Normalization", "it same mag as 1992 CQT = CQT*self.downsample_factor if output_format=='Magnitude':", "the short-time Fourier transform (STFT) of the input signal. 
Input", "CQT = CQT*torch.sqrt(self.lenghts.view(-1,1,1)) if output_format=='Magnitude': if self.trainable==False: # Getting CQT", "variables nn.Parameter, so that the model can be used with", "pred.ndim == 3 else pred target = target.unsqueeze(1) if target.ndim", "value is 'hann'. freq_scale : 'linear', 'log', or 'no' Determine", "the very low frequency region where freq < 40Hz. Parameters", "spec_imag[:, :self.freq_bins, :] if output_format=='Magnitude': spec = spec_real.pow(2) + spec_imag.pow(2)", "first extracts Mel spectrograms from the audio clips, then the", "start = time() kernel_sin, kernel_cos, self.bins2freq, _, window = create_fourier_kernels(self.n_fft,", "bins_per_octave, norm, window, fmax) self.register_buffer('lenghts', lenghts) cqt_kernels_real = torch.tensor(cqt_kernels.real).unsqueeze(1) cqt_kernels_imag", "short-time Fourier transform (STFT) of the input signal. Input signal", "12. norm : bool Normalization for the CQT result. basis_norm", "'reflect': padding = nn.ReflectionPad1d(self.kernel_width//2) x = padding(x) # CQT CQT_real", ":, 1] * W_i if norm == 'ortho': V[:, :,", "int Number of frequency bins. Default is ``None``, which means", "same except in the very low frequency region where freq", "center self.pad_mode = pad_mode self.output_format = output_format # creating kernels", "cepstral coefficients norm : string The default value is 'ortho'.", "== 'constant': padding = nn.ConstantPad1d(self.pad_amount, 0) elif self.pad_mode == 'reflect':", "get same result as librosa CQT = CQT*torch.sqrt(self.lenghts.view(-1,1,1)) if output_format=='Magnitude':", "self.register_parameter('wcos', wcos) else: self.register_buffer('wsin', wsin) self.register_buffer('wcos', wcos) if trainable_CQT: cqt_kernels_real", "``(real, imag)`` in the last axis. 
Default value is 'Magnitude'.", "output_format=='Phase': phase_real = torch.cos(torch.atan2(CQT[:,:,:,1],CQT[:,:,:,0])) phase_imag = torch.sin(torch.atan2(CQT[:,:,:,1],CQT[:,:,:,0])) return torch.stack((phase_real,phase_imag), -1)", "str Determine the return type. 'Magnitude' will return the magnitude", "kernels window_mask = torch.tensor(window_mask) wsin = kernel_sin * window_mask wcos", "to do inverse x_imag.transpose_(1,2) # Prepare the right shape to", "int(win_length // 4) self.output_format = output_format self.trainable = trainable self.stride", "if self.earlydownsample == True: # Do early downsampling if this", "activate early downsampling later if possible self.trainable = trainable self.output_format", "is very important for calculating the correct frequency. n_mfcc :", "only the top octave. Then we keep downsampling the input", "is ``False``. trainable_window : bool Determine if the window function", "This alogrithm uses the method proposed in [1]. Please refer", "= n_fft self.win_length = win_length self.stride = hop_length self.center =", "output_format : str Control the type of spectrogram to be", "gradient when sqrt(0) due to output=0 else: return torch.sqrt(spec) elif", "Please don't use the following classes # class DFT(torch.nn.Module): \"\"\"", "Default value is 'cpu'. Returns ------- spectrogram : torch.tensor It", "center self.pad_mode = pad_mode self.n_fft = n_fft # Create filter", "cqt_kernels_real) self.register_buffer('cqt_kernels_imag', cqt_kernels_imag) if verbose==True: print(\"CQT kernels created, time used", "tensor Real part of the signal. x_imag : torch tensor", "perfect, please use with extra care. Parameters ---------- n_fft :", "Default is 32.70Hz, which coresponds to the note C0. 
fmax", "specs = spec_layer(x) \"\"\" def __init__(self, n_fft=2048, win_length=None, freq_bins=None, hop_length=None,", "all the frames real = overlap_add(real, self.stride) # Prepare the", "cqt_kernels_imag) else: self.register_buffer('cqt_kernels_real', cqt_kernels_real) self.register_buffer('cqt_kernels_imag', cqt_kernels_imag) if verbose==True: print(\"CQT kernels", "the gradients for Mel filter banks will also be calculated", "amplitude with n_fft real /= (self.n_fft) # Overlap and Add", "method proposed in [1]. Please refer to :func:`~nnAudio.Spectrogram.CQT1992v2` for a", "self.n_fft, lenghts = create_cqt_kernels(Q, sr, self.fmin_t, n_filters, bins_per_octave, norm=basis_norm, topbin_check=False)", "if verbose==True: print(\"CQT kernels created, time used = {:.4f} seconds\".format(time()-start))", "-1) class CQT(CQT1992v2): \"\"\"An abbreviation for :func:`~nnAudio.Spectrogram.CQT1992v2`. Please refer to", "``False``. verbose : bool If ``True``, it shows layer information.", "Initializing Random Phase rand_phase = torch.randn(*S.shape, device=self.device) angles = torch.empty((*S.shape,2),", "updated during model training. Default value is ``False``. trainable_STFT :", "== 'constant': padding = nn.ConstantPad1d(self.n_fft//2, 0) elif self.pad_mode == 'reflect':", "torch.tensor(basis.imag.astype(np.float32)).unsqueeze(1) if trainable: cqt_kernels_real = torch.nn.Parameter(cqt_kernels_real, requires_grad=trainable_kernels) cqt_kernels_imag = torch.nn.Parameter(cqt_kernels_imag,", "Q transform.” (1992). Early downsampling factor is to downsample the", "made trainable using ``trainable_mel`` and ``trainable_STFT``. It only support type-II", "in the shape of (batch, freq_bins, timesteps, 2)\" # If", "freq_bins=freq_bins, window=window, freq_scale=freq_scale, fmin=fmin, fmax=fmax, sr=sr, verbose=verbose) kernel_sin = torch.tensor(kernel_sin,", "time_steps)``; 'Complex' will return the STFT result in complex number,", "as ``torch.nn.Module``. 
This alogrithm uses the method proposed in [1].", "window mask so that it can be used later in", "= hop_length self.center = center self.pad_amount = self.n_fft // 2", "if self.trainable==False: # Getting CQT Amplitude return torch.sqrt(CQT.pow(2).sum(-1)) else: return", "and memory efficient version. [1] Brown, <NAME>. and <NAME>. “An", "``False`` trainable_CQT : bool Determine if the frequency domain CQT", "end at Nyquist frequency with linear spacing. Please make sure", "the time axis and freq axis def forward(self, x): \"\"\"", "the STFT result in complex number, shape = ``(num_samples, freq_bins,", "target): pred = pred.unsqueeze(1) if pred.ndim == 3 else pred", "all prints device : str Choose which device to initialize", "also be changed during the ``forward`` method. verbose : bool", "kernels created, time used = {:.4f} seconds\".format(time()-start)) if trainable_STFT: wsin", "Can make the W_r and W_i trainable here k =", "output_format=='Phase': phase_real = torch.cos(torch.atan2(CQT_imag,CQT_real)) phase_imag = torch.sin(torch.atan2(CQT_imag,CQT_real)) return torch.stack((phase_real,phase_imag), -1)", "uses the method proposed in [1]. I slightly modify it", "rebuilt = torch.stft(inverse, self.n_fft, self.hop_length, win_length=self.win_length, window=self.w, pad_mode=self.pad_mode) # Phase", "time() # self.lowpass_filter = torch.tensor( # create_lowpass_filter( # band_center =", "If freq_scale is ``no``, this argument does nothing. sr :", "torch.tensor(wcos) if verbose==True: print(\"STFT kernels created, time used = {:.4f}", "redundant parts spec_real = spec_real[:, :self.freq_bins, :] spec_imag = spec_imag[:,", "Parameters ---------- n_fft : int The window size. Default value", "# Create filter windows for stft wsin, wcos, self.bins2freq =", "rate for the input audio. 
It is used to calculate", "self.bins2freq, _, window = create_fourier_kernels(self.n_fft, window='ones', freq_scale='no') wsin = kernel_sin", "= torch.tensor(wsin) wcos = torch.tensor(wcos) if verbose==True: print(\"STFT kernels created,", "normalize as librosa # self.cqt_kernels_imag*=lenghts.unsqueeze(1)/self.kernel_width print(\"Creating STFT kernels ...\", end='\\r')", "melspec).clamp(eps) pred_stft = nn.Parameter(pred_stft, requires_grad=True) sgd_kwargs[\"lr\"] = sgd_kwargs[\"lr\"] * batch_size", "# and ending are required. if self.pad_mode == 'constant': self.padding", "CQT_imag = conv1d(x, self.cqt_kernels_imag, stride=self.hop_length) # Getting CQT Amplitude CQT", "Stopping optimization.\") break pred_stft = pred_stft.detach().clamp(eps) ** 0.5 pred_stft =", "torch.max(log_spec, batch_wise_max - self.top_db) return log_spec def _dct(self, x, norm=None):", "CQT = CQT*torch.sqrt(self.lenghts.view(-1,1,1)) # Normalizing the output with the downsampling", "/ np.float(bins_per_octave)) lenghts = np.ceil(Q * sr / freqs) lenghts", "if win_length==None: self.win_length=n_fft else: self.win_length=win_length if hop_length==None: self.hop_length = n_fft//4", "# class DFT(torch.nn.Module): \"\"\" Experimental feature before `torch.fft` was made", "n_fft, n_mels, hop_length, and window Returns ------- MFCCs : torch.tensor", "function for CQT. It uses ``scipy.signal.get_window``, please refer to scipy", "value is 512. window : str The windowing function for", "possible windowing functions. The default value is 'hann' pad_mode :", "is 512. 
fmin : float The frequency for the lowest", "faster than the original 1992 algorithm, that is why I", "self.register_parameter('cqt_kernels_real', cqt_kernels_real) self.register_parameter('cqt_kernels_imag', cqt_kernels_imag) else: self.register_buffer('cqt_kernels_real', cqt_kernels_real) self.register_buffer('cqt_kernels_imag', cqt_kernels_imag) print(\"STFT", "def extra_repr(self) -> str: return 'n_mfcc = {}'.format( (self.n_mfcc) )", "remainder) if remainder==0: # Calculate the top bin frequency fmax_t", ": torch tensor Real part of the signal. x_imag :", "device=self.device) for _ in range(self.n_iter): tprev = rebuilt # Saving", ":self.freq_bins, :] spec_imag = spec_imag[:, :self.freq_bins, :] if output_format=='Magnitude': spec", "return type. 'Magnitude' will return the magnitude of the STFT", "self.pad_mode == 'constant': # padding = nn.ConstantPad1d(self.n_fft//2, 0) # elif", "the CQT kernel, if ``True``, the time index is the", "stride=self.stride) b1 = conv1d(x_imag, self.wcos, stride=self.stride) b2 = conv1d(x_imag, self.wsin,", "also be caluclated and the CQT kernels will be updated", "kernel_cos) # Decide if the window function is trainable if", ":,:,0] = torch.cos(2 * np.pi * rand_phase) angles[:,:,:,1] = torch.sin(2", "the window function is trainable if trainable_window: window_mask = torch.nn.Parameter(window_mask,", "by setting `iSTFT=True` if you want to use `inverse`\") assert", "end='\\r') start = time() # self.lowpass_filter = torch.tensor( # create_lowpass_filter(", "hop_length self.center = center self.pad_mode = pad_mode self.norm = norm", "windowed window_mask = torch.tensor(window_mask).unsqueeze(0).unsqueeze(-1) # kernel_sin and kernel_cos have the", "it is False to save GPU memory. fmin : int", "None: if self.top_db < 0: raise ParameterError('top_db must be non-negative')", "made avaliable. 
The inverse function only works for 1 single", "-conv1d(x, self.cqt_kernels_imag, stride=self.hop_length) * \\ torch.sqrt(self.lenghts.view(-1,1)) if output_format=='Magnitude': if self.trainable==False:", "``Phase``. The output_format can also be changed during the ``forward``", "(n_fft // 2).\") padding = nn.ReflectionPad1d(self.pad_amount) x = padding(x) spec_imag", "of waveforms to CQT spectrograms. Parameters ---------- x : torch", "in Mel -> STFT optimization\") if loss_threshold and loss <", "= CQT/self.kernel_width*torch.sqrt(self.lenghts.view(-1,1,1)) else: CQT = CQT*torch.sqrt(self.lenghts.view(-1,1,1)) if output_format=='Magnitude': # Getting", "Making all these variables nn.Parameter, so that the model can", "downsampling the input audio by a factor of 2 to", "``n_fft//2+1`` frequency bins, please use ``onesided=True``, else use ``onesided=False`` length", "Phase update rule angles[:,:,:] = rebuilt[:,:,:] - (self.momentum / (1", "It uses ``scipy.signal.get_window``, please refer to scipy documentation for possible", "= n_fft self.win_length = win_length self.n_iter = n_iter self.center =", "the CQT kernels are trainable or not. If ``True``, the", "This is for the normalization in the end freqs =", ": bool Determine if the STFT kenrels are trainable or", "elif self.pad_mode == 'reflect': # padding = nn.ReflectionPad1d(self.n_fft//2) # x_real", ">>> specs = spec_layer(x) \"\"\" def __init__(self, sr=22050, hop_length=512, fmin=32.70,", "{loss_threshold} reached. Stopping optimization.\") break if grad_threshold and pred_stft.grad.max() <", "error in calculating phase def inverse(self, X, onesided=True, length=None, refresh_win=True):", "elif self.pad_mode == 'reflect': padding = nn.ReflectionPad1d(self.n_fft//2) x = padding(x)", "have the shape (freq_bins, 1, n_fft, 1) to support 2D", "as the forward STFT. 
hop_length : int The hop (or", "CQT = torch.sqrt(CQT_real.pow(2)+CQT_imag.pow(2)) return CQT*torch.sqrt(self.lenghts.view(-1,1)) class CQT2010v2(torch.nn.Module): \"\"\"This function is", "The kernel creation process is still same as the 1992", "1, len_audio)`` The correct shape will be inferred automatically if", "__init__(self, n_fft, n_iter=32, hop_length=None, win_length=None, window='hann', center=True, pad_mode='reflect', momentum=0.99, device='cpu'):", "kernel_cos, self.bins2freq, _, window = create_fourier_kernels(self.kernel_width, window='ones', freq_scale='no') # Converting", "a2) # ifft(X_imag)*1j = (b1, b2)*1j # = (-b2, b1)", "the spectrogram output type, either ``Magnitude``, ``Complex``, or ``Phase``. The", "torch.sin(torch.atan2(CQT[:,:,:,1],CQT[:,:,:,0])) return torch.stack((phase_real,phase_imag), -1) class CQT(CQT1992v2): \"\"\"An abbreviation for :func:`~nnAudio.Spectrogram.CQT1992v2`.", "the beginning # and ending are required. if self.pad_mode ==", "a constant Q transform.” (1992). This function is to calculate", "* window wcos = kernel_cos * window wsin = torch.tensor(wsin)", "domain to freq domain # These cqt_kernel is already in", "if self.norm: CQT = CQT/self.kernel_width*torch.sqrt(self.lenghts.view(-1,1,1)) else: CQT = CQT*torch.sqrt(self.lenghts.view(-1,1,1)) if", ": str Choose which device to initialize this layer. Default", "The windowing function for CQT. It uses ``scipy.signal.get_window``, please refer", ">>> spec_layer = Spectrogram.iSTFT() >>> specs = spec_layer(x) \"\"\" def", "is 512. 
window : str The windowing function for STFT.", "def __init__(self, sr=22050, hop_length=512, fmin=32.70, fmax=None, n_bins=84, bins_per_octave=12, norm=1, window='hann',", "'reflect': self.padding = nn.ReflectionPad1d(self.n_fft//2) def forward(self,x,output_format=None): \"\"\" Convert a batch", "n_fft, n_iter=32, hop_length=None, win_length=None, window='hann', center=True, pad_mode='reflect', momentum=0.99, device='cpu'): super().__init__()", "self.register_buffer('amin', amin) self.register_buffer('ref', ref) self.top_db = top_db self.n_mfcc = n_mfcc", "= spec_layer(x) \"\"\" def __init__(self, sr=22050, n_fft=2048, n_mels=128, hop_length=512, window='hann',", "\"\"\" Convert a batch of waveforms to MFCC. Parameters ----------", "= torch.tensor(create_lowpass_filter( band_center = 0.5, kernelLength=256, transitionBandwidth=0.001 ) ) #", "as ``torch.nn.Module``. Parameters ---------- n_fft : int The window size.", "= n_fft//4 else: self.hop_length = hop_length # Creating window function", "= nn.ReflectionPad1d(self.n_fft//2) # x_real = padding(x_real) # x_imag = padding(x_imag)", "= conv1d(x, self.wsin, stride=self.stride) spec_real = conv1d(x, self.wcos, stride=self.stride) #", "used, the bin spacing can be controlled by ``fmin`` and", "X, onesided=True, length=None, refresh_win=True): \"\"\" This function is same as", "torch.tensor(kernel_sin * window) wcos = torch.tensor(kernel_cos * window) cqt_kernels_real =", "= (b1, b2)*1j # = (-b2, b1) a1 = conv1d(x_real,", "stride=self.hop_length) # Getting CQT Amplitude CQT = torch.sqrt(CQT_real.pow(2)+CQT_imag.pow(2)) return CQT*torch.sqrt(self.lenghts.view(-1,1))", "angles = angles.div(torch.sqrt(angles.pow(2).sum(-1)).unsqueeze(-1) + 1e-16) # normalizing the phase #", "x_shape = x.shape N = x_shape[-1] v = torch.cat([x[:, :,", "waveforms to MFCC. Parameters ---------- x : torch tensor Input", "for the highest frequency bin. 
If freq_scale is ``no``, this", "the same length, please use ``refresh_win=None`` to increase computational speed.", "def forward(self, X, onesided=False, length=None, refresh_win=None): \"\"\" If your spectrograms", "Remember the minus sign for imaginary part elif output_format=='Phase': return", "...\", end='\\r') start = time() cqt_kernels, self.kernel_width, lenghts = create_cqt_kernels(Q,", "center=True, pad_mode='reflect', momentum=0.99, device='cpu'): super().__init__() self.n_fft = n_fft self.win_length =", "Will be ignored if ``fmax`` is not ``None``. bins_per_octave :", "x, output_format=None): \"\"\" Convert a batch of waveforms to spectrograms.", "the magnitude of the STFT result, shape = ``(num_samples, freq_bins,time_steps)``;", "same as the normalization used in librosa. window : str", "the normalization in the end freqs = fmin * 2.0", "'hann' pad_mode : str The padding method. Default value is", "to the downsampled input is equavalent to the next lower", "to save GPU memory. fmin : int The starting frequency", "wcos = torch.tensor(wcos) if verbose==True: print(\"STFT kernels created, time used", "center self.stft = STFT(n_fft=n_fft, freq_bins=None, hop_length=hop_length, window=window, freq_scale='no', center=center, pad_mode=pad_mode,", "% bins_per_octave # print(\"remainder = \", remainder) if remainder==0: #", "that is why I call it version 2. [1] Brown,", "small CQT kernel. 
Everytime the input audio is downsampled, the", "stft_inversion_params = stft_inversion_params or {} if mel_inversion_params: mel_inversion_params = {**default_mel_inversion_params,", "= 1/(2**(1/bins_per_octave)-1) # Creating lowpass filter and make it a", "``shape = (num_samples, freq_bins,time_steps)`` if ``output_format='Magnitude'``; ``shape = (num_samples, freq_bins,time_steps,", "Converting Magnitude spectrograms back to waveforms based on the \"fast", "CQT n_filters = min(bins_per_octave, n_bins) self.n_octaves = int(np.ceil(float(n_bins) / bins_per_octave))", "By default, ``length=None``, which will remove ``n_fft//2`` samples from the", "if the time to frequency domain transformation kernel for the", "n_filters = {}\".format(Q, self.fmin_t, n_filters)) basis, self.n_fft, _ = create_cqt_kernels(Q,", "default, it is False to save GPU memory. fmin :", "index is the beginning of the iSTFT kernel, if ``True``,", "IEEE Workshop on Applications of Signal Processing to Audio and", ": torch.tensor It returns a batch of waveforms. Examples --------", "pad_mode='reflect', iSTFT=False, fmin=50, fmax=6000, sr=22050, trainable=False, output_format=\"Complex\", verbose=True): super().__init__() #", "input spectrograms X are of the same length, please use", "self.trainable==False: self.register_buffer('wsin', wsin) self.register_buffer('wcos', wcos) if self.trainable==True: wsin = torch.nn.Parameter(wsin,", "if hop_length==None: self.hop_length = n_fft//4 else: self.hop_length = hop_length #", "frequency domain transformation kernel for the input audio is trainable", "< self.pad_amount: raise AssertionError(\"Signal length shorter than reflect padding length", "numpy arrays to torch tensors wsin = torch.tensor(kernel_sin * window)", "recommended to use the ``inverse`` method under the ``STFT`` class", "...\", end='\\r') start = time() # self.lowpass_filter = torch.tensor( #", "Default is 12. norm : int Normalization for the CQT", "frequency with linear spacing. 
Please make sure the value is", "the input audio to reduce the CQT kernel size. The", "= {}\".format(Q, self.fmin_t, n_filters)) basis, self.n_fft, _ = create_cqt_kernels(Q, sr,", "Magnitude spectrograms back to waveforms based on the \"fast Griffin-Lim\"[1].", "the frames real = overlap_add(real, self.stride) # Prepare the window", "# make the dim same as log_spec so that it", "return (real/self.n_fft, imag/self.n_fft) class iSTFT(torch.nn.Module): \"\"\"This class is to convert", "not required, it is recommended to use the ``inverse`` method", "Default value is 'Magnitude'. verbose : bool If ``True``, it", "argument does nothing. sr : int The sampling rate for", "a1 = conv2d(X_real_bc, self.kernel_cos, stride=(1,1)) b2 = conv2d(X_imag_bc, self.kernel_sin, stride=(1,1))", "and ``freq_scale!='no'``, there is no guarantee that the inverse is", "top octave CQT x_down = x # Preparing a new", "fast Griffin-Lim algorithm,” IEEE Workshop on Applications of Signal Processing", "Now norm is used to normalize the final CQT result", "value is 'cpu'. 
Returns ------- spectrogram : torch.tensor It returns", "if self.pad_mode == 'constant': padding = nn.ConstantPad1d(self.n_fft//2, 0) elif self.pad_mode", "= 2 * V return V.permute(0,2,1) # swapping back the", "the original 1992 algorithm, that is why I call it", "``Complex`` will return the STFT result in complex number, shape", "device=self.device) angles[:, :,:,0] = torch.cos(2 * np.pi * rand_phase) angles[:,:,:,1]", "# Do early downsampling if this argument is True if", "freq_scale=freq_scale, fmin=fmin, fmax=fmax, sr=sr, verbose=verbose) kernel_sin = torch.tensor(kernel_sin, dtype=torch.float) kernel_cos", "__init__(self, n_fft=2048, win_length=None, freq_bins=None, hop_length=None, window='hann', freq_scale='no', center=True, fmin=50, fmax=6000,", "self.hop_length = hop_length self.pad_mode = pad_mode self.n_bins = n_bins self.earlydownsample", "set the frequency range in the CQT filter instead of", "print(f\"Target error of {loss_threshold} reached. Stopping optimization.\") break if grad_threshold", "the input spectrograms have different time steps if hasattr(self, 'w_sum')==False", "transform.” (1992). early downsampling factor is to downsample the input", "which means ``n_fft//2+1`` bins. hop_length : int The hop (or", "loss.backward() optimizer.step() # Check conditions if not loss.isfinite(): raise OverflowError(\"Overflow", "torch.nn.Parameter(wsin, requires_grad=self.trainable) wcos = torch.nn.Parameter(wcos, requires_grad=self.trainable) self.register_parameter('wsin', wsin) self.register_parameter('wcos', wcos)", "top octave. 
Then we keep downsampling the input audio by", "== 'constant': self.padding = nn.ConstantPad1d(self.n_fft//2, 0) elif self.pad_mode == 'reflect':", "other tensors def __init__(self, sr=22050, hop_length=512, fmin=32.70, fmax=None, n_bins=84, bins_per_octave=12,", "1/(2**(1/bins_per_octave)-1) if verbose==True: print(\"Creating CQT kernels ...\", end='\\r') start =", "self.kernel_cos = torch.nn.Parameter(self.kernel_cos, requires_grad=self.trainable) # Applying window functions to the", "iSTFT. It uses ``scipy.signal.get_window``, please refer to scipy documentation for", "DCT basis **kwargs Other arguments for Melspectrogram such as n_fft,", "CQT kernel. Everytime the input audio is downsampled, the CQT", "inverse, the Fourier kernels do not need to be windowed", "inverse. If trainability is not required, it is recommended to", "will return the magnitude of the STFT result, shape =", "# SGD arguments default_sgd_kwargs = dict(lr=1e3, momentum=0.9) if sgd_kwargs: default_sgd_kwargs.update(sgd_kwargs)", "by ``fmin`` and ``fmax``. If 'no' is used, the bin", "0) if iSTFT: self.register_buffer('kernel_sin_inv', kernel_sin_inv.unsqueeze(-1)) self.register_buffer('kernel_cos_inv', kernel_cos_inv.unsqueeze(-1)) # Making all", "for Short-Time Fourier Transform (STFT) # We set the frequency", "= center self.pad_mode = pad_mode self.norm = norm self.output_format =", "error of {loss_threshold} reached. Stopping optimization.\") break if grad_threshold and", "same as log_spec so that it can be broadcasted batch_wise_max", "window functions to the Fourier kernels window_mask = torch.tensor(window_mask) wsin", "the positive and negative signs # ifft = e^(+2\\pi*j)*X #", "= kernel_cos * window_mask if self.trainable==False: self.register_buffer('wsin', wsin) self.register_buffer('wcos', wcos)", "Default is 84. Will be ignored if ``fmax`` is not", "the forward STFT. 
freq_scale : 'linear', 'log', or 'no' Determine", "= conv1d(x, self.cqt_kernels_real, stride=self.hop_length) * \\ torch.sqrt(self.lenghts.view(-1,1)) CQT_imag = -conv1d(x,", "= torch.optim.SGD([pred_stft], **sgd_kwargs) losses = [] for i in range(max_steps):", "center=True, pad_mode='reflect', trainable=False, output_format='Magnitude', verbose=True): super().__init__() # norm arg is", "!= True) or (hasattr(self, 'kernel_cos_inv') != True): raise NameError(\"Please activate", "or `log` is used, the bin spacing can be controlled", "batch_size optimizer = torch.optim.SGD([pred_stft], **sgd_kwargs) losses = [] for i", "as librosa # self.cqt_kernels_imag*=lenghts.unsqueeze(1)/self.kernel_width print(\"Creating STFT kernels ...\", end='\\r') start", "by dividing n_fft # basis_norm is for normalizing basis self.hop_length", "kernel, if ``True``, the time index is the center of", "basis, self.n_fft, _ = create_cqt_kernels(Q, sr, self.fmin_t, n_filters, bins_per_octave, norm=basis_norm,", "conv1d(x, self.cqt_kernels_real, stride=self.hop_length) * \\ torch.sqrt(self.lenghts.view(-1,1)) CQT_imag = -conv1d(x, self.cqt_kernels_imag,", "is True if verbose==True: print(\"Creating early downsampling filter ...\", end='\\r')", "is None: if self.center: real = real[:, self.pad_amount:-self.pad_amount] else: if", "* torch.log10(torch.max(self.amin, self.ref)) if self.top_db is not None: if self.top_db", "the gradients for CQT kernels will also be caluclated and", "version 2. [1] Brown, <NAME>. and <NAME>. “An efficient algorithm", "norm=None): ''' Refer to https://github.com/zh217/torch-dct for the original implmentation. '''", "to normalize the final CQT result by dividing n_fft #", "= self._dct(x, norm='ortho')[:,:self.m_mfcc,:] return x def extra_repr(self) -> str: return", "\"\"\" This alogrithm uses the method proposed in [1]. Please", "< 40Hz. 
Parameters ---------- sr : int The sampling rate", "output_format=='Complex': return CQT elif output_format=='Phase': phase_real = torch.cos(torch.atan2(CQT_imag,CQT_real)) phase_imag =", "The default value is ``0.99``. device : str Choose which", "window once to save time # Unless the input spectrograms", "= (num_samples, freq_bins,time_steps)`` if ``output_format='Magnitude'``; ``shape = (num_samples, freq_bins,time_steps, 2)``", "mask so that it can be used later in inverse", "is the center of the STFT kernel. Default value if", "code from the 1992 alogrithm [2] [1] Schörkhuber, Christian. “CONSTANT-Q", "return melspec def extra_repr(self) -> str: return 'Mel filter banks", "bins, please use ``onesided=True``, else use ``onesided=False`` To make sure", "Classes ---------------------------### class STFT(torch.nn.Module): \"\"\"This function is to calculate the", "for the CQT kernels. ``1`` means L1 normalization, and ``2``", "self.padding) CQT = torch.cat((CQT1, CQT),1) CQT = CQT[:,-self.n_bins:,:] # Removing", "time used = {:.4f} seconds\".format(time()-start)) # print(\"Getting cqt kernel done,", "/= (self.n_fft) # Overlap and Add algorithm to connect all", "Normalizing the output with the downsampling factor, 2**(self.n_octaves-1) is make", "// 4) self.output_format = output_format self.trainable = trainable self.stride =", "+ length] else: real = real[:, :length] return real class", "If your input spectrograms X are of the same length,", "Convert a batch of waveforms to CQT spectrograms. Parameters ----------", "window_mask) else: self.register_buffer('window_mask', window_mask) if verbose==True: print(\"iSTFT kernels created, time", "# ifft = e^(+2\\pi*j)*X # ifft(X_real) = (a1, a2) #", "``trainable_mel`` and ``trainable_STFT``. 
It only support type-II DCT at the", "self.fmin_t*2**((remainder-1)/bins_per_octave) self.fmin_t = fmax_t/2**(1-1/bins_per_octave) # Adjusting the top minium bins", "kernels ...\", end='\\r') start = time() # print(\"Q = {},", "0: raise ParameterError('amin must be strictly positive') amin = torch.tensor([amin])", "nn.ReflectionPad1d(self.n_fft//2) # x_real = padding(x_real) # x_imag = padding(x_imag) #", "a more computational and memory efficient version. [1] Brown, <NAME>.", "= conv2d(X_real_bc, self.kernel_cos, stride=(1,1)) b2 = conv2d(X_imag_bc, self.kernel_sin, stride=(1,1)) #", "used = {:.4f} seconds\".format(time()-start)) def forward(self,x, output_format=None): \"\"\" Convert a", "\",self.n_fft) # If center==True, the STFT window will be put", "implmentation. ''' log_spec = 10.0 * torch.log10(torch.max(S, self.amin)) log_spec -=", ":func:`~nnAudio.Spectrogram.CQT1992v2` for a more computational and memory efficient version. [1]", "put in the middle, and paddings at the beginning #", "if mel_inversion_params: mel_inversion_params = {**default_mel_inversion_params, **mel_inversion_params} if stft_inversion_params: stft_inversion_params =", "lenghts) self.basis=basis fft_basis = fft(basis)[:,:self.n_fft//2+1] # Convert CQT kenral from", "filter created, \\ time used = {:.4f} seconds\".format(time()-start)) else: self.downsample_factor=1.", "1e-16) # normalizing the phase # Using the final phase", "# Getting CQT Amplitude return torch.sqrt(CQT.pow(2).sum(-1)) else: return torch.sqrt(CQT.pow(2).sum(-1)+1e-8) elif", "the value is the same as the forward STFT. momentum", "hop_length self.center = center self.pad_amount = self.n_fft // 2 self.refresh_win", "len_audio)`` The correct shape will be inferred automatically if the", "the usage is same as ``torch.nn.Module``. Parameters ---------- n_fft :", "the \"fast Griffin-Lim\"[1]. 
This Griffin Lim is a direct clone", "wsin = torch.nn.Parameter(wsin, requires_grad=trainable_kernels) wcos = torch.nn.Parameter(wcos, requires_grad=trainable_kernels) self.register_parameter('wsin', wsin)", "self.device = device if win_length==None: self.win_length=n_fft else: self.win_length=win_length if hop_length==None:", "int The ending frequency for the highest Mel filter bank.", "correct shape will be inferred automatically if the input follows", "based on the \"fast Griffin-Lim\"[1]. This Griffin Lim is a", "this way, the inverse kernel and the forward kernel do", "kernels ...\", end='\\r') start = time() cqt_kernels, self.kernel_width, lenghts =", "torch.sqrt(CQT_real.pow(2)+CQT_imag.pow(2)) return CQT*torch.sqrt(self.lenghts.view(-1,1)) class CQT2010v2(torch.nn.Module): \"\"\"This function is to calculate", "created, time used = {:.4f} seconds\".format(time()-start)) else: pass def forward(self,", "self.downsample_factor) hop = self.hop_length CQT = get_cqt_complex(x, self.wcos, self.wsin, hop,", "kernels will also be caluclated and the CQT kernels will", "batch_wise_max = log_spec.flatten(1).max(1)[0].unsqueeze(1).unsqueeze(1) log_spec = torch.max(log_spec, batch_wise_max - self.top_db) return", "# print(CQT.shape) # print(self.lenghts.view(-1,1).shape) # Normalizing the output with the", "= complex_mul((self.cqt_kernels_real, self.cqt_kernels_imag), (fourier_real, fourier_imag)) CQT = torch.stack((CQT_real,-CQT_imag),-1) if self.norm:", "usage is same as ``torch.nn.Module``. 
This alogrithm uses the resampling", "= log_spec.flatten(1).max(1)[0].unsqueeze(1).unsqueeze(1) log_spec = torch.max(log_spec, batch_wise_max - self.top_db) return log_spec", "print(\"CQT kernels created, time used = {:.4f} seconds\".format(time()-start)) # creating", "# Initializing the rebuilt magnitude spectrogram rebuilt = torch.zeros(*angles.shape, device=self.device)", "time from nnAudio.librosa_functions import * from nnAudio.utils import * sz_float", "nn.ConstantPad1d(self.kernel_width//2, 0) elif self.pad_mode == 'reflect': padding = nn.ReflectionPad1d(self.kernel_width//2) x", "CQT Q = 1/(2**(1/bins_per_octave)-1) print(\"Creating CQT kernels ...\", end='\\r') start", "part. signal lies in the real part real = a1", "basis_norm is for normalizing basis self.hop_length = hop_length self.pad_mode =", "complex number,\" \\ \"make sure our tensor is in the", "'STFT kernel size = {}, CQT kernel size = {}'.format(", "2**(self.n_octaves-1) # is make it same mag as 1992 CQT", "low frequency region where freq < 40Hz. \"\"\" def __init__(self,", "3 else pred target = target.unsqueeze(1) if target.ndim == 3", "fmin=32.70, fmax=None, n_bins=84, bins_per_octave=12, norm=True, basis_norm=1, window='hann', pad_mode='reflect', trainable_STFT=False, trainable_CQT=False,", "during model training. Default value is ``False``. trainable_STFT : bool", "12. norm : int Normalization for the CQT kernels. ``1``", "window size for the STFT. Default value is 2048 n_mels", "...\", end='\\r') start = time() kernel_sin, kernel_cos, self.bins2freq, _, window", "or refresh_win==True: self.w_sum = torch_window_sumsquare(self.window_mask.flatten(), X.shape[2], self.stride, self.n_fft).flatten() self.nonzero_indices =", "frame. i.e. 
input shape = (batch, n_fft, 1) \"\"\" def", "kernel_cos = torch.nn.Parameter(kernel_cos, requires_grad=trainable_kernels) self.register_parameter('kernel_sin', kernel_sin) self.register_parameter('kernel_cos', kernel_cos) else: self.register_buffer('kernel_sin',", "else: real = real[:, :length] return real class Griffin_Lim(torch.nn.Module): \"\"\"", "trainable_STFT=False, trainable_CQT=False, bins_per_octave=12, output_format='Complex', norm=1, window='hann', center=True, pad_mode='reflect'): super().__init__() #", "window=self.w, pad_mode=self.pad_mode) # Phase update rule angles[:,:,:] = rebuilt[:,:,:] -", "/ bins_per_octave)) if verbose==True: print(\"num_octave = \", self.n_octaves) # Calculate", "layer. Default value is 'cpu'. Returns ------- spectrogram : torch.tensor", "input signal. Input signal should be in either of the", "verbose=True, **kwargs): super().__init__() self.stride = hop_length self.center = center self.pad_mode", "the output with the downsampling factor, 2**(self.n_octaves-1) is make it", "return (real, -imag) def inverse(self,x_real,x_imag): \"\"\" Convert a batch of", "CQT kernel, if ``True``, the time index is the center", "timesteps, 2)\" # If the input spectrogram contains only half", "target.ndim == 3 else target loss = (pred - target).pow(2).sum(-2).mean()", "{:.4f} seconds\".format(time()-start)) # Caluate num of filter requires for the", "center=True, fmin=50, fmax=6000, sr=22050, trainable_kernels=False, trainable_window=False, verbose=True, refresh_win=True): super().__init__() #", "device=self.device) angles = torch.empty((*S.shape,2), device=self.device) angles[:, :,:,0] = torch.cos(2 *", "* batch_size optimizer = torch.optim.SGD([pred_stft], **sgd_kwargs) losses = [] for", "downsampling_by_n(x, self.early_downsample_filter, self.downsample_factor) hop = self.hop_length CQT = get_cqt_complex(x, self.cqt_kernels_real,", "= torch.istft(S.unsqueeze(-1) * angles, self.n_fft, self.hop_length, win_length=self.win_length, 
window=self.w, center=self.center) return", "kernel size = {}'.format( (*self.wcos.shape,), (*self.cqt_kernels_real.shape,) ) class CQT1992v2(torch.nn.Module): \"\"\"This", "later self.w = torch.tensor(get_window(window, int(self.win_length), fftbins=True), device=device).float() def forward(self, S):", "for DCT basis **kwargs Other arguments for Melspectrogram such as", "The window size. Default value is 2048. n_iter=32 : int", "# print(\"remainder = \", remainder) if remainder==0: # Calculate the", "fmin=50, fmax=6000, sr=22050, trainable=False, output_format=\"Complex\", verbose=True): super().__init__() # Trying to", "stft start = time() # Creating kernel for mel spectrogram", "waveforms to spectrums. Parameters ---------- x : torch tensor Input", "---------- sr : int The sampling rate for the input", "self.n_fft = n_fft # Create filter windows for stft wsin,", "(STFT) of the input signal. Input signal should be in", "spec2wav conversion # print(f'win_length={self.win_length}\\tw={self.w.shape}') inverse = torch.istft(S.unsqueeze(-1) * angles, self.n_fft,", "if this argument is True if verbose==True: print(\"Creating early downsampling", "CQT elif output_format=='Complex': return torch.stack((CQT_real,CQT_imag),-1) elif output_format=='Phase': phase_real = torch.cos(torch.atan2(CQT_imag,CQT_real))", "Default value is ``False``. output_format : str Determine the return", "nn.ConstantPad1d(self.pad_amount, 0) elif self.pad_mode == 'reflect': if self.num_samples < self.pad_amount:", "import conv1d, conv2d, fold import numpy as np from time", "= torch.tensor(fft_basis.imag.astype(np.float32)) if verbose==True: print(\"CQT kernels created, time used =", "# If center==True, the STFT window will be put in", "int(np.ceil(float(n_bins) / bins_per_octave)) if verbose==True: print(\"num_octave = \", self.n_octaves) #", "(1992). 
Parameters ---------- sr : int The sampling rate for", "self.trainable = trainable self.pad_amount = self.n_fft // 2 self.window =", "consider using Griffin-Lim.\" if onesided: X = extend_fbins(X) # extend", "= hop//2 x_down = downsampling_by_2(x_down, self.lowpass_filter) CQT1 = get_cqt_complex(x_down, self.cqt_kernels_real,", "downsampling factor, 2**(self.n_octaves-1) # is make it same mag as", "the downsampling factor, 2**(self.n_octaves-1) # is make it same mag", "magnitude spectrogram rebuilt = torch.zeros(*angles.shape, device=self.device) for _ in range(self.n_iter):", "CQT kernels ...\", end='\\r') start = time() basis, self.n_fft, lenghts", "+ length] else: real = real[:, :length] return real def", "= int(win_length // 4) self.output_format = output_format self.trainable = trainable", "the correct inverse. If trainability is not required, it is", "self.wcos, self.wsin, hop, self.padding) CQT = torch.cat((CQT1, CQT),1) CQT =", "bool Putting the CQT keneral at the center of the", "X.shape[2], self.stride, self.n_fft).flatten() self.nonzero_indices = (self.w_sum>1e-10) else: pass real[:, self.nonzero_indices]", "loss_fn(pred_mel, melspec) losses.append(loss.item()) loss.backward() optimizer.step() # Check conditions if not", "broadcast dimensions to support 2D convolution X_real_bc = X_real.unsqueeze(1) X_imag_bc", "sampling rate is very important for calculating the correct frequency.", "fmax : int The ending frequency for the highest frequency", "uses the method proposed in [1]. Please refer to :func:`~nnAudio.Spectrogram.CQT1992v2`", "verbose=True, refresh_win=True): super().__init__() # Trying to make the default setting", "= get_cqt_complex(x, self.cqt_kernels_real, self.cqt_kernels_imag, hop, self.padding) # Getting the top", "self.downsample_factor=1. # Preparing CQT kernels if verbose==True: print(\"Creating CQT kernels", "at the center of the time-step or not. 
If ``False``,", "Mel spectrograms from the audio clips, then the discrete cosine", "gradient of {grad_threshold} reached. Stopping optimization.\") break pred_stft = pred_stft.detach().clamp(eps)", "= 10.0 * torch.log10(torch.max(S, self.amin)) log_spec -= 10.0 * torch.log10(torch.max(self.amin,", "# self.cqt_kernels_imag*=lenghts.unsqueeze(1)/self.kernel_width print(\"Creating STFT kernels ...\", end='\\r') start = time()", "MFCCs. shape = ``(num_samples, n_mfcc, time_steps)``. Examples -------- >>> spec_layer", "to deal with the filter and other tensors def __init__(self,", "iSTFT=False, fmin=50, fmax=6000, sr=22050, trainable=False, output_format=\"Complex\", verbose=True): super().__init__() # Trying", "0] /= np.sqrt(N) * 2 V[:, :, 1:] /= np.sqrt(N", "stored as ``(real, imag)`` in the last axis. Default value", "= torch.nn.Parameter(self.kernel_sin, requires_grad=self.trainable) # self.kernel_cos = torch.nn.Parameter(self.kernel_cos, requires_grad=self.trainable) # Applying", "1] # broadcast dimensions to support 2D convolution X_real_bc =", "if verbose==True: print(\"Early downsampling filter created, \\ time used =", "spec_layer(x) \"\"\" def __init__(self, sr=22050, n_mfcc=20, norm='ortho', verbose=True, ref=1.0, amin=1e-10,", "MFCCs. Therefore, the Mel spectrogram part can be made trainable", "for _power_to_db if amin <= 0: raise ParameterError('amin must be", "2).\"\\ \"\\nIf you have a magnitude spectrogram, please consider using", "frequency for the lowest CQT bin. Default is 32.70Hz, which", "# Applying window functions to the Fourier kernels window_mask =", "if trainable_mel==True: # self.mel_basis = torch.nn.Parameter(self.mel_basis) # if trainable_STFT==True: #", "---------- n_fft : int The window size. 
Default value is", "torch.tensor(kernel_cos * window) cqt_kernels_real = torch.tensor(cqt_kernels.real.astype(np.float32)) cqt_kernels_imag = torch.tensor(cqt_kernels.imag.astype(np.float32)) if", "complex number is stored as ``(real, imag)`` in the last", "new variable for downsampling for i in range(self.n_octaves-1): hop =", "self.register_buffer('lenghts', lenghts) self.basis = basis # These cqt_kernel is already", "dtype=torch.float) def forward(self,x): \"\"\" Convert a batch of waveforms to", "Initializing the rebuilt magnitude spectrogram rebuilt = torch.zeros(*angles.shape, device=self.device) for", "``Complex`` or ``Phase``. Default value is ``Complex``. \"\"\" output_format =", "is ``False``. output_format : str Determine the return type. ``Magnitude``", "default value is 'hann' pad_mode : str The padding method.", "dct applies to the frequency axis x_shape = x.shape N", "self.hop_length = hop_length # Creating window function for stft and", "returns a tensor of MFCCs. shape = ``(num_samples, n_mfcc, time_steps)``.", "---------- x_real : torch tensor Real part of the signal.", "* \\ torch.sqrt(self.lenghts.view(-1,1)) if output_format=='Magnitude': if self.trainable==False: # Getting CQT", "variables trainable kernel_sin = torch.nn.Parameter(kernel_sin, requires_grad=trainable_kernels) kernel_cos = torch.nn.Parameter(kernel_cos, requires_grad=trainable_kernels)", "STFT. center : bool Putting the iSTFT keneral at the", "downsampling later if possible self.trainable = trainable self.output_format = output_format", "= self._power_to_db(x) x = self._dct(x, norm='ortho')[:,:self.m_mfcc,:] return x def extra_repr(self)", "range(self.n_iter): tprev = rebuilt # Saving previous rebuilt magnitude spec", "'Magnitude'. 
verbose : bool If ``True``, it shows layer information.", "the Nyquist frequency, \\ please reduce the n_bins'.format(fmax_t)) if self.earlydownsample", "* 2.0 ** (np.r_[0:n_bins] / np.float(bins_per_octave)) lenghts = np.ceil(Q *", "to get back another half if onesided: X = extend_fbins(X)", "pad_mode self.n_fft = n_fft self.freq_bins = freq_bins self.trainable = trainable", "if verbose==True: print(\"Low pass filter created, time used = {:.4f}", "in the frequency domain cqt_kernels_real = torch.tensor(fft_basis.real.astype(np.float32)) cqt_kernels_imag = torch.tensor(fft_basis.imag.astype(np.float32))", "bins_per_octave=12, norm=True, basis_norm=1, window='hann', pad_mode='reflect', earlydownsample=True, trainable=False, output_format='Magnitude', verbose=True): super().__init__()", "= -conv1d(x, self.cqt_kernels_imag, stride=self.hop_length) * \\ torch.sqrt(self.lenghts.view(-1,1)) if output_format=='Magnitude': if", "/ (2 * N) W_r = torch.cos(k) W_i = torch.sin(k)", "return 'Mel filter banks size = {}, trainable_mel={}'.format( (*self.mel_basis.shape,), self.trainable_mel,", "= \",self.downsample_factor) # print(CQT.shape) # print(self.lenghts.view(-1,1).shape) # Normalizing the output", "assert S.dim()==3 , \"Please make sure your input is in", "spectrograms back to waveforms. It only works for the complex", "onesided=False) # TODO: Can make the W_r and W_i trainable", "= nn.Parameter(pred_stft, requires_grad=True) sgd_kwargs[\"lr\"] = sgd_kwargs[\"lr\"] * batch_size optimizer =", "# Normalize again to get same result as librosa CQT", "Other arguments for Melspectrogram such as n_fft, n_mels, hop_length, and", "calculating the correct frequency. 
n_fft : int The window size", "melspec, max_steps=1000, loss_threshold=1e-8, grad_threshold=1e-7, random_start=False, sgd_kwargs=None, eps=1e-12, return_extras=False, verbose=None): \"\"\"", "self.register_buffer('kernel_cos_inv', kernel_cos_inv.unsqueeze(-1)) # Making all these variables nn.Parameter, so that", "torch.sqrt(self.lenghts.view(-1,1)) CQT_imag = -conv1d(x, self.cqt_kernels_imag, stride=self.hop_length) * \\ torch.sqrt(self.lenghts.view(-1,1)) if", "= mel(sr, n_fft, n_mels, fmin, fmax, htk=htk, norm=norm) mel_basis =", "freqs) lenghts = torch.tensor(lenghts).float() self.register_buffer('lenghts', lenghts) self.basis = basis #", "= output_format # creating kernels for CQT Q = 1/(2**(1/bins_per_octave)-1)", "# CQT CQT_real = conv1d(x, self.cqt_kernels_real, stride=self.hop_length) * \\ torch.sqrt(self.lenghts.view(-1,1))", "= create_cqt_kernels(Q, sr, self.fmin_t, n_filters, bins_per_octave, norm=basis_norm, topbin_check=False) # For", "= torch.nn.Parameter(cqt_kernels_imag, requires_grad=trainable_kernels) self.register_parameter('cqt_kernels_real', cqt_kernels_real) self.register_parameter('cqt_kernels_imag', cqt_kernels_imag) else: self.register_buffer('cqt_kernels_real', cqt_kernels_real)", "fmax_t = self.fmin_t*2**((remainder-1)/bins_per_octave) self.fmin_t = fmax_t/2**(1-1/bins_per_octave) # Adjusting the top", "else: self.register_buffer('wsin', wsin) self.register_buffer('wcos', wcos) if trainable_CQT: cqt_kernels_real = torch.nn.Parameter(cqt_kernels_real,", "CQT CQT_real, CQT_imag = complex_mul((self.cqt_kernels_real, self.cqt_kernels_imag), (fourier_real, fourier_imag)) CQT =", "pad_mode self.norm = norm self.output_format = output_format # creating kernels", "the top bin frequency fmax_t = self.fmin_t*2**((remainder-1)/bins_per_octave) self.fmin_t = fmax_t/2**(1-1/bins_per_octave)", "the dim same as log_spec so that it can be", "feature before `torch.fft` was made avaliable. 
The inverse function only", "This Griffin Lim is a direct clone from librosa.griffinlim. [1]", "pred_stft def inverse(self, melspec, mel_inversion_params=None, stft_inversion_params=None): default_mel_inversion_params = {} default_stft_inversion_params", "i in range(self.n_octaves-1): hop = hop//2 x_down = downsampling_by_2(x_down, self.lowpass_filter)", "the original implmentation. ''' log_spec = 10.0 * torch.log10(torch.max(S, self.amin))", "print(\"STFT kernels created, time used = {:.4f} seconds\".format(time()-start)) if trainable_STFT:", "conv2d(X_imag_bc, self.kernel_sin, stride=(1,1)) # compute real and imag part. signal", "from librosa. This class inherits from ``torch.nn.Module``, therefore, the usage", "uses the resampling method proposed in [1]. Instead of convoluting", "STFT. window : str The windowing function for iSTFT. It", "melspec.detach().view(-1, n_mels, time) if random_start: pred_stft_shape = (batch_size, n_freq, time)", "b2 = conv1d(x_imag, self.wsin, stride=self.stride) imag = a2+b1 real =", "is not functioning self.trainable = trainable self.hop_length = hop_length self.center", "time) pred_stft = torch.zeros(*pred_stft_shape, dtype=torch.float32, device=mel_basis.device).normal_().clamp_(eps) else: pred_stft = (torch.pinverse(mel_basis)", "and ``bins_per_octave``. If ``fmax`` is not ``None``, then the argument", "= torch.tensor(kernel_cos * window) cqt_kernels_real = torch.tensor(cqt_kernels.real.astype(np.float32)) cqt_kernels_imag = torch.tensor(cqt_kernels.imag.astype(np.float32))", "transitionBandwidth=0.001 ) ) # Broadcast the tensor to the shape", "Default value is ``False``. trainable_window : bool Determine if the", "= int(win_length // 4) self.n_fft = n_fft self.win_length = win_length", "\"\"\" if refresh_win==None: refresh_win=self.refresh_win assert X.dim()==4 , \"Inverse iSTFT only", "[1] <NAME>., <NAME>., & <NAME>. 
“A fast Griffin-Lim algorithm,” IEEE", "= sgd_kwargs[\"lr\"] * batch_size optimizer = torch.optim.SGD([pred_stft], **sgd_kwargs) losses =", "2048 n_mels : int The number of Mel filter banks.", "end='\\r') start = time() kernel_sin, kernel_cos, self.bins2freq, _, window =", "CQT result by dividing n_fft # basis_norm is for normalizing", "To activate the iSTFT module or not. By default, it", "= loss_fn(pred_mel, melspec) losses.append(loss.item()) loss.backward() optimizer.step() # Check conditions if", "It only works for the complex value spectrograms. If you", "a tensor of spectrograms. shape = ``(num_samples, freq_bins,time_steps)`` if ``output_format='Magnitude'``;", "self.pad_mode == 'reflect': self.padding = nn.ReflectionPad1d(self.n_fft//2) def forward(self,x, output_format=None): \"\"\"", "refresh_win=True): super().__init__() # Trying to make the default setting same", "Putting the iSTFT keneral at the center of the time-step", "value is ``False``. output_format : str Determine the return type.", "# Use extend_fbins function to get back another half if", "# Create filter windows for stft kernel_sin, kernel_cos, self.bins2freq, self.bin_list,", "filter banks will also be calculated and the Mel filter", "kernels Q = 1/(2**(1/bins_per_octave)-1) # Creating lowpass filter and make", "shape that fits conv1d self.register_buffer('lowpass_filter', lowpass_filter[None,None,:]) if verbose==True: print(\"Low pass", "self.register_parameter('window_mask', window_mask) else: self.register_buffer('window_mask', window_mask) if verbose==True: print(\"iSTFT kernels created,", "self.register_buffer('lenghts', lenghts) self.basis=basis fft_basis = fft(basis)[:,:self.n_fft//2+1] # Convert CQT kenral", "inferred from the ``n_bins`` and ``bins_per_octave``. 
If ``fmax`` is not", "super().__init__() # norm arg is not functioning self.hop_length = hop_length", "Caluate num of filter requires for the kernel # n_octaves", "value is 2048 n_mels : int The number of Mel", "equivalent to ``n_fft//4``. window : str The windowing function for", ":, ::2], x[:, :, 1::2].flip([2])], dim=2) Vc = torch.rfft(v, 1,", "length shorter than reflect padding length (n_fft // 2).\") padding", "for CQT. It uses ``scipy.signal.get_window``, please refer to scipy documentation", "remove redundant parts spec_real = spec_real[:, :self.freq_bins, :] spec_imag =", "# create_lowpass_filter( # band_center = 0.50, # kernelLength=256, # transitionBandwidth=0.001))", "if verbose: print(f\"Target error of {loss_threshold} reached. Stopping optimization.\") break", "so that the model can be used with nn.Parallel #", "timesteps, 2).\"\\ \"\\nIf you have a magnitude spectrogram, please consider", "input audio. It is used to calculate the correct ``fmin``", "0] * W_r - Vc[:, :, :, 1] * W_i", "suppresses all prints. device : str Choose which device to", "the stft layer. No need for center self.stft = STFT(n_fft=n_fft,", "self.earlydownsample = get_early_downsample_params(sr, hop_length, fmax_t, Q, self.n_octaves, verbose) self.register_buffer('early_downsample_filter', early_downsample_filter)", "1] * W_i if norm == 'ortho': V[:, :, 0]", "time index is the beginning of the CQT kernel, if", ":] spec_imag = spec_imag[:, :self.freq_bins, :] if output_format=='Magnitude': spec =", "self.pad_mode == 'constant': self.padding = nn.ConstantPad1d(self.n_fft//2, 0) elif self.pad_mode ==", "win_length=None, freq_bins=None, hop_length=None, window='hann', freq_scale='no', center=True, fmin=50, fmax=6000, sr=22050, trainable_kernels=False,", "is very important for calculating the correct frequency. 
hop_length :", "``False``, it suppresses all prints Returns ------- spectrogram : torch.tensor", "minium bins if fmax_t > sr/2: raise ValueError('The top bin", "and loss < loss_threshold: if verbose: print(f\"Target error of {loss_threshold}", "The window size. Default value is 2048. freq_bins : int", "This algorithm is using the resampling method proposed in [1].", "for STFT kernels will also be caluclated and the STFT", "return torch.sqrt(CQT.pow(2).sum(-1)) else: return torch.sqrt(CQT.pow(2).sum(-1)+1e-8) elif output_format=='Complex': return CQT elif", "kernel size. The result with and without early downsampling are", "stride=self.hop_length) # CQT CQT_real, CQT_imag = complex_mul((self.cqt_kernels_real, self.cqt_kernels_imag), (fourier_real, fourier_imag))", "later if possible self.trainable = trainable self.output_format = output_format #", "stride=(1,1)) b2 = conv2d(X_imag_bc, self.kernel_sin_inv, stride=(1,1)) # compute real and", "windowing function for iSTFT. It uses ``scipy.signal.get_window``, please refer to", "filter windows for stft kernel_sin, kernel_cos, self.bins2freq, self.bin_list, window_mask =", "torch.tensor(lenghts).float() self.register_buffer('lenghts', lenghts) self.basis = basis # These cqt_kernel is", "shows layer information. If ``False``, it suppresses all prints device", "the time index is the center of the STFT kernel.", "shape will be inferred autommatically if the input follows these", "Then we keep downsampling the input audio by a factor", "40Hz. Parameters ---------- sr : int The sampling rate for", "device=mel_basis.device).normal_().clamp_(eps) else: pred_stft = (torch.pinverse(mel_basis) @ melspec).clamp(eps) pred_stft = nn.Parameter(pred_stft,", "* window_mask wcos = kernel_cos * window_mask if self.trainable==False: self.register_buffer('wsin',", "# kernelLength=256, # transitionBandwidth=0.001)) lowpass_filter = torch.tensor(create_lowpass_filter( band_center = 0.50,", "reached. 
Stopping optimization.\") break pred_stft = pred_stft.detach().clamp(eps) ** 0.5 pred_stft", "maps the n_fft to mel bins. Default value is 128.", "freq_scale='no') # Converting kernels from numpy arrays to torch tensors", "of the CQT kernel, if ``True``, the time index is", "the same as the STFT in order to obtain the", "str Control the type of spectrogram to be return. Can", "all these variables trainable kernel_sin = torch.nn.Parameter(kernel_sin, requires_grad=trainable_kernels) kernel_cos =", "possible windowing functions. The default value is 'hann'. center :", "center of the CQT kernel. Default value if ``True``. pad_mode", "created, time used = {:.4f} seconds\".format(time()-start)) if trainable_STFT: wsin =", "Q transform.” (1992). early downsampling factor is to downsample the", "rate is very important for calculating the correct frequency. trainable", "frames real = overlap_add(real, self.stride) # Prepare the window sumsqure", "Transform (STFT) # We set the frequency range in the", "and ``fmax``. Setting the correct sampling rate is very important", "do not need to be windowed window_mask = torch.tensor(window_mask).unsqueeze(0).unsqueeze(-1) #", "is to calculate the CQT of the input signal. Input", "in range(self.n_iter): tprev = rebuilt # Saving previous rebuilt magnitude", "lower octave. The kernel creation process is still same as", "= torch.nn.Parameter(wcos, requires_grad=self.trainable) self.register_parameter('wsin', wsin) self.register_parameter('wcos', wcos) # Prepare the", "kernelLength=256, # transitionBandwidth=0.001)) lowpass_filter = torch.tensor(create_lowpass_filter( band_center = 0.50, kernelLength=256,", "the Mel scale is logarithmic. The default value is ``False``.", "inferred automatically if the input follows these 3 shapes. Most", "shapes. 
Most of the arguments follow the convention from librosa.", "= broadcast_dim(x_real) x_imag = broadcast_dim(x_imag) x_real.transpose_(1,2) # Prepare the right", "# print(self.lenghts.view(-1,1).shape) # Normalizing the output with the downsampling factor,", "center=self.center) # wav2spec conversion rebuilt = torch.stft(inverse, self.n_fft, self.hop_length, win_length=self.win_length,", "index is the center of the iSTFT kernel. Default value", "a direct clone from librosa.griffinlim. [1] <NAME>., <NAME>., & <NAME>.", "= hop_length # Creating window function for stft and istft", "``fmax`` is not ``None``. bins_per_octave : int Number of bins", "create_cqt_kernels(Q, sr, self.fmin_t, n_filters, bins_per_octave, norm=basis_norm, topbin_check=False) # For normalization", "losses return pred_stft def inverse(self, melspec, mel_inversion_params=None, stft_inversion_params=None): default_mel_inversion_params =", "rule. The default value is ``0.99``. device : str Choose", "# basis_norm is for normalizing basis self.hop_length = hop_length self.pad_mode", "is calcuated to obtain the final MFCCs. Therefore, the Mel", "is a direct clone from librosa.griffinlim. [1] <NAME>., <NAME>., &", "= norm # Now norm is used to normalize the", "CQT kenral from time domain to freq domain # These", "Griffin Lim is a direct clone from librosa.griffinlim. [1] <NAME>.,", "{:.4f} seconds\".format(time()-start)) # creating kernels for stft # self.cqt_kernels_real*=lenghts.unsqueeze(1)/self.kernel_width #", "used = {:.4f} seconds\".format(time()-start)) # Caluate num of filter requires", "trainability is not required, it is recommended to use the", "have a magnitude spectrogram, please consider using Griffin-Lim.\" if onesided:", "super().__init__() self.n_fft = n_fft self.win_length = win_length self.n_iter = n_iter", "nothing. 
sr : int The sampling rate for the input", "output_format = output_format or self.output_format x = broadcast_dim(x) if self.center:", "self.output_format = output_format self.trainable = trainable self.stride = hop_length self.center", "the lowest frequency bin for the top octave kernel self.fmin_t", "output length of the original waveform, please set `length` as", "center of the iSTFT kernel. Default value if ``True``. Please", "real = real[:, :length] return real class Griffin_Lim(torch.nn.Module): \"\"\" Converting", "device=device).float() def forward(self, S): \"\"\" Convert a batch of magnitude", "will be automatically broadcast to the right shape \"\"\" output_format", "if hop_length==None: hop_length = int(win_length // 4) self.output_format = output_format", "Nan gradient when sqrt(0) due to output=0 else: return torch.sqrt(spec)", "self.output_format x = broadcast_dim(x) if self.earlydownsample==True: x = downsampling_by_n(x, self.early_downsample_filter,", "same as the forward STFT. fmax : int The ending", "``output_format='Complex' or 'Phase'``; Examples -------- >>> spec_layer = Spectrogram.CQT2010v2() >>>", "Remove padding if length is None: if self.center: real =", "1992 CQT = CQT*self.downsample_factor if output_format=='Magnitude': # Getting CQT Amplitude", "therefore, the usage is same as ``torch.nn.Module``. This alogrithm uses", "torch.sqrt(spec) elif output_format=='Complex': return torch.stack((spec_real,-spec_imag), -1) # Remember the minus", "Mel filter banks are trainable or not. If ``True``, the", "be the same as the STFT in order to obtain", "the same as the forward STFT. freq_scale : 'linear', 'log',", "use `inverse`\") assert X.dim()==4 , \"Inverse iSTFT only works for", "convention from librosa. 
This class inherits from ``torch.nn.Module``, therefore, the", "conv1d(x_real, self.wsin, stride=self.stride) b1 = conv1d(x_imag, self.wcos, stride=self.stride) b2 =", "print(\"Low pass filter created, time used = {:.4f} seconds\".format(time()-start)) #", "Convert a batch of waveforms to MFCC. Parameters ---------- x", "prints Returns ------- spectrogram : torch.tensor It returns a tensor", "total numbers of CQT bins. Default is 84. Will be", "filter instead of here. if verbose==True: print(\"Creating STFT kernels ...\",", "= self.fmin_t*2**((bins_per_octave-1)/bins_per_octave) else: # Calculate the top bin frequency fmax_t", "you have a magnitude spectrogram, please consider using Griffin-Lim.\" if", "at Nyquist frequency with linear spacing. center : bool Putting", "real[:, self.nonzero_indices] = real[:,self.nonzero_indices].div(self.w_sum[self.nonzero_indices]) # Remove padding if length is", "\"\"\" Experimental feature before `torch.fft` was made avaliable. The inverse", "spec = self.stft(x, output_format='Magnitude')**self.power melspec = torch.matmul(self.mel_basis, spec) return melspec", "inverse # if self.center: # if self.pad_mode == 'constant': #", "input audio is trainable or not. Default is ``False`` trainable_CQT", "torch tensors wsin = torch.tensor(kernel_sin * window) wcos = torch.tensor(kernel_cos", "input is equivalent to the next lower octave. The kernel", "= Spectrogram.CQT2010v2() >>> specs = spec_layer(x) \"\"\" # To DO:", "real[:, :length] return real class Griffin_Lim(torch.nn.Module): \"\"\" Converting Magnitude spectrograms", "original waveform, please set `length` as your intended waveform length.", "be updated during model training. Default value is ``False``. output_format", "{} stft_inversion_params = stft_inversion_params or {} if mel_inversion_params: mel_inversion_params =", "output_format = output_format or self.output_format x = broadcast_dim(x) if self.earlydownsample==True:", "in either of the following shapes.\\n 1. 
``(len_audio)``\\n 2. ``(num_audio,", "self.win_length = win_length self.stride = hop_length self.center = center self.pad_amount", "int The ending frequency for the highest frequency bin. If", "requires_grad=self.trainable) # self.kernel_cos = torch.nn.Parameter(self.kernel_cos, requires_grad=self.trainable) # Applying window functions", "kernel_sin, kernel_cos, self.bins2freq, _, window = create_fourier_kernels(self.n_fft, window='ones', freq_scale='no') wsin", "phase_imag = torch.sin(torch.atan2(CQT[:,:,:,1],CQT[:,:,:,0])) return torch.stack((phase_real,phase_imag), -1) def extra_repr(self) -> str:", "x_real.transpose_(1,2) # Prepare the right shape to do inverse x_imag.transpose_(1,2)", "the calculation of a constant Q transform.” (1992). Early downsampling", "'linear', 'log', or 'no' Determine the spacing between each frequency", "kernel_sin * window wcos = kernel_cos * window wsin =", "The default value is 'hann' pad_mode : str The padding", "# self.wsin = torch.nn.Parameter(self.wsin) # self.wcos = torch.nn.Parameter(self.wcos) def forward(self,", "# Calculate the top bin frequency fmax_t = self.fmin_t*2**((bins_per_octave-1)/bins_per_octave) else:", "to connect all the frames real = overlap_add(real, self.stride) #", "{:.4f} seconds\".format(time()-start)) if trainable_STFT: wsin = torch.nn.Parameter(wsin, requires_grad=trainable_kernels) wcos =", "to calculate the correct ``fmin`` and ``fmax``. Setting the correct", "where freq < 40Hz. \"\"\" def __init__(self, sr=22050, hop_length=512, fmin=32.70,", "be in either of the following shapes.\\n 1. 
``(len_audio)``\\n 2.", "result in complex number, shape = ``(num_samples, freq_bins,time_steps, 2)``; ``Phase``", "CQT[:,-self.n_bins:,:] # Removing unwanted bottom bins # print(\"downsample_factor = \",self.downsample_factor)", "the input audio is downsampled, the CQT relative to the", "the shape of (batch, freq_bins, timesteps)\" # Initializing Random Phase", "DO: # need to deal with the filter and other", "self.register_buffer('cqt_kernels_real', cqt_kernels_real) self.register_buffer('cqt_kernels_imag', cqt_kernels_imag) print(\"STFT kernels created, time used =", "or 'Phase'``; Examples -------- >>> spec_layer = Spectrogram.STFT() >>> specs", "is ``None`` which is equivalent to ``n_fft//4``. window : str", "self.kernel_cos_inv, stride=(1,1)) b2 = conv2d(X_imag_bc, self.kernel_sin_inv, stride=(1,1)) # compute real", "it suppresses all prints device : str Choose which device", "obtain the final MFCCs. Therefore, the Mel spectrogram part can", "fmin : float The frequency for the lowest CQT bin.", "pass real[:, self.nonzero_indices] = real[:,self.nonzero_indices].div(self.w_sum[self.nonzero_indices]) # Remove padding if length", "Calculate the lowest frequency bin for the top octave kernel", "norm=basis_norm, topbin_check=False) # This is for the normalization in the", "shape output_format : str Control the type of spectrogram to", "if trainable_window: window_mask = torch.nn.Parameter(window_mask, requires_grad=trainable_window) self.register_parameter('window_mask', window_mask) else: self.register_buffer('window_mask',", "of Signal Processing to Audio and Acoustics (pp. 1-4), Oct.", "'no' Determine the spacing between each frequency bin. When `linear`", "kernel size = {}'.format( (*self.wcos.shape,), (*self.cqt_kernels_real.shape,) ) class CQT2010(torch.nn.Module): \"\"\"", "and the STFT kernels will be updated during model training.", "purpose # Please don't use the following classes # class", "highest CQT bin. 
Default is ``None``, therefore the higest CQT", "downsampling_by_n(x, self.early_downsample_filter, self.downsample_factor) hop = self.hop_length CQT = get_cqt_complex(x, self.wcos,", "caluclated and the CQT kernels will be updated during model", "momentum self.device = device if win_length==None: self.win_length=n_fft else: self.win_length=win_length if", "Method for debugging \"\"\" x = broadcast_dim(x) if self.center: if", "pred_stft = pred_stft.detach().clamp(eps) ** 0.5 pred_stft = pred_stft.view((*shape[:-2], n_freq, time))", "result by dividing n_fft # basis_norm is for normalizing basis", "value is ``False``. trainable_STFT : bool Determine if the STFT", "def to_stft(self, melspec, max_steps=1000, loss_threshold=1e-8, grad_threshold=1e-7, random_start=False, sgd_kwargs=None, eps=1e-12, return_extras=False,", "Overlap and Add algorithm to connect all the frames real", "stride=self.hop_length) CQT_imag = conv1d(x, self.cqt_kernels_imag, stride=self.hop_length) # Getting CQT Amplitude", "CQT = get_cqt_complex(x, self.cqt_kernels_real, self.cqt_kernels_imag, hop, self.padding) # Getting the", "It is used to calculate the correct ``fmin`` and ``fmax``.", "to convoluting it with the small CQT kernel. Everytime the", ":, :, 0] * W_r - Vc[:, :, :, 1]", "calculate the CQT of the input signal. Input signal should", "window_mask = create_fourier_kernels(n_fft, win_length=win_length, freq_bins=n_fft, window=window, freq_scale=freq_scale, fmin=fmin, fmax=fmax, sr=sr,", "log_spec def _dct(self, x, norm=None): ''' Refer to https://github.com/zh217/torch-dct for", "{grad_threshold} reached. Stopping optimization.\") break pred_stft = pred_stft.detach().clamp(eps) ** 0.5", "# remove redundant parts spec_real = spec_real[:, :self.freq_bins, :] spec_imag", "of spectrograms. 
``shape = (num_samples, freq_bins,time_steps)`` if ``output_format='Magnitude'``; ``shape =", "0], X[:, :, :, 1] # broadcast dimensions to support", "inverse(self, melspec, mel_inversion_params=None, stft_inversion_params=None): default_mel_inversion_params = {} default_stft_inversion_params = {}", "CQT Amplitude CQT = torch.sqrt(CQT_real.pow(2)+CQT_imag.pow(2)) else: CQT = torch.sqrt(CQT_real.pow(2)+CQT_imag.pow(2)+1e-8) return", "is 'Magnitude'. verbose : bool If ``True``, it shows layer", "print(\"Getting cqt kernel done, n_fft = \",self.n_fft) # Preparing kernels", "``onesided=False`` To make sure the inverse STFT has the same", "Therefore, the Mel spectrogram part can be made trainable using", "if you want to use `inverse`\") assert X.dim()==4 , \"Inverse", "the input follows these 3 shapes. Most of the arguments", "``1`` means L1 normalization, and ``2`` means L2 normalization. Default", "spectrums. Parameters ---------- x : torch tensor Input signal should", "= spec_layer(x) \"\"\" def __init__(self, sr=22050, hop_length=512, fmin=220, fmax=None, n_bins=84,", "Getting CQT Amplitude return torch.sqrt(CQT.pow(2).sum(-1)) else: return torch.sqrt(CQT.pow(2).sum(-1)+1e-8) elif output_format=='Complex':", "the CQT kernel. Default value if ``True``. pad_mode : str", "pred_mel.detach(), losses return pred_stft def inverse(self, melspec, mel_inversion_params=None, stft_inversion_params=None): default_mel_inversion_params", "refresh_win start = time() # Create the window function and", "Convert a batch of waveforms to Mel spectrograms. 
Parameters ----------", "else: self.register_buffer('cqt_kernels_real', cqt_kernels_real) self.register_buffer('cqt_kernels_imag', cqt_kernels_imag) if verbose==True: print(\"CQT kernels created,", "= get_cqt_complex(x_down, self.cqt_kernels_real, self.cqt_kernels_imag, hop, self.padding) CQT = torch.cat((CQT1, CQT),1)", "self.earlydownsample==True: x = downsampling_by_n(x, self.early_downsample_filter, self.downsample_factor) hop = self.hop_length CQT", "= torch.tensor(basis.imag.astype(np.float32)).unsqueeze(1) if trainable: cqt_kernels_real = torch.nn.Parameter(cqt_kernels_real, requires_grad=trainable_kernels) cqt_kernels_imag =", "self.register_buffer('cqt_kernels_imag', cqt_kernels_imag) print(\"STFT kernels created, time used = {:.4f} seconds\".format(time()-start))", "``no``, this argument does nothing. fmax : int The ending", "a batch of waveforms. Examples -------- >>> spec_layer = Spectrogram.iSTFT()", "type-II DCT at the moment. Input signal should be in", "1::2].flip([2])], dim=2) Vc = torch.rfft(v, 1, onesided=False) # TODO: Can", "window function and prepare the shape for batch-wise-time-wise multiplication #", "is 'hann'. Please make sure the value is the same", "= spec_real.pow(2) + spec_imag.pow(2) if self.trainable==True: return torch.sqrt(spec+1e-8) # prevent", "trainable_mel==True: # self.mel_basis = torch.nn.Parameter(self.mel_basis) # if trainable_STFT==True: # self.wsin", "librosa.griffinlim. [1] <NAME>., <NAME>., & <NAME>. “A fast Griffin-Lim algorithm,”", "the lowest Mel filter bank. fmax : int The ending", "be return. Can be either ``Magnitude`` or ``Complex`` or ``Phase``.", "size. Default value is ``None`` which is equivalent to ``n_fft//4``.", "The default value is 'hann'. freq_scale : 'linear', 'log', or", "the shape (freq_bins, 1, n_fft, 1) to support 2D Conv", "layer. 
No need for center self.stft = STFT(n_fft=n_fft, freq_bins=None, hop_length=hop_length,", "self.earlydownsample = earlydownsample # TODO: activate early downsampling later if", "if ``output_format='Complex' or 'Phase'``; Examples -------- >>> spec_layer = Spectrogram.STFT()", "scipy documentation for possible windowing functions. The default value is", "nn.ReflectionPad1d(self.pad_amount) x = padding(x) spec_imag = conv1d(x, self.wsin, stride=self.stride) spec_real", "{:.4f} seconds\".format(time()-start)) else: pass def forward(self, x, output_format=None): \"\"\" Convert", "that fits conv1d self.register_buffer('lowpass_filter', lowpass_filter[None,None,:]) if verbose==True: print(\"Low pass filter", "waveforms to CQT spectrograms. Parameters ---------- x : torch tensor", "``(num_audio, len_audio)``\\n 3. ``(num_audio, 1, len_audio)`` The correct shape will", "'cpu' \"\"\" def __init__(self, n_fft, n_iter=32, hop_length=None, win_length=None, window='hann', center=True,", "not share the same memory... 
kernel_sin_inv = torch.cat((kernel_sin, -kernel_sin[1:-1].flip(0)), 0)", "tensor is in the shape of (batch, freq_bins, timesteps, 2).\"\\", "kernel_sin = torch.tensor(kernel_sin, dtype=torch.float).unsqueeze(-1) kernel_cos = torch.tensor(kernel_cos, dtype=torch.float).unsqueeze(-1) # Decide", "print(\"Mel filter created, time used = {:.4f} seconds\".format(time()-start)) else: pass", "shorter than reflect padding length (n_fft // 2).\") padding =", "nnAudio.utils import * sz_float = 4 # size of a", "fmax=None, n_bins=84, trainable_STFT=False, trainable_CQT=False, bins_per_octave=12, output_format='Complex', norm=1, window='hann', center=True, pad_mode='reflect'):", "basis_norm=1, window='hann', pad_mode='reflect', trainable_STFT=False, trainable_CQT=False, output_format='Complex', earlydownsample=True, verbose=True): super().__init__() self.norm", "torch.sqrt(CQT_real.pow(2)+CQT_imag.pow(2)) else: CQT = torch.sqrt(CQT_real.pow(2)+CQT_imag.pow(2)+1e-8) return CQT elif output_format=='Complex': return", "self.norm: CQT = CQT/self.kernel_width*torch.sqrt(self.lenghts.view(-1,1,1)) else: CQT = CQT*torch.sqrt(self.lenghts.view(-1,1,1)) if output_format=='Magnitude':", "If ``True``, the gradients for STFT kernels will also be", "= torch.abs(torch.tensor([ref])) self.register_buffer('amin', amin) self.register_buffer('ref', ref) self.top_db = top_db self.n_mfcc", "(batch, n_fft, 1) \"\"\" def __init__(self, n_fft=2048, freq_bins=None, hop_length=512, window='hann',", "last axis. Default value is 'Magnitude'. 
verbose : bool If", "# elif self.pad_mode == 'reflect': # padding = nn.ReflectionPad1d(self.n_fft//2) #", "by using conv1d # remove redundant parts spec_real = spec_real[:,", "fmin=220, fmax=None, n_bins=84, trainable_STFT=False, trainable_CQT=False, bins_per_octave=12, output_format='Complex', norm=1, window='hann', center=True,", "seconds\".format(time()-start)) else: pass def forward(self, X, onesided=False, length=None, refresh_win=None): \"\"\"", "broadcast to the right shape \"\"\" output_format = output_format or", "the STFT in order to obtain the correct inverse. If", "the update rule. The default value is ``0.99``. device :", "i in range(max_steps): optimizer.zero_grad() pred_mel = mel_basis @ pred_stft loss", "``2`` means L2 normalization. Default is ``1``, which is same", "wsin) self.register_buffer('wcos', wcos) if trainable_CQT: cqt_kernels_real = torch.nn.Parameter(cqt_kernels_real, requires_grad=trainable_kernels) cqt_kernels_imag", "``torch.nn.Module``. This alogrithm uses the method proposed in [1]. I", "in the very low frequency region where freq < 40Hz.", "= torch.cos(torch.atan2(CQT[:,:,:,1],CQT[:,:,:,0])) phase_imag = torch.sin(torch.atan2(CQT[:,:,:,1],CQT[:,:,:,0])) return torch.stack((phase_real,phase_imag), -1) def extra_repr(self)", "not functioning self.hop_length = hop_length self.center = center self.pad_mode =", "verbose # Preparing for the stft layer. No need for", "kernel is trainable or not. Default is ``False`` norm :", "be changed during the ``forward`` method. 
verbose : bool If", "* window) wcos = torch.tensor(kernel_cos * window) cqt_kernels_real = torch.tensor(cqt_kernels.real.astype(np.float32))", "print(\"Creating early downsampling filter ...\", end='\\r') start = time() sr,", "fmin, n_bins, bins_per_octave, norm, window, fmax) self.register_buffer('lenghts', lenghts) cqt_kernels_real =", "np.float(bins_per_octave)) lenghts = np.ceil(Q * sr / freqs) lenghts =", "spec_layer(x) \"\"\" def __init__(self, sr=22050, n_fft=2048, n_mels=128, hop_length=512, window='hann', center=True,", "trainable self.output_format = output_format # It will be used to", "to frequency domain transformation kernel for the input audio is", "torch.tensor(get_window(window, int(self.win_length), fftbins=True), device=device).float() def forward(self, S): \"\"\" Convert a", "self.ref)) if self.top_db is not None: if self.top_db < 0:", "value is 'reflect'. htk : bool When ``False`` is used,", "transform.” (1992). This function is to calculate the CQT of", "Default value is 'reflect'. htk : bool When ``False`` is", "# 0.2.0 import torch import torch.nn as nn from torch.nn.functional", "each frequency bin. When `linear` or `log` is used, the", "x_down = downsampling_by_2(x_down, self.lowpass_filter) CQT1 = get_cqt_complex(x_down, self.wcos, self.wsin, hop,", "{}Hz has exceeded the Nyquist frequency, \\ please reduce the", "the inverse STFT has the same output length of the", "else: CQT = torch.sqrt(CQT_real.pow(2)+CQT_imag.pow(2)+1e-8) return CQT elif output_format=='Complex': return torch.stack((CQT_real,CQT_imag),-1)", "iSTFT keneral at the center of the time-step or not.", "half if onesided: X = extend_fbins(X) # extend freq X_real,", "conv1d(x, self.wcos, stride=self.stride) # Doing STFT by using conv1d #", "forward STFT. 
freq_scale : 'linear', 'log', or 'no' Determine the", ":func:`~nnAudio.Spectrogram.iSTFT` class, which is to convert spectrograms back to waveforms.", "n_bins self.output_format = output_format self.earlydownsample = earlydownsample # TODO: activate", "Normalization for the CQT result. basis_norm : int Normalization for", "the inverse kernel and the forward kernel do not share", "= hop//2 x_down = downsampling_by_2(x_down, self.lowpass_filter) CQT1 = get_cqt_complex(x_down, self.wcos,", "= conv1d(x, self.wsin, stride=self.hop_length) # CQT CQT_real, CQT_imag = complex_mul((self.cqt_kernels_real,", "``fmax``. If 'no' is used, the bin will start at", "nn.DataParallel mel_basis = torch.nn.Parameter(mel_basis, requires_grad=trainable_mel) self.register_parameter('mel_basis', mel_basis) else: self.register_buffer('mel_basis', mel_basis)", "nothing. fmax : int The ending frequency for the highest", "seconds\".format(time()-start)) def forward(self,x, output_format=None): \"\"\" Convert a batch of waveforms", "argument does nothing. fmax : int The ending frequency for", "per octave. Default is 12. norm : int Normalization for", "end='\\r') start = time() basis, self.n_fft, lenghts = create_cqt_kernels(Q, sr,", "{:.4f} seconds\".format(time()-start)) # Calculate num of filter requires for the", "same as ``torch.nn.Module``. This alogrithm uses the method proposed in", "to the shape that fits conv1d self.register_buffer('lowpass_filter', lowpass_filter[None,None,:]) if verbose==True:", "wsin) self.register_buffer('wcos', wcos) if self.trainable==True: wsin = torch.nn.Parameter(wsin, requires_grad=self.trainable) wcos", "verbose=None): \"\"\" Best-attempt spectrogram inversion \"\"\" def loss_fn(pred, target): pred", "the usage is same as ``torch.nn.Module``. Parameters ---------- sr :", "freq_bins, timesteps, 2).\"\\ \"\\nIf you have a magnitude spectrogram, please", "which is equivalent to ``n_fft//4``. 
Please make sure the value", "elif output_format=='Phase': phase_real = torch.cos(torch.atan2(CQT[:,:,:,1],CQT[:,:,:,0])) phase_imag = torch.sin(torch.atan2(CQT[:,:,:,1],CQT[:,:,:,0])) return torch.stack((phase_real,phase_imag),", "{:.4f} seconds\".format(time()-start)) def forward(self, x, output_format=None): \"\"\" Convert a batch", "stride) size. Default value is ``None`` which is equivalent to", "self.center = center self.pad_mode = pad_mode self.n_fft = n_fft #", "kernel_sin, kernel_cos, self.bins2freq, self.bin_list, window_mask = create_fourier_kernels(n_fft, win_length=win_length, freq_bins=freq_bins, window=window,", "gradients for STFT kernels will also be caluclated and the", "super().__init__() self.norm = norm # Now norm is used to", "random_start: pred_stft_shape = (batch_size, n_freq, time) pred_stft = torch.zeros(*pred_stft_shape, dtype=torch.float32,", "x : torch tensor Input signal should be in either", "length] else: real = real[:, :length] return real def extra_repr(self)", "spectrogram start = time() mel_basis = mel(sr, n_fft, n_mels, fmin,", "``(num_audio, 1, len_audio)`` The correct shape will be inferred automatically", "2D convolution X_real_bc = X_real.unsqueeze(1) X_imag_bc = X_imag.unsqueeze(1) a1 =", "[1]. 
Please refer to :func:`~nnAudio.Spectrogram.CQT1992v2` for a more computational and", "= torch.nn.Parameter(window_mask, requires_grad=trainable_window) self.register_parameter('window_mask', window_mask) else: self.register_buffer('window_mask', window_mask) if verbose==True:", "early downsampling if this argument is True if verbose==True: print(\"Creating", "10.0 * torch.log10(torch.max(self.amin, self.ref)) if self.top_db is not None: if", "window function for stft and istft later self.w = torch.tensor(get_window(window,", "window) cqt_kernels_real = torch.tensor(cqt_kernels.real.astype(np.float32)) cqt_kernels_imag = torch.tensor(cqt_kernels.imag.astype(np.float32)) if trainable_STFT: wsin", "bool Determine if the CQT kernels are trainable or not.", "still same as the 1992 algorithm. Therefore, we can reuse", "# Initializing Random Phase rand_phase = torch.randn(*S.shape, device=self.device) angles =", "it is recommended to use the ``inverse`` method under the", "Acoustics (pp. 1-4), Oct. 2013. Parameters ---------- n_fft : int", "= self.stft.inverse(recon_stft, **stft_inversion_params) return recon_audio class MFCC(torch.nn.Module): \"\"\"This function is", "def inverse(self, melspec, mel_inversion_params=None, stft_inversion_params=None): default_mel_inversion_params = {} default_stft_inversion_params =", "seconds\".format(time()-start)) else: pass def forward(self, x, output_format=None): \"\"\" Convert a", "``onesided=False`` length : int To make sure the inverse STFT", "``Magnitude`` or ``Complex`` or ``Phase``. Default value is ``Complex``. 
\"\"\"", "fudge factor for normalization ### --------------------------- Spectrogram Classes ---------------------------### class", "the magnitude of the STFT result, shape = ``(num_samples, freq_bins,", "has exceeded the Nyquist frequency, \\ please reduce the n_bins'.format(fmax_t))", "spec_imag = spec_imag[:, :self.freq_bins, :] if output_format=='Magnitude': spec = spec_real.pow(2)", "0) elif self.pad_mode == 'reflect': if self.num_samples < self.pad_amount: raise", "convolution X_real_bc = X_real.unsqueeze(1) X_imag_bc = X_imag.unsqueeze(1) a1 = conv2d(X_real_bc,", ") class MelSpectrogram(torch.nn.Module): \"\"\"This function is to calculate the Melspectrogram", "``False`` output_format : str Control the spectrogram output type, either", "= n_fft if hop_length==None: hop_length = int(win_length // 4) self.n_fft", "method. Default value is 'reflect'. inverse : bool To activate", "of the iSTFT kernel, if ``True``, the time index is", "X_real.unsqueeze(1) X_imag_bc = X_imag.unsqueeze(1) a1 = conv2d(X_real_bc, self.kernel_cos, stride=(1,1)) b2", "if trainable_STFT: wsin = torch.nn.Parameter(wsin, requires_grad=trainable_kernels) wcos = torch.nn.Parameter(wcos, requires_grad=trainable_kernels)", "kernel_sin, kernel_cos, _, _, window_mask = create_fourier_kernels(n_fft, win_length=win_length, freq_bins=n_fft, window=window,", "kernel_sin and kernel_cos have the shape (freq_bins, 1, n_fft, 1)", "pad_mode=pad_mode, sr=sr, trainable=trainable_STFT, output_format=\"Magnitude\", verbose=verbose, **kwargs) # Create filter windows", ": bool Determine if the time to frequency domain transformation", "The sampling rate for the input audio. It is used", "constant Q transform.” (1992). early downsampling factor is to downsample", ": str The padding method. Default value is 'reflect'. 
htk", "is ``None`` n_bins : int The total numbers of CQT", "# kernel_sin and kernel_cos have the shape (freq_bins, 1, n_fft,", "x = padding(x) imag = conv1d(x, self.wsin, stride=self.stride) real =", "if output_format=='Magnitude': if self.trainable==False: # Getting CQT Amplitude CQT =", "window size. Default value is 2048. n_iter=32 : int The", "mel_basis) # if trainable_mel==True: # self.mel_basis = torch.nn.Parameter(self.mel_basis) # if", "CQT kernels will be updated during model training. Default value", "Parameters ---------- x_real : torch tensor Real part of the", "torch.atan2(-spec_imag+0.0,spec_real) # +0.0 removes -0.0 elements, which leads to error", "broadcast to the right shape \"\"\" x = broadcast_dim(x) spec", "= 0.50, # kernelLength=256, # transitionBandwidth=0.001)) lowpass_filter = torch.tensor(create_lowpass_filter( band_center", "normalization, and ``2`` means L2 normalization. Default is ``1``, which", "the same as the forward STFT. sr : int The", "-------- >>> spec_layer = Spectrogram.CQT1992v2() >>> specs = spec_layer(x) \"\"\"", "same length, please use ``refresh_win=None`` to increase computational speed. \"\"\"", "start = time() lowpass_filter = torch.tensor(create_lowpass_filter( band_center = 0.5, kernelLength=256,", "windowing functions. The default value is 'hann'. center : bool", "calculating phase def inverse(self, X, onesided=True, length=None, refresh_win=True): \"\"\" This", "classes \"\"\" # 0.2.0 import torch import torch.nn as nn", "if the window function is trainable or not. Default value", "the W_r and W_i trainable here k = - torch.arange(N,", "is used to normalize the final CQT result by dividing", "wcos) # Prepare the shape of window mask so that", "as the forward STFT. freq_scale : 'linear', 'log', or 'no'", "increase computational speed. \"\"\" if refresh_win==None: refresh_win=self.refresh_win assert X.dim()==4 ,", "same as the forward STFT. 
momentum : float The momentum", "is ``False`` trainable_CQT : bool Determine if the frequency domain", "to save time # Unless the input spectrograms have different", "to output=0 else: return torch.sqrt(spec) elif output_format=='Complex': return torch.stack((spec_real,-spec_imag), -1)", "bins per octave. Default is 12. norm : bool Normalization", "the value is the same as the forward STFT. hop_length", "b2)*1j # = (-b2, b1) a1 = conv1d(x_real, self.wcos, stride=self.stride)", "np.pi * rand_phase) angles[:,:,:,1] = torch.sin(2 * np.pi * rand_phase)", "wsin, wcos, self.bins2freq = create_fourier_kernels(n_fft=n_fft, freq_bins=n_fft, window=window, freq_scale=freq_scale, fmin=fmin, fmax=fmax,", "``bins_per_octave``. If ``fmax`` is not ``None``, then the argument ``n_bins``", "self.top_db) return log_spec def _dct(self, x, norm=None): ''' Refer to", "return loss verbose = verbose or self.verbose # SGD arguments", "self.center: if self.pad_mode == 'constant': padding = nn.ConstantPad1d(self.pad_amount, 0) elif", "Default value is 512. fmin : float The frequency for", "and ``n_bins`` will be calculated automatically. Default is ``None`` n_bins", "Conv kernel_sin = torch.tensor(kernel_sin, dtype=torch.float).unsqueeze(-1) kernel_cos = torch.tensor(kernel_cos, dtype=torch.float).unsqueeze(-1) #", "done, n_fft = \",self.n_fft) # If center==True, the STFT window", "functions. The default value is 'hann'. Please make sure the", "\"\"\" def __init__(self, sr=22050, hop_length=512, fmin=220, fmax=None, n_bins=84, trainable_STFT=False, trainable_CQT=False,", "# Getting the top octave CQT x_down = x #", "fmin : int The starting frequency for the lowest frequency", ": float The momentum for the update rule. The default", "forward STFT. center : bool Putting the iSTFT keneral at", "the calculation of a constant Q transform.” (1992). 
Parameters ----------", "if win_length==None: win_length = n_fft if hop_length==None: hop_length = int(win_length", "factor of 2 to convoluting it with the small CQT", "self.center: real = real[:, self.pad_amount:self.pad_amount + length] else: real =", "tensor Input signal should be in either of the following", "= {:.4f} seconds\".format(time()-start)) if trainable_STFT: wsin = torch.nn.Parameter(wsin, requires_grad=trainable_kernels) wcos", "conv1d(x, self.wsin, stride=self.hop_length) # CQT CQT_real, CQT_imag = complex_mul((self.cqt_kernels_real, self.cqt_kernels_imag),", "size={}, iSTFT={}, trainable={}'.format( self.n_fft, (*self.wsin.shape,), self.iSTFT, self.trainable ) class MelSpectrogram(torch.nn.Module):", "number of iterations for Griffin-Lim. The default value is ``32``", "x.shape[-1] x = broadcast_dim(x) if self.center: if self.pad_mode == 'constant':", "from time domain to freq domain # These cqt_kernel is", "Nyquist frequency with linear spacing. center : bool Putting the", "x = downsampling_by_n(x, self.early_downsample_filter, self.downsample_factor) hop = self.hop_length CQT =", "requires_grad=self.trainable) wcos = torch.nn.Parameter(wcos, requires_grad=self.trainable) self.register_parameter('wsin', wsin) self.register_parameter('wcos', wcos) #", "loss_threshold and loss < loss_threshold: if verbose: print(f\"Target error of", "input audio is downsampled, the CQT relative to the downsampled", "this layer. Default value is 'cpu' Returns ------- spectrogram :", ": string The default value is 'ortho'. Normalization for DCT", "only half of the n_fft # Use extend_fbins function to", "downsampling for i in range(self.n_octaves-1): hop = hop//2 x_down =", "``(num_samples, freq_bins,time_steps, 2)``. The complex number is stored as ``(real,", "normalization used in librosa. window : str The windowing function", "the time-step or not. 
If ``False``, the time index is", "# if self.center: # if self.pad_mode == 'constant': # padding", "self.trainable==False: # Getting CQT Amplitude CQT = torch.sqrt(CQT_real.pow(2)+CQT_imag.pow(2)) else: CQT", "self.trainable==False: # Getting CQT Amplitude return torch.sqrt(CQT.pow(2).sum(-1)) else: return torch.sqrt(CQT.pow(2).sum(-1)+1e-8)", "--------------------------- Spectrogram Classes ---------------------------### class STFT(torch.nn.Module): \"\"\"This function is to", "magnitude spec # spec2wav conversion # print(f'win_length={self.win_length}\\tw={self.w.shape}') inverse = torch.istft(S.unsqueeze(-1)", "device to initialize this layer. Default value is 'cpu' \"\"\"", "self.trainable==True: return torch.sqrt(spec+1e-8) # prevent Nan gradient when sqrt(0) due", "Parameters ---------- x : torch tensor Input signal should be", "# self.mel_basis = torch.nn.Parameter(self.mel_basis) # if trainable_STFT==True: # self.wsin =", "= Vc[:, :, :, 0] * W_r - Vc[:, :,", "torch.tensor(cqt_kernels.real).unsqueeze(1) cqt_kernels_imag = torch.tensor(cqt_kernels.imag).unsqueeze(1) if trainable: cqt_kernels_real = torch.nn.Parameter(cqt_kernels_real, requires_grad=trainable_kernels)", "the value is the same as the forward STFT. fmax", "the correct frequency. n_fft : int The window size for", "use with extra care. Parameters ---------- n_fft : int The", "Spectrogram.MelSpectrogram() >>> specs = spec_layer(x) \"\"\" def __init__(self, sr=22050, n_fft=2048,", "torch.tensor(mel_basis) if verbose==True: print(\"STFT filter created, time used = {:.4f}", "str The windowing function for iSTFT. It uses ``scipy.signal.get_window``, please", "spectrogram, please consider using Griffin-Lim.\" if onesided: X = extend_fbins(X)", "CQT relative to the downsampled input is equivalent to the", "kernel done, n_fft = \",self.n_fft) # Preparing kernels for Short-Time", "The padding method. Default value is 'reflect'. 
trainable : bool", "of the STFT result, shape = ``(num_samples, freq_bins,time_steps)``; ``Complex`` will", "for the complex value spectrograms. If you have the magnitude", "if verbose==True: print(\"Creating STFT kernels ...\", end='\\r') start = time()", "* window wsin = torch.tensor(wsin) wcos = torch.tensor(wcos) if verbose==True:", "freq_bins, time_steps)``; 'Complex' will return the STFT result in complex", "of frequency bins. Default is ``None``, which means ``n_fft//2+1`` bins.", "and the end of the output. If your input spectrograms", "wsin = kernel_sin * window wcos = kernel_cos * window", "due to output=0 else: return torch.sqrt(spec) elif output_format=='Complex': return torch.stack((spec_real,-spec_imag),", "spacing. center : bool Putting the STFT keneral at the", "mel_basis = self.mel_basis.detach() shape = melspec.shape batch_size, n_mels, time =", "verbose==True: print(\"Creating CQT kernels ...\", end='\\r') start = time() cqt_kernels,", "2)``; 'Phase' will return the phase of the STFT reuslt,", "create_lowpass_filter( # band_center = 0.50, # kernelLength=256, # transitionBandwidth=0.001)) lowpass_filter", "from nnAudio.librosa_functions import * from nnAudio.utils import * sz_float =", "def __init__(self, sr=22050, n_mfcc=20, norm='ortho', verbose=True, ref=1.0, amin=1e-10, top_db=80.0, **kwargs):", "for the lowest frequency bin. If freq_scale is ``no``, this", "trainable if trainable_window: window_mask = torch.nn.Parameter(window_mask, requires_grad=trainable_window) self.register_parameter('window_mask', window_mask) else:", "it # same mag as 1992 CQT = CQT*self.downsample_factor #", "inverse function only works for 1 single frame. i.e. input", "for the top octave kernel self.fmin_t = fmin*2**(self.n_octaves-1) remainder =", "False to save GPU memory. 
fmin : int The starting", "torch.cos(torch.atan2(CQT[:,:,:,1],CQT[:,:,:,0])) phase_imag = torch.sin(torch.atan2(CQT[:,:,:,1],CQT[:,:,:,0])) return torch.stack((phase_real,phase_imag), -1) def extra_repr(self) ->", "mag as 1992 CQT = CQT*self.downsample_factor if output_format=='Magnitude': # Getting", "type, either ``Magnitude``, ``Complex``, or ``Phase``. The output_format can also", "remainder==0: # Calculate the top bin frequency fmax_t = self.fmin_t*2**((bins_per_octave-1)/bins_per_octave)", "freq_bins self.trainable = trainable self.pad_amount = self.n_fft // 2 self.window", "can be used later in inverse self.register_buffer('window_mask', window_mask.unsqueeze(0).unsqueeze(-1)) if verbose==True:", "which is same as the normalization used in librosa. window", "= time() # Create filter windows for stft kernel_sin, kernel_cos,", "\", remainder) if remainder==0: # Calculate the top bin frequency", "= real.squeeze(-2)*self.window_mask # Normalize the amplitude with n_fft real /=", "window sumsqure for division # Only need to create this", "W_i = torch.sin(k) V = Vc[:, :, :, 0] *", "---------- S : torch tensor Spectrogram of the shape ``(batch,", "equavalent to the next lower octave. The kernel creation process", "- b2 real = real.squeeze(-2)*self.window_mask # Normalize the amplitude with", "# Adjusting the top minium bins if fmax_t > sr/2:", "win_length=None, freq_bins=None, hop_length=None, window='hann', freq_scale='no', center=True, pad_mode='reflect', iSTFT=False, fmin=50, fmax=6000,", "'hann'. center : bool Putting the CQT keneral at the", "the small CQT kernel. Everytime the input audio is downsampled,", "nn.ReflectionPad1d(self.n_fft//2) def forward(self,x,output_format=None): \"\"\" Convert a batch of waveforms to", "the value is the same as the forward STFT. center", ": int Number of bins per octave. Default is 12.", "= broadcast_dim(x_imag) x_real.transpose_(1,2) # Prepare the right shape to do", "2. ``(num_audio, len_audio)``\\n 3. 
``(num_audio, 1, len_audio)`` It will be", "If you have the magnitude spectrograms, please use :func:`~nnAudio.Spectrogram.Griffin_Lim`. The", "the CQT kernels will be updated during model training. Default", "conv1d # remove redundant parts spec_real = spec_real[:, :self.freq_bins, :]", "device to initialize this layer. Default value is 'cpu'. Returns", "returns a tensor of spectrograms. shape = ``(num_samples, freq_bins,time_steps)`` if", "output_format or self.output_format x = broadcast_dim(x) if self.center: if self.pad_mode", "Mel-frequency cepstral coefficients norm : string The default value is", "time used = {:.4f} seconds\".format(time()-start)) # Calculate num of filter", "super().__init__() self.stride = hop_length self.center = center self.pad_mode = pad_mode", "== 'reflect': self.padding = nn.ReflectionPad1d(self.n_fft//2) def forward(self,x, output_format=None): \"\"\" Convert", "during model training. Default value is ``False``. trainable_window : bool", "# n_octaves determines how many resampling requires for the CQT", "a constant Q transform.” (1992). Parameters ---------- sr : int", "losses.append(loss.item()) loss.backward() optimizer.step() # Check conditions if not loss.isfinite(): raise", "or not. Default is ``False`` trainable_CQT : bool Determine if", "early_downsample_filter, \\ self.earlydownsample = get_early_downsample_params(sr, hop_length, fmax_t, Q, self.n_octaves, verbose)", "Returns ------- MFCCs : torch.tensor It returns a tensor of", "Default is ``None``, which means ``n_fft//2+1`` bins Please make sure", "``n_bins`` will be ignored and ``n_bins`` will be calculated automatically.", "# if trainable_STFT==True: # self.wsin = torch.nn.Parameter(self.wsin) # self.wcos =", "waveforms. Examples -------- >>> spec_layer = Spectrogram.iSTFT() >>> specs =", "be ignored if ``fmax`` is not ``None``. 
bins_per_octave : int", "bins_per_octave)) # print(\"n_octaves = \", self.n_octaves) # Calculate the lowest", "# Getting CQT Amplitude CQT = torch.sqrt(CQT_real.pow(2)+CQT_imag.pow(2)) else: CQT =", "= a1 - b2 real = real.squeeze(-2)*self.window_mask # Normalize the", "is inferred from the ``n_bins`` and ``bins_per_octave``. If ``fmax`` is", "We will activate early downsampling later if possible self.trainable =", "the gradients for STFT kernels will also be caluclated and", "the tensor to the shape that fits conv1d self.register_buffer('lowpass_filter', lowpass_filter[None,None,:])", "== True: # Do early downsampling if this argument is", "transform is calcuated to obtain the final MFCCs. Therefore, the", "= {:.4f} seconds\".format(time()-start)) def forward(self,x, output_format=None): \"\"\" Convert a batch", "on Applications of Signal Processing to Audio and Acoustics (pp.", "= nn.ConstantPad1d(self.pad_amount, 0) elif self.pad_mode == 'reflect': if self.num_samples <", "cqt_kernel is already in the frequency domain cqt_kernels_real = torch.tensor(basis.real.astype(np.float32)).unsqueeze(1)", "window = create_fourier_kernels(self.n_fft, window='ones', freq_scale='no') wsin = kernel_sin * window", "to reconstruct the waveforms inverse = torch.istft(S.unsqueeze(-1) * angles, self.n_fft,", ": torch.tensor It returns a tensor of spectrograms. shape =", "* 2 V[:, :, 1:] /= np.sqrt(N / 2) *", "dtype=torch.float32, device=mel_basis.device).normal_().clamp_(eps) else: pred_stft = (torch.pinverse(mel_basis) @ melspec).clamp(eps) pred_stft =", "encountered in Mel -> STFT optimization\") if loss_threshold and loss", "or self.verbose # SGD arguments default_sgd_kwargs = dict(lr=1e3, momentum=0.9) if", "Calculate num of filter requires for the kernel # n_octaves", "self.center = center self.pad_amount = self.n_fft // 2 self.refresh_win =", "Watch out for the positive and negative signs # ifft", "str The padding method. Default value is 'reflect'. 
inverse :", "why I call it version 2. [1] Brown, <NAME>. and", "signal. This algorithm first extracts Mel spectrograms from the audio", "the following classes # class DFT(torch.nn.Module): \"\"\" Experimental feature before", "of a constant Q transform.” (1992). Early downsampling factor is", "assert X.dim()==4 , \"Inverse iSTFT only works for complex number,\"", "back another half if onesided: X = extend_fbins(X) # extend", "value is 'hann'. center : bool Putting the STFT keneral", "for 1 single frame. i.e. input shape = (batch, n_fft,", "sr, self.fmin_t, n_filters, bins_per_octave, norm=basis_norm, topbin_check=False) # For normalization in", "pad_mode='reflect', momentum=0.99, device='cpu'): super().__init__() self.n_fft = n_fft self.win_length = win_length", "pred_mel = mel_basis @ pred_stft loss = loss_fn(pred_mel, melspec) losses.append(loss.item())", ">>> spec_layer = Spectrogram.MFCC() >>> mfcc = spec_layer(x) \"\"\" def", "Default value is 2048. freq_bins : int Number of frequency", "is make it # same mag as 1992 CQT =", "self.register_buffer('kernel_sin', kernel_sin) self.register_buffer('kernel_cos', kernel_cos) # Decide if the window function", "freq_bins, timesteps)\" # Initializing Random Phase rand_phase = torch.randn(*S.shape, device=self.device)", "``False``, the time index is the beginning of the CQT", "0Hz and end at Nyquist frequency with linear spacing. center", "frequency spectrum, we make a small CQT kernel covering only", "is used, the Mel scale is logarithmic. The default value", "will activate early downsampling later if possible self.trainable = trainable", "conv1d(x, self.wsin, stride=self.stride) real = conv1d(x, self.wcos, stride=self.stride) return (real,", "Normalization for the CQT kernels. 
``1`` means L1 normalization, and", "n_mels=128, hop_length=512, window='hann', center=True, pad_mode='reflect', power=2.0, htk=False, fmin=0.0, fmax=None, norm=1,", "= center self.pad_mode = pad_mode self.output_format = output_format # creating", "spacing between each frequency bin. When `linear` or `log` is", "time used = {:.4f} seconds\".format(time()-start)) if trainable_STFT: wsin = torch.nn.Parameter(wsin,", "= CQT[:,-self.n_bins:,:] # Removing unwanted bottom bins # print(\"downsample_factor =", "to the :func:`~nnAudio.Spectrogram.CQT1992v2` documentation\"\"\" pass # The section below is", "= torch.tensor(kernel_cos, dtype=torch.float).unsqueeze(-1) # Decide if the Fourier kernels are", "cqt_kernels_imag) else: self.register_buffer('cqt_kernels_real', cqt_kernels_real) self.register_buffer('cqt_kernels_imag', cqt_kernels_imag) # If center==True, the", "\\ torch.sqrt(self.lenghts.view(-1,1)) if output_format=='Magnitude': if self.trainable==False: # Getting CQT Amplitude", "the STFT kenrels are trainable or not. If ``True``, the", "and the CQT kernels will be updated during model training.", "Recalculating the window sum square. If you have an input", "x = padding(x) # CQT CQT_real = conv1d(x, self.cqt_kernels_real, stride=self.hop_length)", "= torch.nn.Parameter(kernel_sin, requires_grad=trainable_kernels) kernel_cos = torch.nn.Parameter(kernel_cos, requires_grad=trainable_kernels) self.register_parameter('kernel_sin', kernel_sin) self.register_parameter('kernel_cos',", "broadcast to the right shape output_format : str Control the", "only works for the complex value spectrograms. If you have", "`torch.fft` was made avaliable. The inverse function only works for", "efficient version. [1] Brown, <NAME>. and <NAME>. “An efficient algorithm", "= conv1d(x_real, self.wsin, stride=self.stride) b1 = conv1d(x_imag, self.wcos, stride=self.stride) b2", "trainable or not. If ``True``, the gradients for STFT kernels", "filter banks maps the n_fft to mel bins. 
Default value", "STFT keneral at the center of the time-step or not.", "signal lies in the real part real = a1 -", "kernels for CQT Q = 1/(2**(1/bins_per_octave)-1) print(\"Creating CQT kernels ...\",", "self.stride, self.n_fft).flatten() self.nonzero_indices = (self.w_sum>1e-10) else: pass real[:, self.nonzero_indices] =", "frequency, \\ please reduce the n_bins'.format(fmax_t)) if self.earlydownsample == True:", "``length=None``, which will remove ``n_fft//2`` samples from the start and", "start at 0Hz and end at Nyquist frequency with linear", "padding method. Default value is 'reflect'. trainable : bool Determine", "the window function is trainable or not. Default value is", "Mel spectrograms. Parameters ---------- x : torch tensor Input signal", "= {}'.format( (*self.wcos.shape,), (*self.cqt_kernels_real.shape,) ) class CQT2010(torch.nn.Module): \"\"\" This algorithm", "= n_mfcc def _power_to_db(self, S): ''' Refer to https://librosa.github.io/librosa/_modules/librosa/core/spectrum.html#power_to_db for", "to initialize this layer. Default value is 'cpu' Returns -------", "stride=self.hop_length) * \\ torch.sqrt(self.lenghts.view(-1,1)) CQT_imag = -conv1d(x, self.cqt_kernels_imag, stride=self.hop_length) *", "trainable kernel_sin = torch.nn.Parameter(kernel_sin, requires_grad=trainable_kernels) kernel_cos = torch.nn.Parameter(kernel_cos, requires_grad=trainable_kernels) self.register_parameter('kernel_sin',", "MUSIC PROCESSING.” (2010). [2] Brown, <NAME>. and <NAME>. “An efficient", "self.stride) # Prepare the window sumsqure for division # Only", "sr=22050, trainable_kernels=False, trainable_window=False, verbose=True, refresh_win=True): super().__init__() # Trying to make", "padding = nn.ReflectionPad1d(self.n_fft//2) x = padding(x) imag = conv1d(x, self.wsin,", "the shape for batch-wise-time-wise multiplication # Create filter windows for", "rebuilt # Saving previous rebuilt magnitude spec # spec2wav conversion", "or ``Phase``. Default value is ``Complex``. 
\"\"\" output_format = output_format", "= (num_samples, freq_bins,time_steps, 2)`` if ``output_format='Complex' or 'Phase'``; Examples --------", "norm # Now norm is used to normalize the final", "of spectrograms. shape = ``(num_samples, freq_bins,time_steps)`` if ``output_format='Magnitude'``; shape =", "= CQT*torch.sqrt(self.lenghts.view(-1,1,1)) if output_format=='Magnitude': # Getting CQT Amplitude return torch.sqrt(CQT.pow(2).sum(-1))", "= CQT*self.downsample_factor if output_format=='Magnitude': # Getting CQT Amplitude return torch.sqrt(CQT.pow(2).sum(-1))", "training. Default value is ``False``. trainable_STFT : bool Determine if", "shows layer information. If ``False``, it suppresses all prints. device", "'Phase'``; Examples -------- >>> spec_layer = Spectrogram.CQT1992v2() >>> specs =", "pred_stft.grad.max() < grad_threshold: if verbose: print(f\"Target max gradient of {grad_threshold}", "random_start=False, sgd_kwargs=None, eps=1e-12, return_extras=False, verbose=None): \"\"\" Best-attempt spectrogram inversion \"\"\"", "window=window, freq_scale='no', center=center, pad_mode=pad_mode, sr=sr, trainable=trainable_STFT, output_format=\"Magnitude\", verbose=verbose, **kwargs) #", "function for iSTFT. It uses ``scipy.signal.get_window``, please refer to scipy", "int(np.ceil(float(n_bins) / bins_per_octave)) # print(\"n_octaves = \", self.n_octaves) # Calculate", "window Returns ------- MFCCs : torch.tensor It returns a tensor", "factor for normalization ### --------------------------- Spectrogram Classes ---------------------------### class STFT(torch.nn.Module):", "2)``. 
The complex number is stored as ``(real, imag)`` in", "window='ones', freq_scale='no') wsin = kernel_sin * window wcos = kernel_cos", "if trainable_mel: # Making everything nn.Parameter, so that this model", "nn.ConstantPad1d(self.n_fft//2, 0) elif self.pad_mode == 'reflect': self.padding = nn.ReflectionPad1d(self.n_fft//2) def", "\"\"\" def loss_fn(pred, target): pred = pred.unsqueeze(1) if pred.ndim ==", "spacing can be controlled by ``fmin`` and ``fmax``. If 'no'", "the lowest CQT bin. Default is 32.70Hz, which coresponds to", "trainable_kernels=False, trainable_window=False, verbose=True, refresh_win=True): super().__init__() # Trying to make the", "window=window, freq_scale=freq_scale, fmin=fmin, fmax=fmax, sr=sr, verbose=verbose) kernel_sin = torch.tensor(kernel_sin, dtype=torch.float)", "window='hann', pad_mode='reflect', earlydownsample=True, trainable=False, output_format='Magnitude', verbose=True): super().__init__() self.norm = norm", "is not None: if self.top_db < 0: raise ParameterError('top_db must", "or {} stft_inversion_params = stft_inversion_params or {} if mel_inversion_params: mel_inversion_params", "CQT spectrograms. Parameters ---------- x_real : torch tensor Real part", "shape = melspec.shape batch_size, n_mels, time = shape[0], shape[-2], shape[-1]", "calculation of a constant Q transform.” (1992). Early downsampling factor", "as ``(real, imag)`` in the last axis. Default value is", "kernel_cos = torch.tensor(kernel_cos, dtype=torch.float).unsqueeze(-1) # Decide if the Fourier kernels", "self.register_buffer('lenghts', lenghts) cqt_kernels_real = torch.tensor(cqt_kernels.real).unsqueeze(1) cqt_kernels_imag = torch.tensor(cqt_kernels.imag).unsqueeze(1) if trainable:", "``output_format='Magnitude'``; shape = ``(num_samples, freq_bins,time_steps, 2)`` if ``output_format='Complex' or 'Phase'``;", "class is to convert spectrograms back to waveforms. 
It only", "many resampling requires for the CQT n_filters = min(bins_per_octave, n_bins)", "the forward STFT. fmax : int The ending frequency for", "nn.ReflectionPad1d(self.kernel_width//2) x = padding(x) # CQT CQT_real = conv1d(x, self.cqt_kernels_real,", "librosa if win_length==None: win_length = n_fft if hop_length==None: hop_length =", "normalizing basis self.hop_length = hop_length self.pad_mode = pad_mode self.n_bins =", "* np.pi * rand_phase) angles[:,:,:,1] = torch.sin(2 * np.pi *", "more computational and memory efficient version. [1] Brown, <NAME>. and", ":self.freq_bins, :] if output_format=='Magnitude': spec = spec_real.pow(2) + spec_imag.pow(2) if", "angles = torch.empty((*S.shape,2), device=self.device) angles[:, :,:,0] = torch.cos(2 * np.pi", "the right shape \"\"\" output_format = output_format or self.output_format x", "and imag part. signal lies in the real part real", "Default is ``None`` n_bins : int The total numbers of", "angles[:,:,:] = rebuilt[:,:,:] - (self.momentum / (1 + self.momentum)) *", "self.register_parameter('wsin', wsin) self.register_parameter('wcos', wcos) # Prepare the shape of window", "* rand_phase) angles[:,:,:,1] = torch.sin(2 * np.pi * rand_phase) #", "a batch of waveforms to spectrums. Parameters ---------- x :", "class CQT2010v2(torch.nn.Module): \"\"\"This function is to calculate the CQT of", "0) # elif self.pad_mode == 'reflect': # padding = nn.ReflectionPad1d(self.n_fft//2)", "is ``Complex``. \"\"\" output_format = output_format or self.output_format self.num_samples =", "of magnitude spectrograms to waveforms. Parameters ---------- S : torch", "``0.99``. 
device : str Choose which device to initialize this", "Fourier kernels window_mask = torch.tensor(window_mask) wsin = kernel_sin * window_mask", "kenral from time domain to freq domain # These cqt_kernel", "= self.to_stft(melspec, **mel_inversion_params) recon_audio = self.stft.inverse(recon_stft, **stft_inversion_params) return recon_audio class", "spectrogram : torch.tensor It returns a tensor of spectrograms. shape", "hop_length==None: hop_length = int(win_length // 4) self.n_fft = n_fft self.win_length", "have the magnitude spectrograms, please use :func:`~nnAudio.Spectrogram.Griffin_Lim`. The parameters (e.g.", "shape of window mask so that it can be used", "``False`` norm : int Normalization for the CQT kernels. ``1``", "be controlled by ``fmin`` and ``fmax``. If 'no' is used,", "waveforms to Mel spectrograms. Parameters ---------- x : torch tensor", "_power_to_db(self, S): ''' Refer to https://librosa.github.io/librosa/_modules/librosa/core/spectrum.html#power_to_db for the original implmentation.", "torch.tensor(cqt_kernels.imag).unsqueeze(1) if trainable: cqt_kernels_real = torch.nn.Parameter(cqt_kernels_real, requires_grad=trainable_kernels) cqt_kernels_imag = torch.nn.Parameter(cqt_kernels_imag,", "length is None: if self.center: real = real[:, self.pad_amount:-self.pad_amount] else:", "specs = spec_layer(x) \"\"\" def __init__(self, sr=22050, hop_length=512, fmin=220, fmax=None,", "hop//2 x_down = downsampling_by_2(x_down, self.lowpass_filter) CQT1 = get_cqt_complex(x_down, self.wcos, self.wsin,", "same memory... kernel_sin_inv = torch.cat((kernel_sin, -kernel_sin[1:-1].flip(0)), 0) kernel_cos_inv = torch.cat((kernel_cos,", "for complex number,\" \\ \"make sure our tensor is in", "'reflect'. 
inverse : bool To activate the iSTFT module or", "deal with the filter and other tensors def __init__(self, sr=22050,", "waveforms inverse = torch.istft(S.unsqueeze(-1) * angles, self.n_fft, self.hop_length, win_length=self.win_length, window=self.w,", "stride=self.stride) a2 = conv1d(x_real, self.wsin, stride=self.stride) b1 = conv1d(x_imag, self.wcos,", "fourier_imag = conv1d(x, self.wsin, stride=self.hop_length) # CQT CQT_real, CQT_imag =", "unwanted top bins if self.norm: CQT = CQT/self.n_fft*torch.sqrt(self.lenghts.view(-1,1,1)) else: CQT", "grad_threshold: if verbose: print(f\"Target max gradient of {grad_threshold} reached. Stopping", "lenghts) cqt_kernels = fft(cqt_kernels)[:,:self.kernel_width//2+1] print(\"CQT kernels created, time used =", "original 1992 algorithm, that is why I call it version", "x_real = padding(x_real) # x_imag = padding(x_imag) # Watch out", "please use ``onesided=True``, else use ``onesided=False`` length : int To", "1992 algorithm, that is why I call it version 2.", "forward(self, S): \"\"\" Convert a batch of magnitude spectrograms to", "clips, then the discrete cosine transform is calcuated to obtain", "# fudge factor for normalization ### --------------------------- Spectrogram Classes ---------------------------###", "Default value is ``None`` which is equivalent to ``n_fft//4``. Please", "signal. x_imag : torch tensor Imaginary part of the signal.", "``(num_samples, freq_bins,time_steps)`` if ``output_format='Magnitude'``; shape = ``(num_samples, freq_bins,time_steps, 2)`` if", "return recon_audio class MFCC(torch.nn.Module): \"\"\"This function is to calculate the", "padding if length is None: if self.center: real = real[:,", "to spectrums. 
Parameters ---------- x : torch tensor Input signal", "Spectrogram.CQT2010v2() >>> specs = spec_layer(x) \"\"\" # To DO: #", "start = time() basis, self.n_fft, lenghts = create_cqt_kernels(Q, sr, self.fmin_t,", "librosa CQT = CQT*torch.sqrt(self.lenghts.view(-1,1,1)) if output_format=='Magnitude': if self.trainable==False: # Getting", "The number of Mel-frequency cepstral coefficients norm : string The", "self.iSTFT, self.trainable ) class MelSpectrogram(torch.nn.Module): \"\"\"This function is to calculate", "= top_db self.n_mfcc = n_mfcc def _power_to_db(self, S): ''' Refer", "'Phase'``; Examples -------- >>> spec_layer = Spectrogram.STFT() >>> specs =", "= Spectrogram.MFCC() >>> mfcc = spec_layer(x) \"\"\" def __init__(self, sr=22050,", "= nn.ReflectionPad1d(self.kernel_width//2) x = padding(x) # STFT fourier_real = conv1d(x,", "elif output_format=='Complex': return CQT elif output_format=='Phase': phase_real = torch.cos(torch.atan2(CQT[:,:,:,1],CQT[:,:,:,0])) phase_imag", "4) self.n_fft = n_fft self.win_length = win_length self.stride = hop_length", "automatically if the input follows these 3 shapes. Most of", "The starting frequency for the lowest Mel filter bank. fmax", "(real/self.n_fft, imag/self.n_fft) class iSTFT(torch.nn.Module): \"\"\"This class is to convert spectrograms", "output_format='Magnitude', verbose=True): super().__init__() self.norm = norm # Now norm is", "output_format : str Determine the return type. 
``Magnitude`` will return", "If ``True``, the gradients for Mel filter banks will also", "verbose=False) window_mask = get_window(window,int(win_length), fftbins=True) # For inverse, the Fourier", "= torch.tensor(window_mask) wsin = kernel_sin * window_mask wcos = kernel_cos", "Putting the CQT keneral at the center of the time-step", "window_mask = get_window(window,int(win_length), fftbins=True) # For inverse, the Fourier kernels", "window='hann', center=True, pad_mode='reflect'): super().__init__() # norm arg is not functioning", "x, norm=None): ''' Refer to https://github.com/zh217/torch-dct for the original implmentation.", "Please refer to :func:`~nnAudio.Spectrogram.CQT1992v2` for a more computational and memory", "bin. If freq_scale is ``no``, this argument does nothing. Please", "else: pass real[:, self.nonzero_indices] = real[:,self.nonzero_indices].div(self.w_sum[self.nonzero_indices]) # Remove padding if", "shows layer information. If ``False``, it suppresses all prints Returns", "octave CQT x_down = x # Preparing a new variable", "{:.4f} seconds\".format(time()-start)) def forward(self,x, output_format=None): \"\"\" Convert a batch of", "with the filter and other tensors def __init__(self, sr=22050, hop_length=512,", "signs # ifft = e^(+2\\pi*j)*X # ifft(X_real) = (a1, a2)", "= ``(num_samples, freq_bins,time_steps, 2)``; ``Phase`` will return the phase of", "fourier_real = conv1d(x, self.wcos, stride=self.hop_length) fourier_imag = conv1d(x, self.wsin, stride=self.hop_length)", "'kernel_sin_inv') != True) or (hasattr(self, 'kernel_cos_inv') != True): raise NameError(\"Please", "value is the same as the forward STFT. fmax :", "'ortho': V[:, :, 0] /= np.sqrt(N) * 2 V[:, :,", "value is 128. 
hop_length : int The hop (or stride)", "use ``onesided=False`` To make sure the inverse STFT has the", "htk=False, fmin=0.0, fmax=None, norm=1, trainable_mel=False, trainable_STFT=False, verbose=True, **kwargs): super().__init__() self.stride", "Normalize the amplitude with n_fft real /= (self.n_fft) # Overlap", "= fmin * 2.0 ** (np.r_[0:n_bins] / np.float(bins_per_octave)) lenghts =", "is make it same mag as 1992 CQT = CQT*self.downsample_factor", "avaliable. The inverse function only works for 1 single frame.", "CQT of the input signal. Input signal should be in", "suppresses all prints device : str Choose which device to", "phase_imag = torch.sin(torch.atan2(CQT[:,:,:,1],CQT[:,:,:,0])) return torch.stack((phase_real,phase_imag), -1) class CQT(CQT1992v2): \"\"\"An abbreviation", "for iSTFT. It uses ``scipy.signal.get_window``, please refer to scipy documentation", "is 'hann'. center : bool Putting the CQT keneral at", "pad_mode self.n_bins = n_bins self.earlydownsample = earlydownsample # We will", "x = broadcast_dim(x) spec = self.stft(x, output_format='Magnitude')**self.power melspec = torch.matmul(self.mel_basis,", "3. ``(num_audio, 1, len_audio)`` It will be automatically broadcast to", "result. basis_norm : int Normalization for the CQT kernels. ``1``", ":] if output_format=='Magnitude': spec = spec_real.pow(2) + spec_imag.pow(2) if self.trainable==True:", "# Overlap and Add algorithm to connect all the frames", "torch.cos(k) W_i = torch.sin(k) V = Vc[:, :, :, 0]", "filter banks will be updated during model training. Default value", "layer information. If ``False``, it suppresses all prints. device :", "is equavalent to the next lower octave. The kernel creation", "same as the forward STFT. 
center : bool Putting the", "spectrogram inversion \"\"\" def loss_fn(pred, target): pred = pred.unsqueeze(1) if", "phase # Using the final phase to reconstruct the waveforms", "fmax : float The frequency for the highest CQT bin.", "CQT_real = conv1d(x, self.cqt_kernels_real, stride=self.hop_length) * \\ torch.sqrt(self.lenghts.view(-1,1)) CQT_imag =", "x_imag = padding(x_imag) # Watch out for the positive and", "requires_grad=trainable_kernels) self.register_parameter('wsin', wsin) self.register_parameter('wcos', wcos) else: self.register_buffer('wsin', wsin) self.register_buffer('wcos', wcos)", "self.kernel_sin = torch.nn.Parameter(self.kernel_sin, requires_grad=self.trainable) # self.kernel_cos = torch.nn.Parameter(self.kernel_cos, requires_grad=self.trainable) #", "''' Refer to https://github.com/zh217/torch-dct for the original implmentation. ''' x", "of filter requires for the kernel # n_octaves determines how", "-> str: return 'n_mfcc = {}'.format( (self.n_mfcc) ) class CQT1992(torch.nn.Module):", "verbose==True: print(\"Creating STFT kernels ...\", end='\\r') start = time() kernel_sin,", "\"\"\" def __init__(self, sr=22050, hop_length=512, fmin=32.70, fmax=None, n_bins=84, bins_per_octave=12, norm=True,", ") # Broadcast the tensor to the shape that fits", "is very important for calculating the correct frequency. trainable_kernels :", "reuse the code from the 1992 alogrithm [2] [1] Schörkhuber,", "X_real.unsqueeze(1) X_imag_bc = X_imag.unsqueeze(1) a1 = conv2d(X_real_bc, self.kernel_cos_inv, stride=(1,1)) b2", "* from nnAudio.utils import * sz_float = 4 # size", "the same as the forward STFT. 
momentum : float The", "wcos) if trainable_CQT: cqt_kernels_real = torch.nn.Parameter(cqt_kernels_real, requires_grad=trainable_kernels) cqt_kernels_imag = torch.nn.Parameter(cqt_kernels_imag,", "n_mfcc=20, norm='ortho', verbose=True, ref=1.0, amin=1e-10, top_db=80.0, **kwargs): super().__init__() self.melspec_layer =", "verbose==True: print(\"Creating CQT kernels ...\", end='\\r') start = time() basis,", "self.cqt_kernels_real*=lenghts.unsqueeze(1)/self.kernel_width # Trying to normalize as librosa # self.cqt_kernels_imag*=lenghts.unsqueeze(1)/self.kernel_width print(\"Creating", "the correct frequency. n_mfcc : int The number of Mel-frequency", "spectrograms, please use :func:`~nnAudio.Spectrogram.Griffin_Lim`. The parameters (e.g. n_fft, window) need", "for batch-wise-time-wise multiplication # Create filter windows for inverse kernel_sin,", "(MFCCs) of the input signal. This algorithm first extracts Mel", "forward(self, x, output_format=None): \"\"\" Convert a batch of waveforms to", "self.pad_amount = self.n_fft // 2 self.refresh_win = refresh_win start =", "``torch.nn.Module``. 
Parameters ---------- sr : int The sampling rate for", "Creating lowpass filter and make it a torch tensor if", "# Trying to make the default setting same as librosa", "creating kernels for CQT Q = 1/(2**(1/bins_per_octave)-1) print(\"Creating CQT kernels", "n_mels, fmin, fmax, htk=htk, norm=norm) mel_basis = torch.tensor(mel_basis) if verbose==True:", "self.fmin_t, n_filters, bins_per_octave, norm=basis_norm, topbin_check=False) # For normalization in the", "trainable=False, output_format=\"Complex\", verbose=True): super().__init__() # Trying to make the default", "len_audio)`` It will be automatically broadcast to the right shape", "sr=22050): super().__init__() self.stride = hop_length self.center = center self.pad_mode =", "self.pad_mode == 'constant': padding = nn.ConstantPad1d(self.pad_amount, 0) elif self.pad_mode ==", "stride=self.stride) # Doing STFT by using conv1d # remove redundant", "fft(cqt_kernels)[:,:self.kernel_width//2+1] print(\"CQT kernels created, time used = {:.4f} seconds\".format(time()-start)) #", "Default is 12. norm : bool Normalization for the CQT", "the same memory... kernel_sin_inv = torch.cat((kernel_sin, -kernel_sin[1:-1].flip(0)), 0) kernel_cos_inv =", "**kwargs Other arguments for Melspectrogram such as n_fft, n_mels, hop_length,", "else: CQT = CQT*torch.sqrt(self.lenghts.view(-1,1,1)) if output_format=='Magnitude': # Getting CQT Amplitude", "the Mel scale is quasi-logarithmic. When ``True`` is used, the", "X[:, :, :, 1] # broadcast dimensions to support 2D", "in [1]. Instead of convoluting the STFT results with a", "print(\"n_octaves = \", self.n_octaves) # Calculate the lowest frequency bin", "sumsqure for division # Only need to create this window", "= MelSpectrogram(sr=sr, verbose=verbose, **kwargs) self.m_mfcc = n_mfcc # attributes that", "important for calculating the correct frequency. 
trainable : bool Determine", "padding = nn.ReflectionPad1d(self.pad_amount) x = padding(x) spec_imag = conv1d(x, self.wsin,", "Default value is ``Complex``. \"\"\" output_format = output_format or self.output_format", "fmin, n_bins, bins_per_octave, norm, window, fmax) self.register_buffer('lenghts', lenghts) cqt_kernels =", "function is trainable or not. Default value is ``False``. verbose", "int The window size. Default value is 2048. freq_bins :", "int The hop (or stride) size. Default value is 512.", "win_length=self.win_length, window=self.w, pad_mode=self.pad_mode) # Phase update rule angles[:,:,:] = rebuilt[:,:,:]", "Mel filter banks. The filter banks maps the n_fft to", "``False``. trainable_STFT : bool Determine if the STFT kenrels are", "torch.istft(S.unsqueeze(-1) * angles, self.n_fft, self.hop_length, win_length=self.win_length, window=self.w, center=self.center) return inverse", "Preparing kernels for Short-Time Fourier Transform (STFT) # We set", "mel_inversion_params = {**default_mel_inversion_params, **mel_inversion_params} if stft_inversion_params: stft_inversion_params = {**default_stft_inversion_params, **stft_inversion_params}", "The frequency for the lowest CQT bin. Default is 32.70Hz,", "unwanted bottom bins # print(\"downsample_factor = \",self.downsample_factor) # print(CQT.shape) #", ">>> spec_layer = Spectrogram.MelSpectrogram() >>> specs = spec_layer(x) \"\"\" def", "is perfect, please use with extra care. Parameters ---------- n_fft", "the return type. 'Magnitude' will return the magnitude of the", "# Using the final phase to reconstruct the waveforms inverse", "axis. Default value is 'Magnitude'. 
verbose : bool If ``True``,", "= center self.pad_amount = self.n_fft // 2 self.refresh_win = refresh_win", "self.trainable_STFT = trainable_STFT self.verbose = verbose # Preparing for the", "kernels created, time used = {:.4f} seconds\".format(time()-start)) def forward(self,x, output_format=None):", "fmax=fmax, sr=sr) self.wsin = torch.tensor(wsin, dtype=torch.float) self.wcos = torch.tensor(wcos, dtype=torch.float)", "optimizer.zero_grad() pred_mel = mel_basis @ pred_stft loss = loss_fn(pred_mel, melspec)", "shape = ``(num_samples, freq_bins,time_steps)``. Examples -------- >>> spec_layer = Spectrogram.MelSpectrogram()", "time() # Create filter windows for stft kernel_sin, kernel_cos, self.bins2freq,", "is trainable or not. Default is ``False`` norm : int", "downsampling are more or less the same except in the", "and W_i trainable here k = - torch.arange(N, dtype=x.dtype, device=x.device)[None,", "iSTFT module by setting `iSTFT=True` if you want to use", "time() # Creating kernel for mel spectrogram start = time()", "freq_scale='no', center=True, pad_mode='reflect', iSTFT=False, fmin=50, fmax=6000, sr=22050, trainable=False, output_format=\"Complex\", verbose=True):", "for calculating the correct frequency. trainable_kernels : bool Determine if", "else: pass def forward(self, X, onesided=False, length=None, refresh_win=None): \"\"\" If", "The windowing function for STFT. It uses ``scipy.signal.get_window``, please refer", "frequency bins, please use ``onesided=True``, else use ``onesided=False`` To make", "**kwargs): super().__init__() self.stride = hop_length self.center = center self.pad_mode =", "print(\"STFT filter created, time used = {:.4f} seconds\".format(time()-start)) print(\"Mel filter", "inferred autommatically if the input follows these 3 shapes. Most", "self.center = center self.pad_mode = pad_mode self.n_fft = n_fft self.power", "rate for the input audio. 
It is used to calucate", "self.bins2freq = create_fourier_kernels(n_fft=n_fft, freq_bins=n_fft, window=window, freq_scale=freq_scale, fmin=fmin, fmax=fmax, sr=sr) self.wsin", "freq_bins=n_fft, window=window, freq_scale=freq_scale, fmin=fmin, fmax=fmax, sr=sr, verbose=False) window_mask = get_window(window,int(win_length),", "shape = ``(num_samples, n_mfcc, time_steps)``. Examples -------- >>> spec_layer =", "time used = {:.4f} seconds\".format(time()-start)) print(\"Mel filter created, time used", "window_mask.unsqueeze(0).unsqueeze(-1)) if verbose==True: print(\"STFT kernels created, time used = {:.4f}", "Default value is 2048 n_mels : int The number of", ": str The padding method. Default value is 'reflect'. inverse", "= power self.trainable_mel = trainable_mel self.trainable_STFT = trainable_STFT self.verbose =", "forward(self, x): \"\"\" Convert a batch of waveforms to MFCC.", "= device if win_length==None: self.win_length=n_fft else: self.win_length=win_length if hop_length==None: self.hop_length", "real /= (self.n_fft) # Overlap and Add algorithm to connect", "forward(self, x): \"\"\" Convert a batch of waveforms to Mel", "argument does nothing. Please make sure the value is the", "output_format=None): \"\"\" Convert a batch of waveforms to spectrograms. Parameters", "also be calculated and the Mel filter banks will be", "= {}'.format( (*self.wcos.shape,), (*self.cqt_kernels_real.shape,) ) class CQT1992v2(torch.nn.Module): \"\"\"This function is", "``None``, then the argument ``n_bins`` will be ignored and ``n_bins``", "containing all the spectrogram classes \"\"\" # 0.2.0 import torch", "is very important for calculating the correct frequency. trainable :", "or self.output_format x = broadcast_dim(x) if self.center: if self.pad_mode ==", "of the time-step or not. If ``False``, the time index", "the forward STFT. 
sr : int The sampling rate for", "(fourier_real, fourier_imag)) CQT = torch.stack((CQT_real,-CQT_imag),-1) if self.norm: CQT = CQT/self.kernel_width*torch.sqrt(self.lenghts.view(-1,1,1))", "mag as 1992 CQT = CQT*self.downsample_factor # Normalize again to", "# TODO: activate early downsampling later if possible # This", "verbose: print(f\"Target error of {loss_threshold} reached. Stopping optimization.\") break if", "* angles, self.n_fft, self.hop_length, win_length=self.win_length, window=self.w, center=self.center) # wav2spec conversion", "is the same as the forward STFT. hop_length : int", "onesided: X = extend_fbins(X) # extend freq X_real, X_imag =", "trainable or not. Default is ``False`` trainable_CQT : bool Determine", "# Creating kernel for mel spectrogram start = time() mel_basis", "function is trainable if trainable_window: window_mask = torch.nn.Parameter(window_mask, requires_grad=trainable_window) self.register_parameter('window_mask',", "<NAME>., <NAME>., & <NAME>. “A fast Griffin-Lim algorithm,” IEEE Workshop", "complex_mul((self.cqt_kernels_real, self.cqt_kernels_imag), (fourier_real, fourier_imag)) CQT = torch.stack((CQT_real,-CQT_imag),-1) if self.norm: CQT", "CQT = CQT*self.downsample_factor # Normalize again to get same result", "= conv1d(x_imag, self.wsin, stride=self.stride) imag = a2+b1 real = a1-b2", "self.register_buffer('wsin', wsin) self.register_buffer('wcos', wcos) if trainable_CQT: cqt_kernels_real = torch.nn.Parameter(cqt_kernels_real, requires_grad=trainable_kernels)", "or not. If ``True``, the gradients for Mel filter banks", "time) if random_start: pred_stft_shape = (batch_size, n_freq, time) pred_stft =", "efficient algorithm for the calculation of a constant Q transform.”", "\"\"\" Convert a batch of waveforms to spectrums. 
Parameters ----------", "not None: if self.top_db < 0: raise ParameterError('top_db must be", "if self.pad_mode == 'constant': # padding = nn.ConstantPad1d(self.n_fft//2, 0) #", "= kernel_sin * window_mask wcos = kernel_cos * window_mask if", "= time() # print(\"Q = {}, fmin_t = {}, n_filters", "= default_sgd_kwargs mel_basis = self.mel_basis.detach() shape = melspec.shape batch_size, n_mels,", "the beginning of the CQT kernel, if ``True``, the time", "def extra_repr(self) -> str: return 'STFT kernel size = {},", "conversion rebuilt = torch.stft(inverse, self.n_fft, self.hop_length, win_length=self.win_length, window=self.w, pad_mode=self.pad_mode) #", "2 V = 2 * V return V.permute(0,2,1) # swapping", "time used = {:.4f} seconds\".format(time()-start)) else: self.downsample_factor=1. # Preparing CQT", "inverse is perfect, please use with extra care. Parameters ----------", "waveforms based on the \"fast Griffin-Lim\"[1]. This Griffin Lim is", "= downsampling_by_2(x_down, self.lowpass_filter) CQT1 = get_cqt_complex(x_down, self.wcos, self.wsin, hop, self.padding)", "Default is ``1``, which is same as the normalization used", ": torch tensor Spectrogram of the shape ``(batch, n_fft//2+1, timesteps)``", "TODO: Can make the W_r and W_i trainable here k", "self.window = window self.win_length = win_length self.iSTFT = iSTFT self.trainable", "the original implmentation. ''' x = x.permute(0,2,1) # make freq", "(num_samples, freq_bins,time_steps)`` if ``output_format='Magnitude'``; ``shape = (num_samples, freq_bins,time_steps, 2)`` if", "self.w_sum = torch_window_sumsquare(self.window_mask.flatten(), X.shape[2], self.stride, self.n_fft).flatten() self.nonzero_indices = (self.w_sum>1e-10) else:", "frequency bins, please use ``onesided=True``, else use ``onesided=False`` length :", "If freq_scale is ``no``, this argument does nothing. fmax :", "# For inverse, the Fourier kernels do not need to", "forward(self,x): \"\"\" Convert a batch of waveforms to spectrums. 
Parameters", "numbers of CQT bins. Default is 84. Will be ignored", "torch.stack((phase_real,phase_imag), -1) def forward_manual(self,x): \"\"\" Method for debugging \"\"\" x", "NameError(\"Please activate the iSTFT module by setting `iSTFT=True` if you", "Returns ------- spectrogram : torch.tensor It returns a tensor of", "fmin * 2.0 ** (np.r_[0:n_bins] / np.float(bins_per_octave)) lenghts = np.ceil(Q", "n_fft if hop_length==None: hop_length = int(win_length // 4) self.output_format =", "“A fast Griffin-Lim algorithm,” IEEE Workshop on Applications of Signal", "Module containing all the spectrogram classes \"\"\" # 0.2.0 import", "will be calculated automatically. Default is ``None`` n_bins : int", "requires for the CQT n_filters = min(bins_per_octave, n_bins) self.n_octaves =", "to support 2D Conv kernel_sin = torch.tensor(kernel_sin, dtype=torch.float).unsqueeze(-1) kernel_cos =", "raise OverflowError(\"Overflow encountered in Mel -> STFT optimization\") if loss_threshold", "all the spectrogram classes \"\"\" # 0.2.0 import torch import", ": str The windowing function for iSTFT. It uses ``scipy.signal.get_window``,", "= win_length self.n_iter = n_iter self.center = center self.pad_mode =", "Default value is ``None`` which is equivalent to ``n_fft//4``. window", "for CQT Q = 1/(2**(1/bins_per_octave)-1) if verbose==True: print(\"Creating CQT kernels", "For normalization in the end freqs = fmin * 2.0", "= torch.tensor(kernel_sin * window) wcos = torch.tensor(kernel_cos * window) cqt_kernels_real", "128. hop_length : int The hop (or stride) size. Default", "or self.output_format self.num_samples = x.shape[-1] x = broadcast_dim(x) if self.center:", "X.dim()==4 , \"Inverse iSTFT only works for complex number,\" \\", "method proposed in [1]. I slightly modify it so that", "value is 2048. 
freq_bins : int Number of frequency bins.", "# ifft(X_real) = (a1, a2) # ifft(X_imag)*1j = (b1, b2)*1j", "output_format=='Magnitude': # Getting CQT Amplitude return torch.sqrt(CQT.pow(2).sum(-1)) elif output_format=='Complex': return", "the ``forward`` method. verbose : bool If ``True``, it shows", "value is the same as the forward STFT. fmin :", "2)`` if ``output_format='Complex' or 'Phase'``; Examples -------- >>> spec_layer =", "cqt_kernels_imag) if verbose==True: print(\"CQT kernels created, time used = {:.4f}", "time = shape[0], shape[-2], shape[-1] _, n_freq = mel_basis.shape melspec", "``Complex``, or ``Phase``. The output_format can also be changed during", "The total numbers of CQT bins. Default is 84. Will", "window_mask wcos = kernel_cos * window_mask if self.trainable==False: self.register_buffer('wsin', wsin)", "= torch.tensor(cqt_kernels.real.astype(np.float32)) cqt_kernels_imag = torch.tensor(cqt_kernels.imag.astype(np.float32)) if trainable_STFT: wsin = torch.nn.Parameter(wsin,", "else: real = real[:, :length] return real def extra_repr(self) ->", "set `length` as your intended waveform length. By default, ``length=None``,", "def __init__(self, n_fft=2048, freq_bins=None, hop_length=512, window='hann', freq_scale='no', center=True, pad_mode='reflect', fmin=50,", "norm arg is not functioning self.hop_length = hop_length self.center =", "= hop_length self.center = center self.pad_mode = pad_mode self.output_format =", "can increase the speed by setting ``refresh_win=False``. 
Else please keep", "if random_start: pred_stft_shape = (batch_size, n_freq, time) pred_stft = torch.zeros(*pred_stft_shape,", "of Mel-frequency cepstral coefficients norm : string The default value", "self.cqt_kernels_imag, hop, self.padding) CQT = torch.cat((CQT1, CQT),1) CQT = CQT[:,-self.n_bins:,:]", "output_format : str Control the spectrogram output type, either ``Magnitude``,", "The correct shape will be inferred autommatically if the input", "\"\"\" assert S.dim()==3 , \"Please make sure your input is", "the forward kernel do not share the same memory... kernel_sin_inv", "same output length of the original waveform, please set `length`", "loss_fn(pred, target): pred = pred.unsqueeze(1) if pred.ndim == 3 else", "Mel filter bank. fmax : int The ending frequency for", "output_format='Magnitude', verbose=True): super().__init__() # norm arg is not functioning self.trainable", "please use :func:`~nnAudio.Spectrogram.Griffin_Lim`. The parameters (e.g. n_fft, window) need to", "fmax=None, norm=1, trainable_mel=False, trainable_STFT=False, verbose=True, **kwargs): super().__init__() self.stride = hop_length", "is ``1``, which is same as the normalization used in", "time() # print(\"Q = {}, fmin_t = {}, n_filters =", "= ``(num_samples, freq_bins, time_steps, 2)``; 'Phase' will return the phase", "------- spectrogram : torch.tensor It returns a tensor of spectrograms.", "self.cqt_kernels_real, self.cqt_kernels_imag, hop, self.padding) CQT = torch.cat((CQT1, CQT),1) CQT =", "dtype=torch.float).unsqueeze(-1) kernel_cos = torch.tensor(kernel_cos, dtype=torch.float).unsqueeze(-1) # Decide if the Fourier", "waveform length. By default, ``length=None``, which will remove ``n_fft//2`` samples", ": int To make sure the inverse STFT has the", "print(self.lenghts.view(-1,1).shape) # Normalizing the output with the downsampling factor, 2**(self.n_octaves-1)", "# +0.0 removes -0.0 elements, which leads to error in", "Imaginary part of the signal. 
\"\"\" x_real = broadcast_dim(x_real) x_imag", "not. Default value is ``False``. verbose : bool If ``True``,", "proposed in [1]. Please refer to :func:`~nnAudio.Spectrogram.CQT1992v2` for a more", "possible windowing functions. The default value is 'hann'. freq_scale :", "frequency for the lowest frequency bin. If freq_scale is ``no``,", "= get_early_downsample_params(sr, hop_length, fmax_t, Q, self.n_octaves, verbose) self.register_buffer('early_downsample_filter', early_downsample_filter) if", "Determine if the STFT kenrels are trainable or not. If", "forward STFT. window : str The windowing function for iSTFT.", "Do early downsampling if this argument is True if verbose==True:", "overlap_add(real, self.stride) # Prepare the window sumsqure for division #", "kernels will also be caluclated and the STFT kernels will", "This function is same as the :func:`~nnAudio.Spectrogram.iSTFT` class, which is", "kernel. Default value if ``True``. pad_mode : str The padding", "X, onesided=False, length=None, refresh_win=None): \"\"\" If your spectrograms only have", "in order to obtain the correct inverse. If trainability is", "# print(\"downsample_factor = \",self.downsample_factor) # print(CQT.shape) # print(self.lenghts.view(-1,1).shape) # Normalizing", "{}, fmin_t = {}, n_filters = {}\".format(Q, self.fmin_t, n_filters)) basis,", "get back another half if onesided: X = extend_fbins(X) #", "return. Can be either ``Magnitude`` or ``Complex`` or ``Phase``. Default", "lies in the real part real = a1 - b2", "def __init__(self, n_fft, n_iter=32, hop_length=None, win_length=None, window='hann', center=True, pad_mode='reflect', momentum=0.99,", "lowest frequency bin for the top octave kernel self.fmin_t =", "end of the output. If your input spectrograms X are", "is quasi-logarithmic. When ``True`` is used, the Mel scale is", "from the ``n_bins`` and ``bins_per_octave``. If ``fmax`` is not ``None``,", "value is the same as the forward STFT. 
momentum :", "Preparing a new variable for downsampling for i in range(self.n_octaves-1):", "<NAME>. “A fast Griffin-Lim algorithm,” IEEE Workshop on Applications of", "spacing. Please make sure the value is the same as", "n_iter=32 : int The number of iterations for Griffin-Lim. The", "4) self.output_format = output_format self.trainable = trainable self.stride = hop_length", "int The sampling rate for the input audio. It is", "...\", end='\\r') start = time() lowpass_filter = torch.tensor(create_lowpass_filter( band_center =", "a float epsilon = 10e-8 # fudge factor for normalization", "= (self.w_sum>1e-10) else: pass real[:, self.nonzero_indices] = real[:,self.nonzero_indices].div(self.w_sum[self.nonzero_indices]) # Remove", "self.downsample_factor) hop = self.hop_length CQT = get_cqt_complex(x, self.cqt_kernels_real, self.cqt_kernels_imag, hop,", "the frequency domain CQT kernel is trainable or not. Default", "else: self.win_length=win_length if hop_length==None: self.hop_length = n_fft//4 else: self.hop_length =", "/ (1 + self.momentum)) * tprev[:,:,:] # Phase normalization angles", "if possible self.trainable = trainable self.output_format = output_format # It", "librosa. This class inherits from ``torch.nn.Module``, therefore, the usage is", "Number of bins per octave. Default is 12. trainable_STFT :", "= CQT[:,-self.n_bins:,:] # Removing unwanted top bins if self.norm: CQT", "# creating kernels for CQT Q = 1/(2**(1/bins_per_octave)-1) print(\"Creating CQT", "when sqrt(0) due to output=0 else: return torch.sqrt(spec) elif output_format=='Complex':", "= kernel_cos * window wsin = torch.tensor(wsin) wcos = torch.tensor(wcos)", "{}'.format( (*self.wcos.shape,), (*self.cqt_kernels_real.shape,) ) class CQT2010(torch.nn.Module): \"\"\" This algorithm is", "Phase normalization angles = angles.div(torch.sqrt(angles.pow(2).sum(-1)).unsqueeze(-1) + 1e-16) # normalizing the", "same as the STFT in order to obtain the correct", "to calculate the CQT of the input signal. 
Input signal", "the speed by setting ``refresh_win=False``. Else please keep ``refresh_win=True`` \"\"\"", "{**default_mel_inversion_params, **mel_inversion_params} if stft_inversion_params: stft_inversion_params = {**default_stft_inversion_params, **stft_inversion_params} recon_stft =", "Check conditions if not loss.isfinite(): raise OverflowError(\"Overflow encountered in Mel", "# Converting kernels from numpy arrays to torch tensors wsin", "else: self.register_buffer('cqt_kernels_real', cqt_kernels_real) self.register_buffer('cqt_kernels_imag', cqt_kernels_imag) print(\"STFT kernels created, time used", "\"\"\" # 0.2.0 import torch import torch.nn as nn from", "and ``2`` means L2 normalization. Default is ``1``, which is", "kernel_cos * window wsin = torch.tensor(wsin) wcos = torch.tensor(wcos) if", "does nothing. fmax : int The ending frequency for the", "works for 1 single frame. i.e. input shape = (batch,", "= spec_layer(x) \"\"\" # To DO: # need to deal", "Adjusting the top minium bins if fmax_t > sr/2: raise", "torch.cat((kernel_cos, kernel_cos[1:-1].flip(0)), 0) if iSTFT: self.register_buffer('kernel_sin_inv', kernel_sin_inv.unsqueeze(-1)) self.register_buffer('kernel_cos_inv', kernel_cos_inv.unsqueeze(-1)) #", "= verbose # Preparing for the stft layer. No need", "to normalize as librosa # self.cqt_kernels_imag*=lenghts.unsqueeze(1)/self.kernel_width print(\"Creating STFT kernels ...\",", "'cpu'. Returns ------- spectrogram : torch.tensor It returns a tensor", "create_fourier_kernels(n_fft, win_length=win_length, freq_bins=n_fft, window=window, freq_scale=freq_scale, fmin=fmin, fmax=fmax, sr=sr, verbose=False) window_mask", "device : str Choose which device to initialize this layer.", "elif self.pad_mode == 'reflect': if self.num_samples < self.pad_amount: raise AssertionError(\"Signal", "as the forward STFT. fmax : int The ending frequency", "Lim is a direct clone from librosa.griffinlim. [1] <NAME>., <NAME>.,", "40Hz. 
\"\"\" def __init__(self, sr=22050, hop_length=512, fmin=32.70, fmax=None, n_bins=84, bins_per_octave=12,", "seconds\".format(time()-start)) # creating kernels for stft # self.cqt_kernels_real*=lenghts.unsqueeze(1)/self.kernel_width # Trying", "CQT*torch.sqrt(self.lenghts.view(-1,1,1)) # Normalizing the output with the downsampling factor, 2**(self.n_octaves-1)", "torch.nn.Parameter(kernel_sin, requires_grad=trainable_kernels) kernel_cos = torch.nn.Parameter(kernel_cos, requires_grad=trainable_kernels) self.register_parameter('kernel_sin', kernel_sin) self.register_parameter('kernel_cos', kernel_cos)", "== 'reflect': if self.num_samples < self.pad_amount: raise AssertionError(\"Signal length shorter", ":func:`~nnAudio.Spectrogram.Griffin_Lim`. Parameters ---------- onesided : bool If your spectrograms only", "a1 = conv1d(x_real, self.wcos, stride=self.stride) a2 = conv1d(x_real, self.wsin, stride=self.stride)", "the CQT kernel size. The result with and without early", "waveforms. Parameters ---------- S : torch tensor Spectrogram of the", "downsampling factor, 2**(self.n_octaves-1) is make it # same mag as", "is 'cpu' Returns ------- spectrogram : torch.tensor It returns a", "self.center: if self.pad_mode == 'constant': padding = nn.ConstantPad1d(self.kernel_width//2, 0) elif", "Default value is 2048. n_iter=32 : int The number of", "self.stft.inverse(recon_stft, **stft_inversion_params) return recon_audio class MFCC(torch.nn.Module): \"\"\"This function is to", "is for developing purpose # Please don't use the following", "of window mask so that it can be used later", "is 'cpu'. Returns ------- spectrogram : torch.tensor It returns a", "value is ``32`` hop_length : int The hop (or stride)", "the correct frequency. trainable_kernels : bool Determine if the STFT", "is 12. norm : bool Normalization for the CQT result.", "no guarantee that the inverse is perfect, please use with", "Griffin-Lim. 
The default value is ``32`` hop_length : int The", "normalization in the end freqs = fmin * 2.0 **", "momentum=0.9) if sgd_kwargs: default_sgd_kwargs.update(sgd_kwargs) sgd_kwargs = default_sgd_kwargs mel_basis = self.mel_basis.detach()", "used to calculate the correct ``fmin`` and ``fmax``. Setting the", "created, time used = {:.4f} seconds\".format(time()-start)) else: pass if trainable_mel:", "is ``None``, which means ``n_fft//2+1`` bins. hop_length : int The", "n_fft, 1) to support 2D Conv kernel_sin = torch.tensor(kernel_sin, dtype=torch.float).unsqueeze(-1)", "an input with fixed number of timesteps, you can increase", "Q, self.n_octaves, verbose) self.register_buffer('early_downsample_filter', early_downsample_filter) if verbose==True: print(\"Early downsampling filter", "frequency. trainable : bool Determine if the STFT kenrels are", "__init__(self, sr=22050, hop_length=512, fmin=220, fmax=None, n_bins=84, trainable_STFT=False, trainable_CQT=False, bins_per_octave=12, output_format='Complex',", "istft later self.w = torch.tensor(get_window(window, int(self.win_length), fftbins=True), device=device).float() def forward(self,", "and prepare the shape for batch-wise-time-wise multiplication # Create filter", "requires_grad=trainable_kernels) self.register_parameter('cqt_kernels_real', cqt_kernels_real) self.register_parameter('cqt_kernels_imag', cqt_kernels_imag) else: self.register_buffer('cqt_kernels_real', cqt_kernels_real) self.register_buffer('cqt_kernels_imag', cqt_kernels_imag)", "win_length self.n_iter = n_iter self.center = center self.pad_mode = pad_mode", "the discrete cosine transform is calcuated to obtain the final", "tensor if verbose==True: print(\"Creating low pass filter ...\", end='\\r') start", "torch.abs(torch.tensor([ref])) self.register_buffer('amin', amin) self.register_buffer('ref', ref) self.top_db = top_db self.n_mfcc =", "are trainable if trainable_kernels: # Making all these variables trainable", "self.win_length = win_length 
self.iSTFT = iSTFT self.trainable = trainable start", "coefficients (MFCCs) of the input signal. This algorithm first extracts", "Default value is 128. hop_length : int The hop (or", "call it version 2. [1] Brown, <NAME>. and <NAME>. “An", "class, which is to convert spectrograms back to waveforms. It", "= {:.4f} seconds\".format(time()-start)) def forward(self, x, output_format=None): \"\"\" Convert a", "= torch.tensor(kernel_cos, dtype=torch.float) # In this way, the inverse kernel", "trainable or not. Default value is ``False``. verbose : bool", "will also be caluclated and the CQT kernels will be", "CQT = CQT*self.downsample_factor if output_format=='Magnitude': # Getting CQT Amplitude return", "important for calculating the correct frequency. hop_length : int The", "Normalize again to get same result as librosa CQT =", "self.wcos, stride=self.stride) b2 = conv1d(x_imag, self.wsin, stride=self.stride) imag = a2+b1", "the waveforms inverse = torch.istft(S.unsqueeze(-1) * angles, self.n_fft, self.hop_length, win_length=self.win_length,", "is trainable or not. Default is ``False`` trainable_CQT : bool", "is 'ortho'. Normalization for DCT basis **kwargs Other arguments for", "only have ``n_fft//2+1`` frequency bins, please use ``onesided=True``, else use", "time used = {:.4f} seconds\".format(time()-start)) def forward(self, x, output_format=None): \"\"\"", "Prepare the shape of window mask so that it can", "fmin=fmin, fmax=fmax, sr=sr, verbose=False) window_mask = get_window(window,int(win_length), fftbins=True) # For", "= output_format self.trainable = trainable self.stride = hop_length self.center =", "512. window : str The windowing function for STFT. 
It", "# padding = nn.ReflectionPad1d(self.n_fft//2) # x_real = padding(x_real) # x_imag", ":func:`~nnAudio.Spectrogram.CQT1992v2` documentation\"\"\" pass # The section below is for developing", "= Spectrogram.MelSpectrogram() >>> specs = spec_layer(x) \"\"\" def __init__(self, sr=22050,", "Add algorithm to connect all the frames real = overlap_add(real,", "CQT result. basis_norm : int Normalization for the CQT kernels.", "Create filter windows for stft wsin, wcos, self.bins2freq = create_fourier_kernels(n_fft=n_fft,", "Q = 1/(2**(1/bins_per_octave)-1) if verbose==True: print(\"Creating CQT kernels ...\", end='\\r')", "default_sgd_kwargs mel_basis = self.mel_basis.detach() shape = melspec.shape batch_size, n_mels, time", "= nn.ConstantPad1d(self.n_fft//2, 0) elif self.pad_mode == 'reflect': self.padding = nn.ReflectionPad1d(self.n_fft//2)", "CQT kernels are trainable or not. If ``True``, the gradients", "correct frequency. trainable_kernels : bool Determine if the STFT kenrels", "# print(\"n_octaves = \", self.n_octaves) # Calculate the lowest frequency", ": bool When ``False`` is used, the Mel scale is", "CQT/self.n_fft*torch.sqrt(self.lenghts.view(-1,1,1)) else: CQT = CQT*torch.sqrt(self.lenghts.view(-1,1,1)) # Normalizing the output with", ">>> specs = spec_layer(x) \"\"\" def __init__(self, sr=22050, n_fft=2048, n_mels=128,", "TOOLBOX FOR MUSIC PROCESSING.” (2010). [2] Brown, <NAME>. and <NAME>.", "freq_bins,time_steps)``; ``Complex`` will return the STFT result in complex number,", "is 'hann'. center : bool Putting the STFT keneral at", "batch-wise-time-wise multiplication # Create filter windows for inverse kernel_sin, kernel_cos,", "The default value is 'hann'. 
Please make sure the value", "dimensions to support 2D convolution X_real_bc = X_real.unsqueeze(1) X_imag_bc =", "X_real_bc = X_real.unsqueeze(1) X_imag_bc = X_imag.unsqueeze(1) a1 = conv2d(X_real_bc, self.kernel_cos_inv,", "used = {:.4f} seconds\".format(time()-start)) print(\"Mel filter created, time used =", "self.pad_mode == 'reflect': padding = nn.ReflectionPad1d(self.n_fft//2) x = padding(x) imag", "**kwargs): super().__init__() self.melspec_layer = MelSpectrogram(sr=sr, verbose=verbose, **kwargs) self.m_mfcc = n_mfcc", "means L1 normalization, and ``2`` means L2 normalization. Default is", "means L2 normalization. Default is ``1``, which is same as", "= center self.pad_mode = pad_mode self.n_fft = n_fft self.freq_bins =", "self.num_samples = x.shape[-1] x = broadcast_dim(x) if self.center: if self.pad_mode", "(freq_bins, 1, n_fft, 1) to support 2D Conv kernel_sin =", "ParameterError('top_db must be non-negative') # make the dim same as", "of the STFT kernel, if ``True``, the time index is", "frequency. trainable_kernels : bool Determine if the STFT kenrels are", "extra_repr(self) -> str: return 'STFT kernel size = {}, CQT", "time index is the center of the iSTFT kernel. Default", "for the CQT result. basis_norm : int Normalization for the", "for downsampling for i in range(self.n_octaves-1): hop = hop//2 x_down", "bool Determine if the Mel filter banks are trainable or", "padding = nn.ConstantPad1d(self.kernel_width//2, 0) elif self.pad_mode == 'reflect': padding =", "to increase computational speed. \"\"\" if refresh_win==None: refresh_win=self.refresh_win assert X.dim()==4", "# Preparing for the stft layer. 
No need for center", "trainable_window: window_mask = torch.nn.Parameter(window_mask, requires_grad=trainable_window) self.register_parameter('window_mask', window_mask) else: self.register_buffer('window_mask', window_mask)", "norm is used to normalize the final CQT result by", "X are of the same length, please use ``refresh_win=None`` to", "= dict(lr=1e3, momentum=0.9) if sgd_kwargs: default_sgd_kwargs.update(sgd_kwargs) sgd_kwargs = default_sgd_kwargs mel_basis", "spectrogram contains only half of the n_fft # Use extend_fbins", "stft kernel_sin, kernel_cos, self.bins2freq, self.bin_list, window_mask = create_fourier_kernels(n_fft, win_length=win_length, freq_bins=freq_bins,", "# Preparing kernels for Short-Time Fourier Transform (STFT) # We", "* N) W_r = torch.cos(k) W_i = torch.sin(k) V =", "self.wcos, stride=self.hop_length) fourier_imag = conv1d(x, self.wsin, stride=self.hop_length) # CQT CQT_real,", "the next lower octave. The kernel creation process is still", "print(\"num_octave = \", self.n_octaves) # Calculate the lowest frequency bin", "The inverse function only works for 1 single frame. i.e.", "padding length (n_fft // 2).\") padding = nn.ReflectionPad1d(self.pad_amount) x =", "forward kernel do not share the same memory... kernel_sin_inv =", "= padding(x) # CQT CQT_real = conv1d(x, self.cqt_kernels_real, stride=self.hop_length) CQT_imag", "extend_fbins function to get back another half if onesided: X", "phase_imag = torch.sin(torch.atan2(CQT_imag,CQT_real)) return torch.stack((phase_real,phase_imag), -1) def extra_repr(self) -> str:", "tensor of spectrograms. shape = ``(num_samples, freq_bins,time_steps)``. Examples -------- >>>", "win_length self.iSTFT = iSTFT self.trainable = trainable start = time()", "kernels for CQT Q = 1/(2**(1/bins_per_octave)-1) if verbose==True: print(\"Creating CQT", "fmax=6000, sr=22050, trainable=False, output_format=\"Complex\", verbose=True): super().__init__() # Trying to make", "the STFT kernel. 
Default value if ``True``. pad_mode : str", "automatically. Default is ``None`` n_bins : int The total numbers", "# STFT fourier_real = conv1d(x, self.wcos, stride=self.hop_length) fourier_imag = conv1d(x,", "using conv1d # remove redundant parts spec_real = spec_real[:, :self.freq_bins,", "self.pad_mode = pad_mode self.n_fft = n_fft # Create filter windows", "[1] Brown, <NAME>. and <NAME>. “An efficient algorithm for the", "time() kernel_sin, kernel_cos, self.bins2freq, _, window = create_fourier_kernels(self.n_fft, window='ones', freq_scale='no')", "Number of frequency bins. Default is ``None``, which means ``n_fft//2+1``", "which leads to error in calculating phase def inverse(self, X,", "the Mel-frequency cepstral coefficients (MFCCs) of the input signal. This", "= {:.4f} seconds\".format(time()-start)) # Caluate num of filter requires for", "shape = (batch, n_fft, 1) \"\"\" def __init__(self, n_fft=2048, freq_bins=None,", "used = {:.4f} seconds\".format(time()-start)) else: pass def forward(self, x, output_format=None):", "default setting same as librosa if win_length==None: win_length = n_fft", "If 'no' is used, the bin will start at 0Hz", "self.fmin_t*2**((bins_per_octave-1)/bins_per_octave) else: # Calculate the top bin frequency fmax_t =", "n_fft = \",self.n_fft) # If center==True, the STFT window will", "batch of waveforms to Mel spectrograms. Parameters ---------- x :", "** (np.r_[0:n_bins] / np.float(bins_per_octave)) lenghts = np.ceil(Q * sr /", "from ``torch.nn.Module``, therefore, the usage is same as ``torch.nn.Module``. Parameters", "timesteps)`` \"\"\" assert S.dim()==3 , \"Please make sure your input", "calculating the correct frequency. trainable : bool Determine if the", "during model training. Default value is ``False``. 
verbose : bool", "trainable_mel={}'.format( (*self.mel_basis.shape,), self.trainable_mel, self.trainable_STFT ) def to_stft(self, melspec, max_steps=1000, loss_threshold=1e-8,", "self.output_format = output_format # It will be used to calculate", "\",self.n_fft) # Preparing kernels for Short-Time Fourier Transform (STFT) #", "\"\"\" Method for debugging \"\"\" x = broadcast_dim(x) if self.center:", "self.center = center self.pad_mode = pad_mode self.momentum = momentum self.device", "arg is not functioning self.trainable = trainable self.hop_length = hop_length", "log_spec = 10.0 * torch.log10(torch.max(S, self.amin)) log_spec -= 10.0 *", "method proposed in [1]. Instead of convoluting the STFT results", "range in the CQT filter instead of here. if verbose==True:", "The default value is ``32`` hop_length : int The hop", "on the \"fast Griffin-Lim\"[1]. This Griffin Lim is a direct", "requires_grad=True) sgd_kwargs[\"lr\"] = sgd_kwargs[\"lr\"] * batch_size optimizer = torch.optim.SGD([pred_stft], **sgd_kwargs)", "output with the downsampling factor, 2**(self.n_octaves-1) is make it #", "of the STFT reuslt, shape = ``(num_samples, freq_bins,time_steps, 2)``. The", "from the audio clips, then the discrete cosine transform is", "If ``True``, the gradients for CQT kernels will also be", "it suppresses all prints. 
device : str Choose which device", "is same as the :func:`~nnAudio.Spectrogram.iSTFT` class, which is to convert", "else: self.register_buffer('cqt_kernels_real', cqt_kernels_real) self.register_buffer('cqt_kernels_imag', cqt_kernels_imag) # If center==True, the STFT", "loss = (pred - target).pow(2).sum(-2).mean() return loss verbose = verbose", "to the right shape \"\"\" x = broadcast_dim(x) spec =", "amin) self.register_buffer('ref', ref) self.top_db = top_db self.n_mfcc = n_mfcc def", "the time index is the beginning of the CQT kernel,", "window) need to be the same as the STFT in", "here k = - torch.arange(N, dtype=x.dtype, device=x.device)[None, :] * np.pi", "1992 algorithm. Therefore, we can reuse the code from the", "CQT = torch.sqrt(CQT_real.pow(2)+CQT_imag.pow(2)) else: CQT = torch.sqrt(CQT_real.pow(2)+CQT_imag.pow(2)+1e-8) return CQT elif", "later if possible # This will be used to calculate", "real = overlap_add(real, self.stride) # Prepare the window sumsqure for", "self.padding) # Getting the top octave CQT x_down = x", "refresh_win==None: refresh_win=self.refresh_win assert X.dim()==4 , \"Inverse iSTFT only works for", "= torch.sin(k) V = Vc[:, :, :, 0] * W_r", "created, time used = {:.4f} seconds\".format(time()-start)) print(\"Mel filter created, time", "spectrograms back to waveforms based on the \"fast Griffin-Lim\"[1]. This", "= (torch.pinverse(mel_basis) @ melspec).clamp(eps) pred_stft = nn.Parameter(pred_stft, requires_grad=True) sgd_kwargs[\"lr\"] =", "``False``. 
fmin : int The starting frequency for the lowest", "``refresh_win=True`` \"\"\" if (hasattr(self, 'kernel_sin_inv') != True) or (hasattr(self, 'kernel_cos_inv')", "0.50, # kernelLength=256, # transitionBandwidth=0.001)) lowpass_filter = torch.tensor(create_lowpass_filter( band_center =", "# This will be used to calculate filter_cutoff and creating", "= self.fmin_t*2**((remainder-1)/bins_per_octave) self.fmin_t = fmax_t/2**(1-1/bins_per_octave) # Adjusting the top minium", "V[:, :, 1:] /= np.sqrt(N / 2) * 2 V", "= hop_length self.center = center self.pad_mode = pad_mode self.norm =", "fmin*2**(self.n_octaves-1) remainder = n_bins % bins_per_octave # print(\"remainder = \",", "# Prepare the right shape to do inverse # if", "n_fft self.power = power self.trainable_mel = trainable_mel self.trainable_STFT = trainable_STFT", "have ``n_fft//2+1`` frequency bins, please use ``onesided=True``, else use ``onesided=False``", "rate is very important for calculating the correct frequency. n_mfcc", "the value is the same as the forward STFT. window", "follows these 3 shapes. Most of the arguments follow the", "you want to use `inverse`\") assert X.dim()==4 , \"Inverse iSTFT", "if verbose==True: print(\"iSTFT kernels created, time used = {:.4f} seconds\".format(time()-start))", "documentation\"\"\" pass # The section below is for developing purpose", "conv2d(X_real_bc, self.kernel_cos_inv, stride=(1,1)) b2 = conv2d(X_imag_bc, self.kernel_sin_inv, stride=(1,1)) # compute", "is the beginning of the iSTFT kernel, if ``True``, the", "for STFT. It uses ``scipy.signal.get_window``, please refer to scipy documentation", "is 128. 
hop_length : int The hop (or stride) size.", "your input is in the shape of (batch, freq_bins, timesteps)\"", "be broadcasted batch_wise_max = log_spec.flatten(1).max(1)[0].unsqueeze(1).unsqueeze(1) log_spec = torch.max(log_spec, batch_wise_max -", "# self.wcos = torch.nn.Parameter(self.wcos) def forward(self, x): \"\"\" Convert a", "self.iSTFT = iSTFT self.trainable = trainable start = time() #", "used, the Mel scale is quasi-logarithmic. When ``True`` is used,", "is stored as ``(real, imag)`` in the last axis. Default", "audio is downsampled, the CQT relative to the downsampled input", "is equivalent to ``n_fft//4``. window : str The windowing function", "CQT CQT_real = conv1d(x, self.cqt_kernels_real, stride=self.hop_length) * \\ torch.sqrt(self.lenghts.view(-1,1)) CQT_imag", "is 'reflect'. htk : bool When ``False`` is used, the", "return pred_stft, pred_mel.detach(), losses return pred_stft def inverse(self, melspec, mel_inversion_params=None,", "1992 alogrithm [2] [1] Schörkhuber, Christian. “CONSTANT-Q TRANSFORM TOOLBOX FOR", "class CQT1992(torch.nn.Module): \"\"\" This alogrithm uses the method proposed in", "used in librosa. window : str The windowing function for", "hop = hop//2 x_down = downsampling_by_2(x_down, self.lowpass_filter) CQT1 = get_cqt_complex(x_down,", "in the shape of (batch, freq_bins, timesteps, 2).\"\\ \"\\nIf you", ">>> spec_layer = Spectrogram.CQT2010v2() >>> specs = spec_layer(x) \"\"\" #", "** 0.5 pred_stft = pred_stft.view((*shape[:-2], n_freq, time)) if return_extras: return", "the same as the forward STFT. 
fmin : int The", "that it can be used later in inverse self.register_buffer('window_mask', window_mask.unsqueeze(0).unsqueeze(-1))", "'n_fft={}, Fourier Kernel size={}, iSTFT={}, trainable={}'.format( self.n_fft, (*self.wsin.shape,), self.iSTFT, self.trainable", "and kernel_cos have the shape (freq_bins, 1, n_fft, 1) to", "spec_layer(x) \"\"\" # To DO: # need to deal with", "right shape \"\"\" x = broadcast_dim(x) spec = self.stft(x, output_format='Magnitude')**self.power", "self.output_format x = broadcast_dim(x) if self.center: if self.pad_mode == 'constant':", "need to be the same as the STFT in order", "the input audio. It is used to calculate the correct", "= torch.cat((CQT1, CQT),1) CQT = CQT[:,-self.n_bins:,:] # Removing unwanted top", "bins_per_octave, norm=basis_norm, topbin_check=False) # For normalization in the end freqs", "X_imag_bc = X_imag.unsqueeze(1) a1 = conv2d(X_real_bc, self.kernel_cos, stride=(1,1)) b2 =", "is 'reflect'. trainable : bool Determine if the CQT kernels", "= real[:, :length] return real class Griffin_Lim(torch.nn.Module): \"\"\" Converting Magnitude", "to convert spectrograms back to waveforms. It only works for", "steps if hasattr(self, 'w_sum')==False or refresh_win==True: self.w_sum = torch_window_sumsquare(self.window_mask.flatten(), X.shape[2],", "arguments default_sgd_kwargs = dict(lr=1e3, momentum=0.9) if sgd_kwargs: default_sgd_kwargs.update(sgd_kwargs) sgd_kwargs =", "magnitude spectrograms, please use :func:`~nnAudio.Spectrogram.Griffin_Lim`. The parameters (e.g. n_fft, window)", "= fmin*2**(self.n_octaves-1) remainder = n_bins % bins_per_octave # print(\"remainder =", "(or stride) size. Default value is 512. fmin : float", "bins if self.norm: CQT = CQT/self.n_fft*torch.sqrt(self.lenghts.view(-1,1,1)) else: CQT = CQT*torch.sqrt(self.lenghts.view(-1,1,1))", "``False``, it suppresses all prints device : str Choose which", "memory. 
fmin : int The starting frequency for the lowest", "True) or (hasattr(self, 'kernel_cos_inv') != True): raise NameError(\"Please activate the", "paddings at the beginning # and ending are required. if", "the last axis. Default value is 'Magnitude'. verbose : bool", "square. If you have an input with fixed number of", "minus sign for imaginary part elif output_format=='Phase': return torch.atan2(-spec_imag+0.0,spec_real) #", "= torch.cos(k) W_i = torch.sin(k) V = Vc[:, :, :,", "very important for calculating the correct frequency. hop_length : int", "Parameters ---------- S : torch tensor Spectrogram of the shape", "support nn.DataParallel mel_basis = torch.nn.Parameter(mel_basis, requires_grad=trainable_mel) self.register_parameter('mel_basis', mel_basis) else: self.register_buffer('mel_basis',", "top bins if self.norm: CQT = CQT/self.n_fft*torch.sqrt(self.lenghts.view(-1,1,1)) else: CQT =", "= spec_imag[:, :self.freq_bins, :] if output_format=='Magnitude': spec = spec_real.pow(2) +", "= pad_mode self.n_fft = n_fft self.freq_bins = freq_bins self.trainable =", "end of the output. refresh_win : bool Recalculating the window", "will be ignored and ``n_bins`` will be calculated automatically. Default", "is ``False`` output_format : str Determine the return type. 'Magnitude'", "_ in range(self.n_iter): tprev = rebuilt # Saving previous rebuilt", "// 2 self.window = window self.win_length = win_length self.iSTFT =", "padding = nn.ReflectionPad1d(self.kernel_width//2) x = padding(x) # STFT fourier_real =", "stride=(1,1)) b2 = conv2d(X_imag_bc, self.kernel_sin, stride=(1,1)) # compute real and", "CQT),1) CQT = CQT[:,-self.n_bins:,:] # Removing unwanted top bins if", "dtype=torch.float) self.wcos = torch.tensor(wcos, dtype=torch.float) def forward(self,x): \"\"\" Convert a", "remove ``n_fft//2`` samples from the start and the end of", "arguments for Melspectrogram such as n_fft, n_mels, hop_length, and window", "scale is logarithmic. The default value is ``False``. 
fmin :", "time used = {:.4f} seconds\".format(time()-start)) else: pass def forward(self, X,", "is ``False`` output_format : str Control the spectrogram output type,", "''' x = x.permute(0,2,1) # make freq the last axis,", "the value is the same as the forward STFT. freq_scale", "self.power = power self.trainable_mel = trainable_mel self.trainable_STFT = trainable_STFT self.verbose", "The number of Mel filter banks. The filter banks maps", "``torch.nn.Module``. This alogrithm uses the resampling method proposed in [1].", "training. Default value is ``False``. verbose : bool If ``True``,", "or 'no' Determine the spacing between each frequency bin. When", "= torch.sin(torch.atan2(CQT_imag,CQT_real)) return torch.stack((phase_real,phase_imag), -1) def forward_manual(self,x): \"\"\" Method for", "prepare the shape for batch-wise-time-wise multiplication # Create filter windows", "hop_length=512, fmin=32.70, fmax=None, n_bins=84, bins_per_octave=12, norm=True, basis_norm=1, window='hann', pad_mode='reflect', earlydownsample=True,", "default_sgd_kwargs.update(sgd_kwargs) sgd_kwargs = default_sgd_kwargs mel_basis = self.mel_basis.detach() shape = melspec.shape", "not ``None``, then the argument ``n_bins`` will be ignored and", "self.pad_amount:-self.pad_amount] else: if self.center: real = real[:, self.pad_amount:self.pad_amount + length]", "length, please use ``refresh_win=None`` to increase computational speed. \"\"\" if", "bins_per_octave)) if verbose==True: print(\"num_octave = \", self.n_octaves) # Calculate the", "is logarithmic. The default value is ``False``. fmin : int", "**sgd_kwargs) losses = [] for i in range(max_steps): optimizer.zero_grad() pred_mel", "size = {}'.format( (*self.wcos.shape,), (*self.cqt_kernels_real.shape,) ) class CQT1992v2(torch.nn.Module): \"\"\"This function", "training. 
Default value is ``False`` output_format : str Determine the", "torch.stack((phase_real,phase_imag), -1) class CQT(CQT1992v2): \"\"\"An abbreviation for :func:`~nnAudio.Spectrogram.CQT1992v2`. Please refer", "STFT results with a gigantic CQT kernel covering the full", "OverflowError(\"Overflow encountered in Mel -> STFT optimization\") if loss_threshold and", "extracts Mel spectrograms from the audio clips, then the discrete", "else: pred_stft = (torch.pinverse(mel_basis) @ melspec).clamp(eps) pred_stft = nn.Parameter(pred_stft, requires_grad=True)", "= torch.nn.Parameter(wsin, requires_grad=self.trainable) wcos = torch.nn.Parameter(wcos, requires_grad=self.trainable) self.register_parameter('wsin', wsin) self.register_parameter('wcos',", "rebuilt[:,:,:] - (self.momentum / (1 + self.momentum)) * tprev[:,:,:] #", "should be in either of the following shapes.\\n 1. ``(len_audio)``\\n", "banks maps the n_fft to mel bins. Default value is", "a2 = conv1d(x_real, self.wsin, stride=self.stride) b1 = conv1d(x_imag, self.wcos, stride=self.stride)", "initialize this layer. Default value is 'cpu' \"\"\" def __init__(self,", "bins_per_octave=12, output_format='Complex', norm=1, window='hann', center=True, pad_mode='reflect'): super().__init__() # norm arg", "= n_fft if hop_length==None: hop_length = int(win_length // 4) self.output_format", "value is ``False`` output_format : str Determine the return type.", "is 2048 n_mels : int The number of Mel filter", "to the right shape \"\"\" output_format = output_format or self.output_format", "np.pi / (2 * N) W_r = torch.cos(k) W_i =", "= torch.cat([x[:, :, ::2], x[:, :, 1::2].flip([2])], dim=2) Vc =", "value is ``Complex``. \"\"\" output_format = output_format or self.output_format self.num_samples", "the code from the 1992 alogrithm [2] [1] <NAME>. “CONSTANT-Q", "self.kernel_sin_inv, stride=(1,1)) # compute real and imag part. 
signal lies", "used later in inverse self.register_buffer('window_mask', window_mask.unsqueeze(0).unsqueeze(-1)) if verbose==True: print(\"STFT kernels", "n_filters = min(bins_per_octave, n_bins) self.n_octaves = int(np.ceil(float(n_bins) / bins_per_octave)) if", "if fmax_t > sr/2: raise ValueError('The top bin {}Hz has", "self.cqt_kernels_real, stride=self.hop_length) CQT_imag = conv1d(x, self.cqt_kernels_imag, stride=self.hop_length) # Getting CQT", "freq_bins=None, hop_length=None, window='hann', freq_scale='no', center=True, fmin=50, fmax=6000, sr=22050, trainable_kernels=False, trainable_window=False,", "value is 'cpu' \"\"\" def __init__(self, n_fft, n_iter=32, hop_length=None, win_length=None,", "of CQT bins. Default is 84. Will be ignored if", "frequency for the highest CQT bin. Default is ``None``, therefore", "please use ``onesided=True``, else use ``onesided=False`` To make sure the", "``(len_audio)``\\n 2. ``(num_audio, len_audio)``\\n 3. ``(num_audio, 1, len_audio)`` It will", "lowest CQT bin. Default is 32.70Hz, which coresponds to the", "window sum square. If you have an input with fixed", "to scipy documentation for possible windowing functions. The default value", "= center self.pad_mode = pad_mode self.momentum = momentum self.device =", "# size of a float epsilon = 10e-8 # fudge", "default_stft_inversion_params = {} mel_inversion_params = mel_inversion_params or {} stft_inversion_params =", "conv1d(x_imag, self.wcos, stride=self.stride) b2 = conv1d(x_imag, self.wsin, stride=self.stride) imag =", "axis and freq axis def forward(self, x): \"\"\" Convert a", "bins. Default is ``None``, which means ``n_fft//2+1`` bins. hop_length :", "of the STFT kernel. Default value if ``True``. pad_mode :", "* V return V.permute(0,2,1) # swapping back the time axis", "Melspectrogram of the input signal. Input signal should be in", "frequency bin. When `linear` or `log` is used, the bin", "lowest Mel filter bank. 
fmax : int The ending frequency", "STFT result, shape = ``(num_samples, freq_bins, time_steps)``; 'Complex' will return", "trainable_mel self.trainable_STFT = trainable_STFT self.verbose = verbose # Preparing for", "functions. The default value is 'hann'. center : bool Putting", "lowpass filter and make it a torch tensor if verbose==True:", "freq_scale : 'linear', 'log', or 'no' Determine the spacing between", "Q transform.” (1992). This function is to calculate the CQT", "sr=sr, verbose=verbose) kernel_sin = torch.tensor(kernel_sin, dtype=torch.float) kernel_cos = torch.tensor(kernel_cos, dtype=torch.float)", "pad_mode='reflect', earlydownsample=True, trainable=False, output_format='Magnitude', verbose=True): super().__init__() self.norm = norm #", "Griffin-Lim algorithm,” IEEE Workshop on Applications of Signal Processing to", "beginning of the CQT kernel, if ``True``, the time index", "self.earlydownsample == True: # Do early downsampling if this argument", "the input audio. It is used to calucate the correct", "X_imag_bc = X_imag.unsqueeze(1) a1 = conv2d(X_real_bc, self.kernel_cos_inv, stride=(1,1)) b2 =", "than reflect padding length (n_fft // 2).\") padding = nn.ReflectionPad1d(self.pad_amount)", "stft wsin, wcos, self.bins2freq = create_fourier_kernels(n_fft=n_fft, freq_bins=n_fft, window=window, freq_scale=freq_scale, fmin=fmin,", "CQT*torch.sqrt(self.lenghts.view(-1,1,1)) if output_format=='Magnitude': if self.trainable==False: # Getting CQT Amplitude return", "the STFT kernels will be updated during model training. 
Default", "magnitude spectrogram, please consider using Griffin-Lim.\" if onesided: X =", "= (batch, n_fft, 1) \"\"\" def __init__(self, n_fft=2048, freq_bins=None, hop_length=512,", "CQT relative to the downsampled input is equavalent to the", "for stft and istft later self.w = torch.tensor(get_window(window, int(self.win_length), fftbins=True),", "Spectrogram.MFCC() >>> mfcc = spec_layer(x) \"\"\" def __init__(self, sr=22050, n_mfcc=20,", "X_imag = X[:, :, :, 0], X[:, :, :, 1]", "therefore, the usage is same as ``torch.nn.Module``. Parameters ---------- n_fft", "starting frequency for the lowest frequency bin. If freq_scale is", "bin. Default is 32.70Hz, which coresponds to the note C0.", "# is make it same mag as 1992 CQT =", "trainable : bool Determine if the CQT kernels are trainable", "n_iter=32, hop_length=None, win_length=None, window='hann', center=True, pad_mode='reflect', momentum=0.99, device='cpu'): super().__init__() self.n_fft", "'log', or 'no' Determine the spacing between each frequency bin.", ": bool Determine if the Mel filter banks are trainable", "the Fourier kernels do not need to be windowed window_mask", "reflect padding length (n_fft // 2).\") padding = nn.ReflectionPad1d(self.pad_amount) x", "pad_mode=self.pad_mode) # Phase update rule angles[:,:,:] = rebuilt[:,:,:] - (self.momentum", "Convert a batch of waveforms to spectrograms. Parameters ---------- x", "size. Default value is 2048. freq_bins : int Number of", "ending frequency for the highest frequency bin. If freq_scale is", "n_fft real /= (self.n_fft) # Overlap and Add algorithm to", "self.padding = nn.ConstantPad1d(self.n_fft//2, 0) elif self.pad_mode == 'reflect': self.padding =", "will be put in the middle, and paddings at the", "the CQT filter instead of here. if verbose==True: print(\"Creating STFT", "= nn.ReflectionPad1d(self.pad_amount) x = padding(x) spec_imag = conv1d(x, self.wsin, stride=self.stride)", "keneral at the center of the time-step or not. 
If", "Making all these variables trainable kernel_sin = torch.nn.Parameter(kernel_sin, requires_grad=trainable_kernels) kernel_cos", "# TODO: Can make the W_r and W_i trainable here", "n_fft = \",self.n_fft) # Preparing kernels for Short-Time Fourier Transform", "fmax=6000, sr=22050, trainable_kernels=False, trainable_window=False, verbose=True, refresh_win=True): super().__init__() # Trying to", "# Normalizing the output with the downsampling factor, 2**(self.n_octaves-1) #", "freq axis def forward(self, x): \"\"\" Convert a batch of", "reached. Stopping optimization.\") break if grad_threshold and pred_stft.grad.max() < grad_threshold:", "activate the iSTFT module or not. By default, it is", "not. If ``True``, the gradients for Mel filter banks will", "trainable or not. If ``True``, the gradients for Mel filter", "return the STFT result in complex number, shape = ``(num_samples,", "STFT. fmin : int The starting frequency for the lowest", "is the beginning of the CQT kernel, if ``True``, the", "center=True, pad_mode='reflect'): super().__init__() # norm arg is not functioning self.hop_length", "n_fft=2048, freq_bins=None, hop_length=512, window='hann', freq_scale='no', center=True, pad_mode='reflect', fmin=50, fmax=6000, sr=22050):", "filter banks size = {}, trainable_mel={}'.format( (*self.mel_basis.shape,), self.trainable_mel, self.trainable_STFT )", "CQT kernels if verbose==True: print(\"Creating CQT kernels ...\", end='\\r') start", "Real part of the signal. x_imag : torch tensor Imaginary", "bins per octave. Default is 12. trainable_STFT : bool Determine", "``onesided=True``, else use ``onesided=False`` length : int To make sure", "If ``True``, it shows layer information. If ``False``, it suppresses", "phase_imag = torch.sin(torch.atan2(CQT_imag,CQT_real)) return torch.stack((phase_real,phase_imag), -1) def forward_manual(self,x): \"\"\" Method", "sampling rate for the input audio. It is used to", "lowest frequency bin. 
If freq_scale is ``no``, this argument does", "model can support nn.DataParallel mel_basis = torch.nn.Parameter(mel_basis, requires_grad=trainable_mel) self.register_parameter('mel_basis', mel_basis)", "32.70Hz, which coresponds to the note C0. fmax : float", "Default is ``None``, which means ``n_fft//2+1`` bins. hop_length : int", "# Normalizing the output with the downsampling factor, 2**(self.n_octaves-1) is", "mel_basis @ pred_stft loss = loss_fn(pred_mel, melspec) losses.append(loss.item()) loss.backward() optimizer.step()", "sz_float = 4 # size of a float epsilon =", "is to calculate the Mel-frequency cepstral coefficients (MFCCs) of the", "in [1]. I slightly modify it so that it runs", ") def to_stft(self, melspec, max_steps=1000, loss_threshold=1e-8, grad_threshold=1e-7, random_start=False, sgd_kwargs=None, eps=1e-12,", "iSTFT only works for complex number,\" \\ \"make sure our", "fftbins=True), device=device).float() def forward(self, S): \"\"\" Convert a batch of", "= 0.50, kernelLength=256, transitionBandwidth=0.001) ) # Broadcast the tensor to", "stft layer. No need for center self.stft = STFT(n_fft=n_fft, freq_bins=None,", "correct ``fmin`` and ``fmax``. Setting the correct sampling rate is", "stft and istft later self.w = torch.tensor(get_window(window, int(self.win_length), fftbins=True), device=device).float()", "pass def forward(self, x, output_format=None): \"\"\" Convert a batch of", "if output_format=='Magnitude': spec = spec_real.pow(2) + spec_imag.pow(2) if self.trainable==True: return", "kernel_cos, self.bins2freq, _, window = create_fourier_kernels(self.n_fft, window='ones', freq_scale='no') wsin =", "the :func:`~nnAudio.Spectrogram.CQT1992v2` documentation\"\"\" pass # The section below is for", "information. If ``False``, it suppresses all prints. device : str", "forward_manual(self,x): \"\"\" Method for debugging \"\"\" x = broadcast_dim(x) if", "refer to scipy documentation for possible windowing functions. 
The default", "tensors def __init__(self, sr=22050, hop_length=512, fmin=32.70, fmax=None, n_bins=84, bins_per_octave=12, norm=True,", "creating kernels for CQT Q = 1/(2**(1/bins_per_octave)-1) if verbose==True: print(\"Creating", "= CQT*torch.sqrt(self.lenghts.view(-1,1,1)) if output_format=='Magnitude': if self.trainable==False: # Getting CQT Amplitude", "a tensor of MFCCs. shape = ``(num_samples, n_mfcc, time_steps)``. Examples", "time() lowpass_filter = torch.tensor(create_lowpass_filter( band_center = 0.5, kernelLength=256, transitionBandwidth=0.001 )", "CQT kernels ...\", end='\\r') start = time() cqt_kernels, self.kernel_width, lenghts", "Please make sure the value is the same as the", "inverse STFT has the same output length of the original", "------- MFCCs : torch.tensor It returns a tensor of MFCCs.", "at 0Hz and end at Nyquist frequency with linear spacing.", "STFT result, shape = ``(num_samples, freq_bins,time_steps)``; ``Complex`` will return the", "be non-negative') # make the dim same as log_spec so", "self.register_parameter('kernel_sin', kernel_sin) self.register_parameter('kernel_cos', kernel_cos) else: self.register_buffer('kernel_sin', kernel_sin) self.register_buffer('kernel_cos', kernel_cos) #", "Getting CQT Amplitude CQT = torch.sqrt(CQT_real.pow(2)+CQT_imag.pow(2)) else: CQT = torch.sqrt(CQT_real.pow(2)+CQT_imag.pow(2)+1e-8)", ">>> specs = spec_layer(x) \"\"\" # To DO: # need", "return torch.stack((phase_real,phase_imag), -1) def extra_repr(self) -> str: return 'STFT kernel", "or not. 
If ``True``, the gradients for STFT kernels will", "self.wsin, stride=self.stride) spec_real = conv1d(x, self.wcos, stride=self.stride) # Doing STFT", "\"\"\" def __init__(self, sr=22050, hop_length=512, fmin=32.70, fmax=None, n_bins=84, bins_per_octave=12, norm=1,", "last axis, since dct applies to the frequency axis x_shape", "self.trainable==True: wsin = torch.nn.Parameter(wsin, requires_grad=self.trainable) wcos = torch.nn.Parameter(wcos, requires_grad=self.trainable) self.register_parameter('wsin',", "Converting kernels from numpy arrays to torch tensors wsin =", "audio. It is used to calculate the correct ``fmin`` and", "a batch of waveforms to CQT spectrograms. Parameters ---------- x_real", "the window sumsqure for division # Only need to create", "tensor of MFCCs. shape = ``(num_samples, n_mfcc, time_steps)``. Examples --------", "freq_scale='no', center=True, pad_mode='reflect', fmin=50, fmax=6000, sr=22050): super().__init__() self.stride = hop_length", "log_spec.flatten(1).max(1)[0].unsqueeze(1).unsqueeze(1) log_spec = torch.max(log_spec, batch_wise_max - self.top_db) return log_spec def", "audio to reduce the CQT kernel size. The result with", "= time() # self.lowpass_filter = torch.tensor( # create_lowpass_filter( # band_center", "a batch of waveforms to CQT spectrograms. Parameters ---------- x", "V return V.permute(0,2,1) # swapping back the time axis and", "output_format or self.output_format self.num_samples = x.shape[-1] x = broadcast_dim(x) if", "melspec) losses.append(loss.item()) loss.backward() optimizer.step() # Check conditions if not loss.isfinite():", "output_format : str Determine the return type. 
'Magnitude' will return", "if verbose==True: print(\"STFT filter created, time used = {:.4f} seconds\".format(time()-start))", "self.stride = hop_length self.center = center self.pad_mode = pad_mode self.n_fft", "window will be put in the middle, and paddings at", "real[:, self.pad_amount:-self.pad_amount] else: if self.center: real = real[:, self.pad_amount:self.pad_amount +", "bins_per_octave=12, norm=True, basis_norm=1, window='hann', pad_mode='reflect', trainable_STFT=False, trainable_CQT=False, output_format='Complex', earlydownsample=True, verbose=True):", ">>> spec_layer = Spectrogram.STFT() >>> specs = spec_layer(x) \"\"\" def", "means ``n_fft//2+1`` bins. hop_length : int The hop (or stride)", "freq_bins, time_steps, 2)``; 'Phase' will return the phase of the", "to waveforms based on the \"fast Griffin-Lim\"[1]. This Griffin Lim", "2) * 2 V = 2 * V return V.permute(0,2,1)", "input audio. It is used to calucate the correct ``fmin``", "not. If ``False``, the time index is the beginning of", "of waveforms. Examples -------- >>> spec_layer = Spectrogram.iSTFT() >>> specs", "dim same as log_spec so that it can be broadcasted", "= torch.empty((*S.shape,2), device=self.device) angles[:, :,:,0] = torch.cos(2 * np.pi *", "below is for developing purpose # Please don't use the", "is 'reflect'. inverse : bool To activate the iSTFT module", "str The windowing function for CQT. It uses ``scipy.signal.get_window``, please", "a batch of waveforms to Mel spectrograms. Parameters ---------- x", "cqt_kernels_imag) print(\"STFT kernels created, time used = {:.4f} seconds\".format(time()-start)) def", "of the input signal. 
This algorithm first extracts Mel spectrograms", "fmin=32.70, fmax=None, n_bins=84, bins_per_octave=12, norm=True, basis_norm=1, window='hann', pad_mode='reflect', earlydownsample=True, trainable=False,", "keep downsampling the input audio by a factor of 2", "trainable_STFT : bool Determine if the time to frequency domain", "means ``n_fft//2+1`` bins Please make sure the value is the", "elif self.pad_mode == 'reflect': self.padding = nn.ReflectionPad1d(self.n_fft//2) def forward(self,x,output_format=None): \"\"\"", "broadcast_dim(x_imag) x_real.transpose_(1,2) # Prepare the right shape to do inverse", "function is to calculate the short-time Fourier transform (STFT) of", "signal. Input signal should be in either of the following", "used = {:.4f} seconds\".format(time()-start)) if trainable_STFT: wsin = torch.nn.Parameter(wsin, requires_grad=trainable_kernels)", "functions. The default value is 'hann' pad_mode : str The", "elif self.pad_mode == 'reflect': padding = nn.ReflectionPad1d(self.kernel_width//2) x = padding(x)", "downsampling_by_2(x_down, self.lowpass_filter) CQT1 = get_cqt_complex(x_down, self.cqt_kernels_real, self.cqt_kernels_imag, hop, self.padding) CQT", "sr=sr) self.wsin = torch.tensor(wsin, dtype=torch.float) self.wcos = torch.tensor(wcos, dtype=torch.float) def", "to torch tensors wsin = torch.tensor(kernel_sin * window) wcos =", "model training. Default value is ``False``. verbose : bool If", "cqt_kernels_real = torch.tensor(cqt_kernels.real).unsqueeze(1) cqt_kernels_imag = torch.tensor(cqt_kernels.imag).unsqueeze(1) if trainable: cqt_kernels_real =", "filter_cutoff and creating CQT kernels Q = 1/(2**(1/bins_per_octave)-1) # Creating", "torch tensor Imaginary part of the signal. \"\"\" x_real =", "refresh_win=None): \"\"\" If your spectrograms only have ``n_fft//2+1`` frequency bins,", "with linear spacing. Please make sure the value is the", "The default value is 'hann'. 
center : bool Putting the", "then the discrete cosine transform is calcuated to obtain the", "pred_stft.view((*shape[:-2], n_freq, time)) if return_extras: return pred_stft, pred_mel.detach(), losses return", "= real[:,self.nonzero_indices].div(self.w_sum[self.nonzero_indices]) # Remove padding if length is None: if", "use ``onesided=False`` length : int To make sure the inverse", "not loss.isfinite(): raise OverflowError(\"Overflow encountered in Mel -> STFT optimization\")", "CQT[:,-self.n_bins:,:] # Removing unwanted top bins if self.norm: CQT =", "is used to calculate the correct ``fmin`` and ``fmax``. Setting", "CQT_imag = complex_mul((self.cqt_kernels_real, self.cqt_kernels_imag), (fourier_real, fourier_imag)) CQT = torch.stack((CQT_real,-CQT_imag),-1) if", "different time steps if hasattr(self, 'w_sum')==False or refresh_win==True: self.w_sum =", "exceeded the Nyquist frequency, \\ please reduce the n_bins'.format(fmax_t)) if", "transform.” (1992). Parameters ---------- sr : int The sampling rate", "sign for imaginary part elif output_format=='Phase': return torch.atan2(-spec_imag+0.0,spec_real) # +0.0", "resampling method proposed in [1]. Instead of convoluting the STFT", "np.sqrt(N / 2) * 2 V = 2 * V", "= output_format self.earlydownsample = earlydownsample # TODO: activate early downsampling", "iSTFT={}, trainable={}'.format( self.n_fft, (*self.wsin.shape,), self.iSTFT, self.trainable ) class MelSpectrogram(torch.nn.Module): \"\"\"This", "is why I call it version 2. [1] Brown, <NAME>.", "timesteps, you can increase the speed by setting ``refresh_win=False``. Else", "is used, the Mel scale is quasi-logarithmic. When ``True`` is", "broadcast_dim(x) if self.center: if self.pad_mode == 'constant': padding = nn.ConstantPad1d(self.pad_amount,", "Default is ``None``, therefore the higest CQT bin is inferred", "the highest CQT bin. 
Default is ``None``, therefore the higest", "Unless the input spectrograms have different time steps if hasattr(self,", "if onesided: X = extend_fbins(X) # extend freq X_real, X_imag", "get_cqt_complex(x, self.wcos, self.wsin, hop, self.padding) # Getting the top octave", "This alogrithm uses the resampling method proposed in [1]. Instead", "convoluting it with the small CQT kernel. Everytime the input", "the top octave CQT x_down = x # Preparing a", "downsample the input audio to reduce the CQT kernel size.", "freq the last axis, since dct applies to the frequency", "# Decide if the window function is trainable if trainable_window:", "str: return 'n_fft={}, Fourier Kernel size={}, iSTFT={}, trainable={}'.format( self.n_fft, (*self.wsin.shape,),", "is trainable or not. Default value is ``False``. verbose :", "requires_grad=trainable_kernels) cqt_kernels_imag = torch.nn.Parameter(cqt_kernels_imag, requires_grad=trainable_kernels) self.register_parameter('cqt_kernels_real', cqt_kernels_real) self.register_parameter('cqt_kernels_imag', cqt_kernels_imag) else:", "window : str The windowing function for STFT. It uses", "start = time() # Create filter windows for stft kernel_sin,", "with the downsampling factor, 2**(self.n_octaves-1) is make it # same", "GPU/RAM memory. When ``trainable=True`` and ``freq_scale!='no'``, there is no guarantee", "start = time() mel_basis = mel(sr, n_fft, n_mels, fmin, fmax,", "self.pad_mode == 'constant': padding = nn.ConstantPad1d(self.n_fft//2, 0) elif self.pad_mode ==", "are of the same length, please use ``refresh_win=None`` to increase", "the ``STFT`` class to save GPU/RAM memory. When ``trainable=True`` and", "nnAudio.librosa_functions import * from nnAudio.utils import * sz_float = 4", "batch of waveforms. 
Examples -------- >>> spec_layer = Spectrogram.iSTFT() >>>", "wcos = kernel_cos * window wsin = torch.tensor(wsin) wcos =", "hasattr(self, 'w_sum')==False or refresh_win==True: self.w_sum = torch_window_sumsquare(self.window_mask.flatten(), X.shape[2], self.stride, self.n_fft).flatten()", ":, :, 1] # broadcast dimensions to support 2D convolution", "extend freq X_real, X_imag = X[:, :, :, 0], X[:,", "self.pad_mode == 'reflect': if self.num_samples < self.pad_amount: raise AssertionError(\"Signal length", "optimization.\") break pred_stft = pred_stft.detach().clamp(eps) ** 0.5 pred_stft = pred_stft.view((*shape[:-2],", "To DO: # need to deal with the filter and", "CQT(CQT1992v2): \"\"\"An abbreviation for :func:`~nnAudio.Spectrogram.CQT1992v2`. Please refer to the :func:`~nnAudio.Spectrogram.CQT1992v2`", "__init__(self, sr=22050, n_fft=2048, n_mels=128, hop_length=512, window='hann', center=True, pad_mode='reflect', power=2.0, htk=False,", "# Preparing a new variable for downsampling for i in", "trainable_kernels: # Making all these variables trainable kernel_sin = torch.nn.Parameter(kernel_sin,", "2).\") padding = nn.ReflectionPad1d(self.pad_amount) x = padding(x) spec_imag = conv1d(x,", "value is 512. fmin : float The frequency for the", "(*self.mel_basis.shape,), self.trainable_mel, self.trainable_STFT ) def to_stft(self, melspec, max_steps=1000, loss_threshold=1e-8, grad_threshold=1e-7,", "complex value spectrograms. 
If you have the magnitude spectrograms, please", "self.top_db < 0: raise ParameterError('top_db must be non-negative') # make", "top minium bins if fmax_t > sr/2: raise ValueError('The top", "torch import torch.nn as nn from torch.nn.functional import conv1d, conv2d,", "(torch.pinverse(mel_basis) @ melspec).clamp(eps) pred_stft = nn.Parameter(pred_stft, requires_grad=True) sgd_kwargs[\"lr\"] = sgd_kwargs[\"lr\"]", "kernels do not need to be windowed window_mask = torch.tensor(window_mask).unsqueeze(0).unsqueeze(-1)", "kernel covering only the top octave. Then we keep downsampling", "need to create this window once to save time #", "is recommended to use the ``inverse`` method under the ``STFT``", "print(\"CQT kernels created, time used = {:.4f} seconds\".format(time()-start)) # print(\"Getting", "input is equavalent to the next lower octave. The kernel", "X_imag.unsqueeze(1) a1 = conv2d(X_real_bc, self.kernel_cos, stride=(1,1)) b2 = conv2d(X_imag_bc, self.kernel_sin,", "beginning of the STFT kernel, if ``True``, the time index", "spec_layer(x) \"\"\" def __init__(self, sr=22050, hop_length=512, fmin=32.70, fmax=None, n_bins=84, bins_per_octave=12,", "batch of waveforms to CQT spectrograms. Parameters ---------- x_real :", "hop_length=None, window='hann', freq_scale='no', center=True, fmin=50, fmax=6000, sr=22050, trainable_kernels=False, trainable_window=False, verbose=True,", "default value is 'ortho'. Normalization for DCT basis **kwargs Other", "CQT kernel size = {}'.format( (*self.wcos.shape,), (*self.cqt_kernels_real.shape,) ) class CQT1992v2(torch.nn.Module):", "of the original waveform, please set `length` as your intended", "= ``(num_samples, n_mfcc, time_steps)``. Examples -------- >>> spec_layer = Spectrogram.MFCC()", "Applying window functions to the Fourier kernels window_mask = torch.tensor(window_mask)", "bins if fmax_t > sr/2: raise ValueError('The top bin {}Hz", "Mel filter bank. 
trainable_mel : bool Determine if the Mel", "range(self.n_octaves-1): hop = hop//2 x_down = downsampling_by_2(x_down, self.lowpass_filter) CQT1 =", "kernel do not share the same memory... kernel_sin_inv = torch.cat((kernel_sin,", "size. The result with and without early downsampling are more", "== 'reflect': padding = nn.ReflectionPad1d(self.n_fft//2) x = padding(x) imag =", "# Caluate num of filter requires for the kernel #", "def forward(self,x, output_format=None): \"\"\" Convert a batch of waveforms to", "Default value if ``True``. Please make sure the value is", "x = self._power_to_db(x) x = self._dct(x, norm='ortho')[:,:self.m_mfcc,:] return x def", "Best-attempt spectrogram inversion \"\"\" def loss_fn(pred, target): pred = pred.unsqueeze(1)", "= torch.cat((kernel_sin, -kernel_sin[1:-1].flip(0)), 0) kernel_cos_inv = torch.cat((kernel_cos, kernel_cos[1:-1].flip(0)), 0) if", "/ 2) * 2 V = 2 * V return", "end freqs = fmin * 2.0 ** (np.r_[0:n_bins] / np.float(bins_per_octave))", "filter windows for stft start = time() # Creating kernel", "(self.n_mfcc) ) class CQT1992(torch.nn.Module): \"\"\" This alogrithm uses the method", "to support 2D convolution X_real_bc = X_real.unsqueeze(1) X_imag_bc = X_imag.unsqueeze(1)", "'hann'. 
freq_scale : 'linear', 'log', or 'no' Determine the spacing", "strictly positive') amin = torch.tensor([amin]) ref = torch.abs(torch.tensor([ref])) self.register_buffer('amin', amin)", "tprev[:,:,:] # Phase normalization angles = angles.div(torch.sqrt(angles.pow(2).sum(-1)).unsqueeze(-1) + 1e-16) #", "kernel and the forward kernel do not share the same", "input spectrogram contains only half of the n_fft # Use", "= torch.zeros(*pred_stft_shape, dtype=torch.float32, device=mel_basis.device).normal_().clamp_(eps) else: pred_stft = (torch.pinverse(mel_basis) @ melspec).clamp(eps)", ") ) # Broadcast the tensor to the shape that", "the end freqs = fmin * 2.0 ** (np.r_[0:n_bins] /", "for mel spectrogram start = time() mel_basis = mel(sr, n_fft,", "else: return torch.sqrt(CQT.pow(2).sum(-1)+1e-8) elif output_format=='Complex': return CQT elif output_format=='Phase': phase_real", "keep ``refresh_win=True`` \"\"\" if (hasattr(self, 'kernel_sin_inv') != True) or (hasattr(self,", "``freq_scale!='no'``, there is no guarantee that the inverse is perfect,", "for the original implmentation. ''' log_spec = 10.0 * torch.log10(torch.max(S,", "correct sampling rate is very important for calculating the correct", "time_steps, 2)``; 'Phase' will return the phase of the STFT", "window : str The windowing function for iSTFT. It uses", "is ``False`` norm : int Normalization for the CQT kernels.", "= hop_length self.center = center self.pad_mode = pad_mode self.n_fft =", "Nyquist frequency, \\ please reduce the n_bins'.format(fmax_t)) if self.earlydownsample ==", "V = 2 * V return V.permute(0,2,1) # swapping back", "refresh_win : bool Recalculating the window sum square. If you", "for center self.stft = STFT(n_fft=n_fft, freq_bins=None, hop_length=hop_length, window=window, freq_scale='no', center=center,", "earlydownsample=True, verbose=True): super().__init__() self.norm = norm # Now norm is", "It will be automatically broadcast to the right shape output_format", "``False``. 
output_format : str Determine the return type. ``Magnitude`` will", "tensor Imaginary part of the signal. \"\"\" x_real = broadcast_dim(x_real)", "do inverse x_imag.transpose_(1,2) # Prepare the right shape to do", "nn.Parameter(pred_stft, requires_grad=True) sgd_kwargs[\"lr\"] = sgd_kwargs[\"lr\"] * batch_size optimizer = torch.optim.SGD([pred_stft],", "window, fmax) self.register_buffer('lenghts', lenghts) cqt_kernels = fft(cqt_kernels)[:,:self.kernel_width//2+1] print(\"CQT kernels created,", "True: # Do early downsampling if this argument is True", "freq_bins,time_steps, 2)``. The complex number is stored as ``(real, imag)``", "than the original 1992 algorithm, that is why I call", "lenghts = np.ceil(Q * sr / freqs) lenghts = torch.tensor(lenghts).float()", "Default value is ``False`` output_format : str Determine the return", "output=0 else: return torch.sqrt(spec) elif output_format=='Complex': return torch.stack((spec_real,-spec_imag), -1) #", "function is same as the :func:`~nnAudio.Spectrogram.iSTFT` class, which is to", "[] for i in range(max_steps): optimizer.zero_grad() pred_mel = mel_basis @", "bin. If freq_scale is ``no``, this argument does nothing. fmax", "CQT1 = get_cqt_complex(x_down, self.cqt_kernels_real, self.cqt_kernels_imag, hop, self.padding) CQT = torch.cat((CQT1,", "possible windowing functions. The default value is 'hann'. 
Please make", "want to use `inverse`\") assert X.dim()==4 , \"Inverse iSTFT only", "Examples -------- >>> spec_layer = Spectrogram.iSTFT() >>> specs = spec_layer(x)", "fmax) self.register_buffer('lenghts', lenghts) cqt_kernels = fft(cqt_kernels)[:,:self.kernel_width//2+1] print(\"CQT kernels created, time", "fmax_t = self.fmin_t*2**((bins_per_octave-1)/bins_per_octave) else: # Calculate the top bin frequency", "bool Determine if the frequency domain CQT kernel is trainable", "the right shape \"\"\" x = broadcast_dim(x) spec = self.stft(x,", "shape[-2], shape[-1] _, n_freq = mel_basis.shape melspec = melspec.detach().view(-1, n_mels,", "\"fast Griffin-Lim\"[1]. This Griffin Lim is a direct clone from", "verbose=verbose, **kwargs) # Create filter windows for stft start =", "power=2.0, htk=False, fmin=0.0, fmax=None, norm=1, trainable_mel=False, trainable_STFT=False, verbose=True, **kwargs): super().__init__()", "pad_mode='reflect', trainable_STFT=False, trainable_CQT=False, output_format='Complex', earlydownsample=True, verbose=True): super().__init__() self.norm = norm", "be ignored and ``n_bins`` will be calculated automatically. Default is", ", \"Inverse iSTFT only works for complex number,\" \\ \"make", "default_sgd_kwargs = dict(lr=1e3, momentum=0.9) if sgd_kwargs: default_sgd_kwargs.update(sgd_kwargs) sgd_kwargs = default_sgd_kwargs", "documentation for possible windowing functions. The default value is 'hann'", "a1 = conv2d(X_real_bc, self.kernel_cos_inv, stride=(1,1)) b2 = conv2d(X_imag_bc, self.kernel_sin_inv, stride=(1,1))", "\"\"\" Module containing all the spectrogram classes \"\"\" # 0.2.0", "hop_length=512, fmin=32.70, fmax=None, n_bins=84, bins_per_octave=12, norm=1, window='hann', center=True, pad_mode='reflect', trainable=False,", "middle, and paddings at the beginning # and ending are", "device to initialize this layer. Default value is 'cpu' Returns", "the top octave. 
Then we keep downsampling the input audio", "pred_stft = nn.Parameter(pred_stft, requires_grad=True) sgd_kwargs[\"lr\"] = sgd_kwargs[\"lr\"] * batch_size optimizer", "string The default value is 'ortho'. Normalization for DCT basis", "> sr/2: raise ValueError('The top bin {}Hz has exceeded the", "# Getting CQT Amplitude return torch.sqrt(CQT.pow(2).sum(-1)) elif output_format=='Complex': return CQT", "n_fft : int The window size. Default value is 2048.", "output with the downsampling factor, 2**(self.n_octaves-1) # is make it", "negative signs # ifft = e^(+2\\pi*j)*X # ifft(X_real) = (a1,", "real class Griffin_Lim(torch.nn.Module): \"\"\" Converting Magnitude spectrograms back to waveforms", "= conv1d(x, self.wsin, stride=self.stride) real = conv1d(x, self.wcos, stride=self.stride) return", "inverse kernel_sin, kernel_cos, _, _, window_mask = create_fourier_kernels(n_fft, win_length=win_length, freq_bins=n_fft,", "= {} default_stft_inversion_params = {} mel_inversion_params = mel_inversion_params or {}", "The hop (or stride) size. Default value is ``None`` which", "float The frequency for the lowest CQT bin. Default is", "lenghts = torch.tensor(lenghts).float() self.register_buffer('lenghts', lenghts) self.basis = basis # These", "norm arg is not functioning self.trainable = trainable self.hop_length =", "of timesteps, you can increase the speed by setting ``refresh_win=False``.", "coresponds to the note C0. fmax : float The frequency", "= X_imag.unsqueeze(1) a1 = conv2d(X_real_bc, self.kernel_cos_inv, stride=(1,1)) b2 = conv2d(X_imag_bc,", "The starting frequency for the lowest frequency bin. If freq_scale", "bool Determine if the STFT kenrels are trainable or not.", "as ``torch.nn.Module``. Parameters ---------- sr : int The sampling rate", "bins_per_octave=12, norm=1, window='hann', center=True, pad_mode='reflect', trainable=False, output_format='Magnitude', verbose=True): super().__init__() #", "The default value is ``False``. 
fmin : int The starting", "* W_i if norm == 'ortho': V[:, :, 0] /=", "as np from time import time from nnAudio.librosa_functions import *", "x): \"\"\" Convert a batch of waveforms to MFCC. Parameters", "which will remove ``n_fft//2`` samples from the start and the", "= min(bins_per_octave, n_bins) self.n_octaves = int(np.ceil(float(n_bins) / bins_per_octave)) # print(\"n_octaves", "``n_fft//4``. Please make sure the value is the same as", "== 'constant': # padding = nn.ConstantPad1d(self.n_fft//2, 0) # elif self.pad_mode", "torch tensor Real part of the signal. x_imag : torch", "as 1992 CQT = CQT*self.downsample_factor # Normalize again to get", "ref=1.0, amin=1e-10, top_db=80.0, **kwargs): super().__init__() self.melspec_layer = MelSpectrogram(sr=sr, verbose=verbose, **kwargs)", "norm=True, basis_norm=1, window='hann', pad_mode='reflect', earlydownsample=True, trainable=False, output_format='Magnitude', verbose=True): super().__init__() self.norm", "# Prepare the shape of window mask so that it", "waveforms. It only works for the complex value spectrograms. If", "for the kernel # n_octaves determines how many resampling requires", "# Calculate the lowest frequency bin for the top octave", "windows for stft start = time() # Creating kernel for", "CQT_real = conv1d(x, self.cqt_kernels_real, stride=self.hop_length) CQT_imag = conv1d(x, self.cqt_kernels_imag, stride=self.hop_length)", "When ``True`` is used, the Mel scale is logarithmic. The", "self.to_stft(melspec, **mel_inversion_params) recon_audio = self.stft.inverse(recon_stft, **stft_inversion_params) return recon_audio class MFCC(torch.nn.Module):", "this argument is True if verbose==True: print(\"Creating early downsampling filter", "domain # These cqt_kernel is already in the frequency domain", "for :func:`~nnAudio.Spectrogram.CQT1992v2`. 
Please refer to the :func:`~nnAudio.Spectrogram.CQT1992v2` documentation\"\"\" pass #", "else pred target = target.unsqueeze(1) if target.ndim == 3 else", "self.lowpass_filter) CQT1 = get_cqt_complex(x_down, self.cqt_kernels_real, self.cqt_kernels_imag, hop, self.padding) CQT =", "padding = nn.ReflectionPad1d(self.kernel_width//2) x = padding(x) # CQT CQT_real =", "windowing functions. The default value is 'hann'. Please make sure", "trainable or not. Default is ``False`` norm : int Normalization", "torch.nn.Parameter(self.kernel_sin, requires_grad=self.trainable) # self.kernel_cos = torch.nn.Parameter(self.kernel_cos, requires_grad=self.trainable) # Applying window", "loss_threshold=1e-8, grad_threshold=1e-7, random_start=False, sgd_kwargs=None, eps=1e-12, return_extras=False, verbose=None): \"\"\" Best-attempt spectrogram", "or 'Phase'``; Examples -------- >>> spec_layer = Spectrogram.CQT1992v2() >>> specs", "the calculation of a constant Q transform.” (1992). This function", "seconds\".format(time()-start)) # Calculate num of filter requires for the kernel", "correct frequency. n_fft : int The window size for the", "``True``, the gradients for STFT kernels will also be caluclated", "``n_fft//2+1`` bins Please make sure the value is the same", "def __init__(self, sr=22050, hop_length=512, fmin=220, fmax=None, n_bins=84, trainable_STFT=False, trainable_CQT=False, bins_per_octave=12,", "n_fft//2+1, timesteps)`` \"\"\" assert S.dim()==3 , \"Please make sure your", "Default value is 'reflect'. inverse : bool To activate the", "magnitude of the STFT result, shape = ``(num_samples, freq_bins,time_steps)``; ``Complex``", "[2] [1] <NAME>. “CONSTANT-Q TRANSFORM TOOLBOX FOR MUSIC PROCESSING.” (2010).", "important for calculating the correct frequency. 
trainable_kernels : bool Determine", "else: self.hop_length = hop_length # Creating window function for stft", "CQT kernel size = {}'.format( (*self.wcos.shape,), (*self.cqt_kernels_real.shape,) ) class CQT2010(torch.nn.Module):", "# Now norm is used to normalize the final CQT", "\",self.downsample_factor) # print(CQT.shape) # print(self.lenghts.view(-1,1).shape) # Normalizing the output with", "spectrograms. shape = ``(num_samples, freq_bins,time_steps)`` if ``output_format='Magnitude'``; shape = ``(num_samples,", "str: return 'STFT kernel size = {}, CQT kernel size", "{}'.format( (*self.wcos.shape,), (*self.cqt_kernels_real.shape,) ) class CQT1992v2(torch.nn.Module): \"\"\"This function is to", "conditions if not loss.isfinite(): raise OverflowError(\"Overflow encountered in Mel ->", "True): raise NameError(\"Please activate the iSTFT module by setting `iSTFT=True`", "10.0 * torch.log10(torch.max(S, self.amin)) log_spec -= 10.0 * torch.log10(torch.max(self.amin, self.ref))", "the spectrogram classes \"\"\" # 0.2.0 import torch import torch.nn", "beginning # and ending are required. if self.pad_mode == 'constant':", "for calculating the correct frequency. n_fft : int The window", "center : bool Putting the STFT keneral at the center", "grad_threshold and pred_stft.grad.max() < grad_threshold: if verbose: print(f\"Target max gradient", "self.trainable = trainable self.hop_length = hop_length self.center = center self.pad_mode", "STFT kenrels are trainable or not. If ``True``, the gradients", "CQT = torch.cat((CQT1, CQT),1) CQT = CQT[:,-self.n_bins:,:] # Removing unwanted", "Oct. 2013. Parameters ---------- n_fft : int The window size.", "CQT keneral at the center of the time-step or not.", "MFCC. 
Parameters ---------- x : torch tensor Input signal should", "to be windowed window_mask = torch.tensor(window_mask).unsqueeze(0).unsqueeze(-1) # kernel_sin and kernel_cos", "n_fft, window) need to be the same as the STFT", "torch.sin(2 * np.pi * rand_phase) # Initializing the rebuilt magnitude", "= pad_mode self.n_fft = n_fft # Create filter windows for", "the kernel # n_octaves determines how many resampling requires for", "int The starting frequency for the lowest Mel filter bank.", "self.register_parameter('wcos', wcos) # Prepare the shape of window mask so", "n_fft # basis_norm is for normalizing basis self.hop_length = hop_length", "N) W_r = torch.cos(k) W_i = torch.sin(k) V = Vc[:,", "= (a1, a2) # ifft(X_imag)*1j = (b1, b2)*1j # =", "Please refer to the :func:`~nnAudio.Spectrogram.CQT1992v2` documentation\"\"\" pass # The section", "target).pow(2).sum(-2).mean() return loss verbose = verbose or self.verbose # SGD", "that it can be broadcasted batch_wise_max = log_spec.flatten(1).max(1)[0].unsqueeze(1).unsqueeze(1) log_spec =", "in the last axis. Default value is 'Magnitude'. verbose :", "fmin_t = {}, n_filters = {}\".format(Q, self.fmin_t, n_filters)) basis, self.n_fft,", "therefore, the usage is same as ``torch.nn.Module``. Parameters ---------- sr", "# transitionBandwidth=0.001)) lowpass_filter = torch.tensor(create_lowpass_filter( band_center = 0.50, kernelLength=256, transitionBandwidth=0.001)", "self.register_parameter('cqt_kernels_imag', cqt_kernels_imag) else: self.register_buffer('cqt_kernels_real', cqt_kernels_real) self.register_buffer('cqt_kernels_imag', cqt_kernels_imag) if verbose==True: print(\"CQT", "``False``, the time index is the beginning of the iSTFT", "kernels created, time used = {:.4f} seconds\".format(time()-start)) else: pass def", "1. ``(len_audio)``\\n 2. ``(num_audio, len_audio)``\\n 3. ``(num_audio, 1, len_audio)`` The", "the right shape \"\"\" x = broadcast_dim(x) if self.center: if", "1. ``(len_audio)``\\n 2. 
``(num_audio, len_audio)``\\n 3. ``(num_audio, 1, len_audio)`` It", "To make sure the inverse STFT has the same output", "original implmentation. ''' x = x.permute(0,2,1) # make freq the", "the method proposed in [1]. Please refer to :func:`~nnAudio.Spectrogram.CQT1992v2` for", "== 'ortho': V[:, :, 0] /= np.sqrt(N) * 2 V[:,", "a batch of waveforms to spectrograms. Parameters ---------- x :", "0.2.0 import torch import torch.nn as nn from torch.nn.functional import", "self.n_octaves = int(np.ceil(float(n_bins) / bins_per_octave)) # print(\"n_octaves = \", self.n_octaves)", "# print(\"Getting cqt kernel done, n_fft = \",self.n_fft) # If", "'reflect'. trainable : bool Determine if the CQT kernels are", "fft(basis)[:,:self.n_fft//2+1] # Convert CQT kenral from time domain to freq", "torch.stft(inverse, self.n_fft, self.hop_length, win_length=self.win_length, window=self.w, pad_mode=self.pad_mode) # Phase update rule", "e^(+2\\pi*j)*X # ifft(X_real) = (a1, a2) # ifft(X_imag)*1j = (b1,", "= pad_mode self.n_bins = n_bins self.earlydownsample = earlydownsample # We", "output_format or self.output_format x = broadcast_dim(x) if self.earlydownsample==True: x =", "(pred - target).pow(2).sum(-2).mean() return loss verbose = verbose or self.verbose", "= downsampling_by_2(x_down, self.lowpass_filter) CQT1 = get_cqt_complex(x_down, self.cqt_kernels_real, self.cqt_kernels_imag, hop, self.padding)", "bin for the top octave kernel self.fmin_t = fmin*2**(self.n_octaves-1) remainder", "for calculating the correct frequency. hop_length : int The hop", "self.stft(x, output_format='Magnitude')**self.power melspec = torch.matmul(self.mel_basis, spec) return melspec def extra_repr(self)", "following classes # class DFT(torch.nn.Module): \"\"\" Experimental feature before `torch.fft`", "audio is trainable or not. Default is ``False`` trainable_CQT :", "banks. 
The filter banks maps the n_fft to mel bins.", "### --------------------------- Spectrogram Classes ---------------------------### class STFT(torch.nn.Module): \"\"\"This function is", "\"\"\" # To DO: # need to deal with the", "the amplitude with n_fft real /= (self.n_fft) # Overlap and", "value is the same as the forward STFT. center :", "make the W_r and W_i trainable here k = -", "2D Conv kernel_sin = torch.tensor(kernel_sin, dtype=torch.float).unsqueeze(-1) kernel_cos = torch.tensor(kernel_cos, dtype=torch.float).unsqueeze(-1)", "fmax=fmax, sr=sr, verbose=verbose) kernel_sin = torch.tensor(kernel_sin, dtype=torch.float) kernel_cos = torch.tensor(kernel_cos,", "= mel_basis.shape melspec = melspec.detach().view(-1, n_mels, time) if random_start: pred_stft_shape", "= real[:, self.pad_amount:-self.pad_amount] else: if self.center: real = real[:, self.pad_amount:self.pad_amount", "class CQT2010(torch.nn.Module): \"\"\" This algorithm is using the resampling method", "rate is very important for calculating the correct frequency. trainable_kernels", "def forward(self,x,output_format=None): \"\"\" Convert a batch of waveforms to CQT", "optimization.\") break if grad_threshold and pred_stft.grad.max() < grad_threshold: if verbose:", "SGD arguments default_sgd_kwargs = dict(lr=1e3, momentum=0.9) if sgd_kwargs: default_sgd_kwargs.update(sgd_kwargs) sgd_kwargs", "= [] for i in range(max_steps): optimizer.zero_grad() pred_mel = mel_basis", "speed. \"\"\" if refresh_win==None: refresh_win=self.refresh_win assert X.dim()==4 , \"Inverse iSTFT", "(STFT) # We set the frequency range in the CQT", "'Magnitude' will return the magnitude of the STFT result, shape", "be used for _power_to_db if amin <= 0: raise ParameterError('amin", "conv1d(x_real, self.wcos, stride=self.stride) a2 = conv1d(x_real, self.wsin, stride=self.stride) b1 =", "filter ...\", end='\\r') start = time() # self.lowpass_filter = torch.tensor(", "is the same as the forward STFT. 
fmax : int", "leads to error in calculating phase def inverse(self, X, onesided=True,", "in librosa. window : str The windowing function for CQT.", "trainable_STFT : bool Determine if the STFT kenrels are trainable", "Making everything nn.Parameter, so that this model can support nn.DataParallel", "if trainable_STFT==True: # self.wsin = torch.nn.Parameter(self.wsin) # self.wcos = torch.nn.Parameter(self.wcos)", "get_window(window,int(win_length), fftbins=True) # For inverse, the Fourier kernels do not", ") class CQT1992(torch.nn.Module): \"\"\" This alogrithm uses the method proposed", "requires_grad=trainable_kernels) self.register_parameter('kernel_sin', kernel_sin) self.register_parameter('kernel_cos', kernel_cos) else: self.register_buffer('kernel_sin', kernel_sin) self.register_buffer('kernel_cos', kernel_cos)", "inverse : bool To activate the iSTFT module or not.", "frequency bins. Default is ``None``, which means ``n_fft//2+1`` bins. hop_length", "pred_stft, pred_mel.detach(), losses return pred_stft def inverse(self, melspec, mel_inversion_params=None, stft_inversion_params=None):", "is in the shape of (batch, freq_bins, timesteps)\" # Initializing", "= \", remainder) if remainder==0: # Calculate the top bin", "the output. If your input spectrograms X are of the", "# creating kernels for stft # self.cqt_kernels_real*=lenghts.unsqueeze(1)/self.kernel_width # Trying to", "Instead of convoluting the STFT results with a gigantic CQT", "freq_bins,time_steps)``. 
Examples -------- >>> spec_layer = Spectrogram.MelSpectrogram() >>> specs =", "if self.center: if self.pad_mode == 'constant': padding = nn.ConstantPad1d(self.n_fft//2, 0)", "recon_audio = self.stft.inverse(recon_stft, **stft_inversion_params) return recon_audio class MFCC(torch.nn.Module): \"\"\"This function", "window='hann', pad_mode='reflect', trainable_STFT=False, trainable_CQT=False, output_format='Complex', earlydownsample=True, verbose=True): super().__init__() self.norm =", "import * from nnAudio.utils import * sz_float = 4 #", "functioning self.trainable = trainable self.hop_length = hop_length self.center = center", "if self.center: if self.pad_mode == 'constant': padding = nn.ConstantPad1d(self.pad_amount, 0)", "= ``(num_samples, freq_bins,time_steps)``. Examples -------- >>> spec_layer = Spectrogram.MelSpectrogram() >>>", "= {} mel_inversion_params = mel_inversion_params or {} stft_inversion_params = stft_inversion_params", "alogrithm uses the method proposed in [1]. Please refer to", "parts spec_real = spec_real[:, :self.freq_bins, :] spec_imag = spec_imag[:, :self.freq_bins,", "Processing to Audio and Acoustics (pp. 1-4), Oct. 2013. Parameters", "return torch.atan2(-spec_imag+0.0,spec_real) # +0.0 removes -0.0 elements, which leads to", "self.pad_mode = pad_mode self.n_bins = n_bins self.earlydownsample = earlydownsample #", "the CQT relative to the downsampled input is equivalent to", "fmax, htk=htk, norm=norm) mel_basis = torch.tensor(mel_basis) if verbose==True: print(\"STFT filter", "break if grad_threshold and pred_stft.grad.max() < grad_threshold: if verbose: print(f\"Target", "# Only need to create this window once to save", "raise ValueError('The top bin {}Hz has exceeded the Nyquist frequency,", "CQT. It uses ``scipy.signal.get_window``, please refer to scipy documentation for", "seconds\".format(time()-start)) else: self.downsample_factor=1. 
# Preparing CQT kernels if verbose==True: print(\"Creating", "the original waveform, please set `length` as your intended waveform", "_, window = create_fourier_kernels(self.n_fft, window='ones', freq_scale='no') wsin = kernel_sin *", "return the magnitude of the STFT result, shape = ``(num_samples,", "conv1d(x, self.wcos, stride=self.stride) return (real, -imag) def inverse(self,x_real,x_imag): \"\"\" Convert", "value is ``False`` output_format : str Control the spectrogram output", "* \\ torch.sqrt(self.lenghts.view(-1,1)) CQT_imag = -conv1d(x, self.cqt_kernels_imag, stride=self.hop_length) * \\", "# Decide if the Fourier kernels are trainable if trainable_kernels:", ": bool Putting the iSTFT keneral at the center of", "wcos = torch.tensor(kernel_cos * window) cqt_kernels_real = torch.tensor(cqt_kernels.real.astype(np.float32)) cqt_kernels_imag =", "self._dct(x, norm='ortho')[:,:self.m_mfcc,:] return x def extra_repr(self) -> str: return 'n_mfcc", "STFT. sr : int The sampling rate for the input", "the beginning of the STFT kernel, if ``True``, the time", "= self.hop_length CQT = get_cqt_complex(x, self.wcos, self.wsin, hop, self.padding) #", "Setting the correct sampling rate is very important for calculating", "device=x.device)[None, :] * np.pi / (2 * N) W_r =", "during model training. Default value is ``False``. output_format : str", "if verbose: print(f\"Target max gradient of {grad_threshold} reached. Stopping optimization.\")", "algorithm is using the resampling method proposed in [1]. Instead", "constant Q transform.” (1992). This function is to calculate the", "stride=self.hop_length) fourier_imag = conv1d(x, self.wsin, stride=self.hop_length) # CQT CQT_real, CQT_imag", "Refer to https://github.com/zh217/torch-dct for the original implmentation. 
''' x =", "= overlap_add(real, self.stride) # Prepare the window sumsqure for division", "'cpu' Returns ------- spectrogram : torch.tensor It returns a tensor", "algorithm for the calculation of a constant Q transform.” (1992).", "self.hop_length = n_fft//4 else: self.hop_length = hop_length # Creating window", "transform.” (1992). Early downsampling factor is to downsample the input", "axis def forward(self, x): \"\"\" Convert a batch of waveforms", "spec_imag = conv1d(x, self.wsin, stride=self.stride) spec_real = conv1d(x, self.wcos, stride=self.stride)", "support 2D Conv kernel_sin = torch.tensor(kernel_sin, dtype=torch.float).unsqueeze(-1) kernel_cos = torch.tensor(kernel_cos,", "bool Recalculating the window sum square. If you have an", "calcuated to obtain the final MFCCs. Therefore, the Mel spectrogram", "forward STFT. hop_length : int The hop (or stride) size.", "use the ``inverse`` method under the ``STFT`` class to save", "calculating the correct frequency. trainable_kernels : bool Determine if the", "If ``False``, it suppresses all prints device : str Choose", "arrays to torch tensors wsin = torch.tensor(kernel_sin * window) wcos", "verbose) self.register_buffer('early_downsample_filter', early_downsample_filter) if verbose==True: print(\"Early downsampling filter created, \\", "# Prepare the window sumsqure for division # Only need", "calculated and the Mel filter banks will be updated during", "= x_shape[-1] v = torch.cat([x[:, :, ::2], x[:, :, 1::2].flip([2])],", "as the forward STFT. 
fmin : int The starting frequency", "runs faster than the original 1992 algorithm, that is why", "for CQT kernels will also be caluclated and the CQT", "reduce the n_bins'.format(fmax_t)) if self.earlydownsample == True: # Do early", "loss < loss_threshold: if verbose: print(f\"Target error of {loss_threshold} reached.", "torch.sqrt(CQT.pow(2).sum(-1)) elif output_format=='Complex': return CQT elif output_format=='Phase': phase_real = torch.cos(torch.atan2(CQT_imag,CQT_real))", "length] else: real = real[:, :length] return real class Griffin_Lim(torch.nn.Module):", "downsampling_by_2(x_down, self.lowpass_filter) CQT1 = get_cqt_complex(x_down, self.wcos, self.wsin, hop, self.padding) CQT", "time import time from nnAudio.librosa_functions import * from nnAudio.utils import", "created, \\ time used = {:.4f} seconds\".format(time()-start)) else: self.downsample_factor=1. #", "broadcast_dim(x) if self.center: if self.pad_mode == 'constant': padding = nn.ConstantPad1d(self.n_fft//2,", "CQT kernels ...\", end='\\r') start = time() # print(\"Q =", "output_format # creating kernels for CQT Q = 1/(2**(1/bins_per_octave)-1) if", "# Unless the input spectrograms have different time steps if", "x.shape N = x_shape[-1] v = torch.cat([x[:, :, ::2], x[:,", "if self.trainable==True: wsin = torch.nn.Parameter(wsin, requires_grad=self.trainable) wcos = torch.nn.Parameter(wcos, requires_grad=self.trainable)", "torch_window_sumsquare(self.window_mask.flatten(), X.shape[2], self.stride, self.n_fft).flatten() self.nonzero_indices = (self.w_sum>1e-10) else: pass real[:,", "logarithmic. The default value is ``False``. fmin : int The", "it suppresses all prints Returns ------- spectrogram : torch.tensor It", "= output_format or self.output_format x = broadcast_dim(x) if self.center: if", "the same except in the very low frequency region where", "<NAME>., & <NAME>. “A fast Griffin-Lim algorithm,” IEEE Workshop on", "frequency region where freq < 40Hz. 
\"\"\" def __init__(self, sr=22050,", "window_mask = torch.tensor(window_mask) wsin = kernel_sin * window_mask wcos =", "``(num_samples, freq_bins,time_steps)``; ``Complex`` will return the STFT result in complex", "self.trainable = trainable start = time() # Create filter windows", "= nn.ConstantPad1d(self.n_fft//2, 0) # elif self.pad_mode == 'reflect': # padding", "elif output_format=='Phase': phase_real = torch.cos(torch.atan2(CQT_imag,CQT_real)) phase_imag = torch.sin(torch.atan2(CQT_imag,CQT_real)) return torch.stack((phase_real,phase_imag),", "x_real : torch tensor Real part of the signal. x_imag", "the 1992 alogrithm [2] [1] Schörkhuber, Christian. “CONSTANT-Q TRANSFORM TOOLBOX", "freq < 40Hz. \"\"\" def __init__(self, sr=22050, hop_length=512, fmin=32.70, fmax=None,", "to the downsampled input is equivalent to the next lower", "value if ``True``. pad_mode : str The padding method. Default", "it can be broadcasted batch_wise_max = log_spec.flatten(1).max(1)[0].unsqueeze(1).unsqueeze(1) log_spec = torch.max(log_spec,", "self.basis=basis fft_basis = fft(basis)[:,:self.n_fft//2+1] # Convert CQT kenral from time", "= torch.tensor(lenghts).float() self.register_buffer('lenghts', lenghts) self.basis = basis # These cqt_kernel", "of the CQT kernel. Default value if ``True``. pad_mode :", "torch tensor Spectrogram of the shape ``(batch, n_fft//2+1, timesteps)`` \"\"\"", "to CQT spectrograms. Parameters ---------- x : torch tensor Input", "= melspec.detach().view(-1, n_mels, time) if random_start: pred_stft_shape = (batch_size, n_freq,", "-1) def extra_repr(self) -> str: return 'STFT kernel size =", "(*self.wcos.shape,), (*self.cqt_kernels_real.shape,) ) class CQT2010(torch.nn.Module): \"\"\" This algorithm is using", "= conv1d(x, self.wcos, stride=self.stride) return (real, -imag) def inverse(self,x_real,x_imag): \"\"\"", "= verbose or self.verbose # SGD arguments default_sgd_kwargs = dict(lr=1e3,", "constant Q transform.” (1992). 
Parameters ---------- sr : int The", "self.n_fft, self.hop_length, win_length=self.win_length, window=self.w, center=self.center) # wav2spec conversion rebuilt =", "2048. freq_bins : int Number of frequency bins. Default is", "the window function and prepare the shape for batch-wise-time-wise multiplication", "The correct shape will be inferred automatically if the input", "window function is trainable or not. Default value is ``False``.", "for Melspectrogram such as n_fft, n_mels, hop_length, and window Returns", "# creating kernels for CQT Q = 1/(2**(1/bins_per_octave)-1) if verbose==True:", "the output with the downsampling factor, 2**(self.n_octaves-1) # is make", "which device to initialize this layer. Default value is 'cpu'.", "reuslt, shape = ``(num_samples, freq_bins,time_steps, 2)``. The complex number is", "frequency domain cqt_kernels_real = torch.tensor(fft_basis.real.astype(np.float32)) cqt_kernels_imag = torch.tensor(fft_basis.imag.astype(np.float32)) if verbose==True:", "resampling requires for the CQT n_filters = min(bins_per_octave, n_bins) self.n_octaves", "pred target = target.unsqueeze(1) if target.ndim == 3 else target", "functions. The default value is 'hann'. freq_scale : 'linear', 'log',", "melspec def extra_repr(self) -> str: return 'Mel filter banks size", "= e^(+2\\pi*j)*X # ifft(X_real) = (a1, a2) # ifft(X_imag)*1j =", "= torch.nn.Parameter(mel_basis, requires_grad=trainable_mel) self.register_parameter('mel_basis', mel_basis) else: self.register_buffer('mel_basis', mel_basis) # if", "in the end freqs = fmin * 2.0 ** (np.r_[0:n_bins]", "Mel filter banks will be updated during model training. Default", "spectrograms X are of the same length, please use ``refresh_win=None``", "windowing function for CQT. 
It uses ``scipy.signal.get_window``, please refer to", "# Remove padding if length is None: if self.center: real", "trainable_STFT=False, trainable_CQT=False, output_format='Complex', earlydownsample=True, verbose=True): super().__init__() self.norm = norm #", "\\ self.earlydownsample = get_early_downsample_params(sr, hop_length, fmax_t, Q, self.n_octaves, verbose) self.register_buffer('early_downsample_filter',", "print(CQT.shape) # print(self.lenghts.view(-1,1).shape) # Normalizing the output with the downsampling", "must be non-negative') # make the dim same as log_spec", "as the normalization used in librosa. window : str The", "the forward STFT. momentum : float The momentum for the", "rand_phase = torch.randn(*S.shape, device=self.device) angles = torch.empty((*S.shape,2), device=self.device) angles[:, :,:,0]", "CQT kernel covering only the top octave. Then we keep", "index is the beginning of the STFT kernel, if ``True``,", "# Create filter windows for stft start = time() #", "size = {}, trainable_mel={}'.format( (*self.mel_basis.shape,), self.trainable_mel, self.trainable_STFT ) def to_stft(self,", "right shape \"\"\" x = broadcast_dim(x) if self.center: if self.pad_mode", "fits conv1d self.register_buffer('lowpass_filter', lowpass_filter[None,None,:]) if verbose==True: print(\"Low pass filter created,", "only works for complex number,\" \\ \"make sure our tensor", "win_length=self.win_length, window=self.w, center=self.center) # wav2spec conversion rebuilt = torch.stft(inverse, self.n_fft,", "wcos, self.bins2freq = create_fourier_kernels(n_fft=n_fft, freq_bins=n_fft, window=window, freq_scale=freq_scale, fmin=fmin, fmax=fmax, sr=sr)", "is 84. 
Will be ignored if ``fmax`` is not ``None``.", "elif output_format=='Complex': return CQT elif output_format=='Phase': phase_real = torch.cos(torch.atan2(CQT_imag,CQT_real)) phase_imag", "= torch.stft(inverse, self.n_fft, self.hop_length, win_length=self.win_length, window=self.w, pad_mode=self.pad_mode) # Phase update", "self.cqt_kernels_real, stride=self.hop_length) * \\ torch.sqrt(self.lenghts.view(-1,1)) CQT_imag = -conv1d(x, self.cqt_kernels_imag, stride=self.hop_length)", "Determine the spacing between each frequency bin. When `linear` or", "= padding(x) # CQT CQT_real = conv1d(x, self.cqt_kernels_real, stride=self.hop_length) *", "kernel size = {}, CQT kernel size = {}'.format( (*self.wcos.shape,),", "self.pad_mode = pad_mode self.momentum = momentum self.device = device if", "created, time used = {:.4f} seconds\".format(time()-start)) # print(\"Getting cqt kernel", "self.hop_length, win_length=self.win_length, window=self.w, center=self.center) # wav2spec conversion rebuilt = torch.stft(inverse,", "self.n_fft = n_fft self.power = power self.trainable_mel = trainable_mel self.trainable_STFT", "This alogrithm uses the method proposed in [1]. I slightly", "self.center: if self.pad_mode == 'constant': padding = nn.ConstantPad1d(self.n_fft//2, 0) elif", "n_fft=2048, win_length=None, freq_bins=None, hop_length=None, window='hann', freq_scale='no', center=True, fmin=50, fmax=6000, sr=22050,", "sr / freqs) lenghts = torch.tensor(lenghts).float() self.register_buffer('lenghts', lenghts) self.basis=basis fft_basis", "spectrogram part can be made trainable using ``trainable_mel`` and ``trainable_STFT``.", "= torch.cat((CQT1, CQT),1) CQT = CQT[:,-self.n_bins:,:] # Removing unwanted bottom", "or not. Default value is ``False``. verbose : bool If", "self.win_length=win_length if hop_length==None: self.hop_length = n_fft//4 else: self.hop_length = hop_length", "model training. Default value is ``False``. 
output_format : str Determine", "self.fmin_t = fmax_t/2**(1-1/bins_per_octave) # Adjusting the top minium bins if", "S.dim()==3 , \"Please make sure your input is in the", "= pad_mode self.momentum = momentum self.device = device if win_length==None:", "Amplitude CQT = torch.sqrt(CQT_real.pow(2)+CQT_imag.pow(2)) else: CQT = torch.sqrt(CQT_real.pow(2)+CQT_imag.pow(2)+1e-8) return CQT", "input is in the shape of (batch, freq_bins, timesteps)\" #", "is the same as the forward STFT. center : bool", "top bin {}Hz has exceeded the Nyquist frequency, \\ please", "center==True, the STFT window will be put in the middle,", "rate is very important for calculating the correct frequency. hop_length", "windows for stft kernel_sin, kernel_cos, self.bins2freq, self.bin_list, window_mask = create_fourier_kernels(n_fft,", "win_length=None, window='hann', center=True, pad_mode='reflect', momentum=0.99, device='cpu'): super().__init__() self.n_fft = n_fft", "center self.pad_mode = pad_mode self.n_fft = n_fft self.power = power", "\"\"\" def __init__(self, sr=22050, n_mfcc=20, norm='ortho', verbose=True, ref=1.0, amin=1e-10, top_db=80.0,", "so that it runs faster than the original 1992 algorithm,", "= target.unsqueeze(1) if target.ndim == 3 else target loss =", "stride=self.hop_length) * \\ torch.sqrt(self.lenghts.view(-1,1)) if output_format=='Magnitude': if self.trainable==False: # Getting", "Determine if the frequency domain CQT kernel is trainable or", "1) \"\"\" def __init__(self, n_fft=2048, freq_bins=None, hop_length=512, window='hann', freq_scale='no', center=True,", "spectrograms to waveforms. Parameters ---------- S : torch tensor Spectrogram", "broadcasted batch_wise_max = log_spec.flatten(1).max(1)[0].unsqueeze(1).unsqueeze(1) log_spec = torch.max(log_spec, batch_wise_max - self.top_db)", "0Hz and end at Nyquist frequency with linear spacing. Please", "center self.pad_mode = pad_mode self.norm = norm self.output_format = output_format", "a constant Q transform.” (1992). 
Early downsampling factor is to", "ignored if ``fmax`` is not ``None``. bins_per_octave : int Number", "``True``, the time index is the center of the iSTFT", "domain CQT kernel is trainable or not. Default is ``False``", "kernel_sin = torch.nn.Parameter(kernel_sin, requires_grad=trainable_kernels) kernel_cos = torch.nn.Parameter(kernel_cos, requires_grad=trainable_kernels) self.register_parameter('kernel_sin', kernel_sin)", "very important for calculating the correct frequency. trainable : bool", "there is no guarantee that the inverse is perfect, please", "= {:.4f} seconds\".format(time()-start)) else: pass def forward(self, x, output_format=None): \"\"\"", "return real class Griffin_Lim(torch.nn.Module): \"\"\" Converting Magnitude spectrograms back to", "self.cqt_kernels_imag, stride=self.hop_length) * \\ torch.sqrt(self.lenghts.view(-1,1)) if output_format=='Magnitude': if self.trainable==False: #", "self.n_fft, (*self.wsin.shape,), self.iSTFT, self.trainable ) class MelSpectrogram(torch.nn.Module): \"\"\"This function is", "relative to the downsampled input is equavalent to the next", "forward(self,x,output_format=None): \"\"\" Convert a batch of waveforms to CQT spectrograms.", "CQT kernels. ``1`` means L1 normalization, and ``2`` means L2", "self.kernel_width, lenghts = create_cqt_kernels(Q, sr, fmin, n_bins, bins_per_octave, norm, window,", "output_format=\"Magnitude\", verbose=verbose, **kwargs) # Create filter windows for stft start", "**kwargs) # Create filter windows for stft start = time()", "input audio to reduce the CQT kernel size. The result", "out for the positive and negative signs # ifft =", "freq_bins,time_steps, 2)`` if ``output_format='Complex' or 'Phase'``; Examples -------- >>> spec_layer", "tensor of spectrograms. shape = ``(num_samples, freq_bins,time_steps)`` if ``output_format='Magnitude'``; shape", "as the forward STFT. momentum : float The momentum for", "value is the same as the forward STFT. 
hop_length :", "freq_bins,time_steps)`` if ``output_format='Magnitude'``; ``shape = (num_samples, freq_bins,time_steps, 2)`` if ``output_format='Complex'", "= nn.ReflectionPad1d(self.kernel_width//2) x = padding(x) # CQT CQT_real = conv1d(x,", "self.n_fft).flatten() self.nonzero_indices = (self.w_sum>1e-10) else: pass real[:, self.nonzero_indices] = real[:,self.nonzero_indices].div(self.w_sum[self.nonzero_indices])", "domain cqt_kernels_real = torch.tensor(basis.real.astype(np.float32)).unsqueeze(1) cqt_kernels_imag = torch.tensor(basis.imag.astype(np.float32)).unsqueeze(1) if trainable: cqt_kernels_real", "as the STFT in order to obtain the correct inverse.", "for the stft layer. No need for center self.stft =", "it so that it runs faster than the original 1992", "Default value is 'cpu' Returns ------- spectrogram : torch.tensor It", "= {:.4f} seconds\".format(time()-start)) # Calculate num of filter requires for", "self.register_parameter('cqt_kernels_real', cqt_kernels_real) self.register_parameter('cqt_kernels_imag', cqt_kernels_imag) else: self.register_buffer('cqt_kernels_real', cqt_kernels_real) self.register_buffer('cqt_kernels_imag', cqt_kernels_imag) #", "n_mfcc : int The number of Mel-frequency cepstral coefficients norm", "CQT/self.kernel_width*torch.sqrt(self.lenghts.view(-1,1,1)) else: CQT = CQT*torch.sqrt(self.lenghts.view(-1,1,1)) if output_format=='Magnitude': # Getting CQT", "# Making everything nn.Parameter, so that this model can support", "possible # This will be used to calculate filter_cutoff and", "mel_inversion_params: mel_inversion_params = {**default_mel_inversion_params, **mel_inversion_params} if stft_inversion_params: stft_inversion_params = {**default_stft_inversion_params,", "0) elif self.pad_mode == 'reflect': self.padding = nn.ReflectionPad1d(self.n_fft//2) def forward(self,x,", "return torch.sqrt(CQT.pow(2).sum(-1)+1e-8) elif output_format=='Complex': return CQT elif output_format=='Phase': phase_real =", "\"make sure our tensor 
is in the shape of (batch,", "kernel_cos * window_mask if self.trainable==False: self.register_buffer('wsin', wsin) self.register_buffer('wcos', wcos) if", "specs = spec_layer(x) \"\"\" def __init__(self, sr=22050, hop_length=512, fmin=32.70, fmax=None,", "= real[:, self.pad_amount:self.pad_amount + length] else: real = real[:, :length]", "the forward STFT. hop_length : int The hop (or stride)", "waveforms to spectrograms. Parameters ---------- x : torch tensor Input", "self.pad_mode = pad_mode self.n_bins = n_bins self.output_format = output_format self.earlydownsample", "``Complex``. \"\"\" output_format = output_format or self.output_format self.num_samples = x.shape[-1]", "self.register_buffer('lenghts', lenghts) cqt_kernels = fft(cqt_kernels)[:,:self.kernel_width//2+1] print(\"CQT kernels created, time used", "setting same as librosa if win_length==None: win_length = n_fft if", "real = real[:, self.pad_amount:self.pad_amount + length] else: real = real[:,", "torch.zeros(*pred_stft_shape, dtype=torch.float32, device=mel_basis.device).normal_().clamp_(eps) else: pred_stft = (torch.pinverse(mel_basis) @ melspec).clamp(eps) pred_stft", "method under the ``STFT`` class to save GPU/RAM memory. When", "return pred_stft def inverse(self, melspec, mel_inversion_params=None, stft_inversion_params=None): default_mel_inversion_params = {}", "self.center: # if self.pad_mode == 'constant': # padding = nn.ConstantPad1d(self.n_fft//2,", "& <NAME>. “A fast Griffin-Lim algorithm,” IEEE Workshop on Applications", "pred_stft.detach().clamp(eps) ** 0.5 pred_stft = pred_stft.view((*shape[:-2], n_freq, time)) if return_extras:", "= min(bins_per_octave, n_bins) self.n_octaves = int(np.ceil(float(n_bins) / bins_per_octave)) if verbose==True:", "norm, window, fmax) self.register_buffer('lenghts', lenghts) cqt_kernels = fft(cqt_kernels)[:,:self.kernel_width//2+1] print(\"CQT kernels", "changed during the ``forward`` method. 
verbose : bool If ``True``,", "at Nyquist frequency with linear spacing. Please make sure the", "very low frequency region where freq < 40Hz. \"\"\" def", "mel_inversion_params=None, stft_inversion_params=None): default_mel_inversion_params = {} default_stft_inversion_params = {} mel_inversion_params =", "for the lowest Mel filter bank. fmax : int The", "freq < 40Hz. Parameters ---------- sr : int The sampling", "torch.tensor(fft_basis.real.astype(np.float32)) cqt_kernels_imag = torch.tensor(fft_basis.imag.astype(np.float32)) if verbose==True: print(\"CQT kernels created, time", "same as the forward STFT. hop_length : int The hop", "shape = ``(num_samples, freq_bins,time_steps)``; ``Complex`` will return the STFT result", ": int The starting frequency for the lowest Mel filter", "result as librosa CQT = CQT*torch.sqrt(self.lenghts.view(-1,1,1)) if output_format=='Magnitude': if self.trainable==False:", "shape will be inferred automatically if the input follows these", "0: raise ParameterError('top_db must be non-negative') # make the dim", "# Calculate the top bin frequency fmax_t = self.fmin_t*2**((remainder-1)/bins_per_octave) self.fmin_t", "at the beginning # and ending are required. if self.pad_mode", "be made trainable using ``trainable_mel`` and ``trainable_STFT``. It only support", "= refresh_win start = time() # Create the window function", "refer to :func:`~nnAudio.Spectrogram.CQT1992v2` for a more computational and memory efficient", "time used = {:.4f} seconds\".format(time()-start)) # creating kernels for stft", "use ``refresh_win=None`` to increase computational speed. 
\"\"\" if refresh_win==None: refresh_win=self.refresh_win", "verbose==True: print(\"STFT kernels created, time used = {:.4f} seconds\".format(time()-start)) if", "``(num_samples, freq_bins, time_steps, 2)``; 'Phase' will return the phase of", "and creating CQT kernels Q = 1/(2**(1/bins_per_octave)-1) # Creating lowpass", "= {:.4f} seconds\".format(time()-start)) print(\"Mel filter created, time used = {:.4f}", "shape of (batch, freq_bins, timesteps, 2).\"\\ \"\\nIf you have a", "pad_mode='reflect', power=2.0, htk=False, fmin=0.0, fmax=None, norm=1, trainable_mel=False, trainable_STFT=False, verbose=True, **kwargs):", "of the same length, please use ``refresh_win=None`` to increase computational", "self.register_buffer('wcos', wcos) if self.trainable==True: wsin = torch.nn.Parameter(wsin, requires_grad=self.trainable) wcos =", "CQT spectrograms. Parameters ---------- x : torch tensor Input signal", "Create filter windows for inverse kernel_sin, kernel_cos, _, _, window_mask", "for the normalization in the end freqs = fmin *", "support 2D convolution X_real_bc = X_real.unsqueeze(1) X_imag_bc = X_imag.unsqueeze(1) a1", "\\ torch.sqrt(self.lenghts.view(-1,1)) CQT_imag = -conv1d(x, self.cqt_kernels_imag, stride=self.hop_length) * \\ torch.sqrt(self.lenghts.view(-1,1))", "have an input with fixed number of timesteps, you can", "device if win_length==None: self.win_length=n_fft else: self.win_length=win_length if hop_length==None: self.hop_length =", "It returns a tensor of spectrograms. shape = ``(num_samples, freq_bins,time_steps)``.", "# Broadcast the tensor to the shape that fits conv1d", "+0.0 removes -0.0 elements, which leads to error in calculating", "division # Only need to create this window once to", "the STFT reuslt, shape = ``(num_samples, freq_bins,time_steps, 2)``. 
The complex", "pass filter ...\", end='\\r') start = time() lowpass_filter = torch.tensor(create_lowpass_filter(", "1992 CQT = CQT*self.downsample_factor # Normalize again to get same", "hop_length=512, window='hann', freq_scale='no', center=True, pad_mode='reflect', fmin=50, fmax=6000, sr=22050): super().__init__() self.stride", "can be used with nn.Parallel # self.kernel_sin = torch.nn.Parameter(self.kernel_sin, requires_grad=self.trainable)", "''' Refer to https://librosa.github.io/librosa/_modules/librosa/core/spectrum.html#power_to_db for the original implmentation. ''' log_spec", "center : bool Putting the CQT keneral at the center", "pass def forward(self, X, onesided=False, length=None, refresh_win=None): \"\"\" If your", "lenghts = torch.tensor(lenghts).float() self.register_buffer('lenghts', lenghts) self.basis=basis fft_basis = fft(basis)[:,:self.n_fft//2+1] #", "``shape = (num_samples, freq_bins,time_steps, 2)`` if ``output_format='Complex' or 'Phase'``; Examples", "torch.sqrt(CQT.pow(2).sum(-1)+1e-8) elif output_format=='Complex': return CQT elif output_format=='Phase': phase_real = torch.cos(torch.atan2(CQT[:,:,:,1],CQT[:,:,:,0]))", "str: return 'n_mfcc = {}'.format( (self.n_mfcc) ) class CQT1992(torch.nn.Module): \"\"\"", "* np.pi / (2 * N) W_r = torch.cos(k) W_i", "``no``, this argument does nothing. Please make sure the value", "Spectrogram of the shape ``(batch, n_fft//2+1, timesteps)`` \"\"\" assert S.dim()==3", "the ``n_bins`` and ``bins_per_octave``. If ``fmax`` is not ``None``, then", "use the following classes # class DFT(torch.nn.Module): \"\"\" Experimental feature", "caluclated and the STFT kernels will be updated during model", "# CQT CQT_real, CQT_imag = complex_mul((self.cqt_kernels_real, self.cqt_kernels_imag), (fourier_real, fourier_imag)) CQT", "equivalent to the next lower octave. 
The kernel creation process", "melspec = torch.matmul(self.mel_basis, spec) return melspec def extra_repr(self) -> str:", "= torch.cos(2 * np.pi * rand_phase) angles[:,:,:,1] = torch.sin(2 *", "{} default_stft_inversion_params = {} mel_inversion_params = mel_inversion_params or {} stft_inversion_params", "frequency region where freq < 40Hz. Parameters ---------- sr :", "for developing purpose # Please don't use the following classes", "lowpass_filter = torch.tensor(create_lowpass_filter( band_center = 0.50, kernelLength=256, transitionBandwidth=0.001) ) #", "= 0.5, kernelLength=256, transitionBandwidth=0.001 ) ) # Broadcast the tensor", "Mel-frequency cepstral coefficients (MFCCs) of the input signal. This algorithm", "# Watch out for the positive and negative signs #", "sgd_kwargs = default_sgd_kwargs mel_basis = self.mel_basis.detach() shape = melspec.shape batch_size,", "= {:.4f} seconds\".format(time()-start)) # creating kernels for stft # self.cqt_kernels_real*=lenghts.unsqueeze(1)/self.kernel_width", "trainable here k = - torch.arange(N, dtype=x.dtype, device=x.device)[None, :] *", "str Choose which device to initialize this layer. Default value", "TODO: activate early downsampling later if possible # This will", "rule angles[:,:,:] = rebuilt[:,:,:] - (self.momentum / (1 + self.momentum))", "``onesided=True``, else use ``onesided=False`` To make sure the inverse STFT", "freq_bins,time_steps)`` if ``output_format='Magnitude'``; shape = ``(num_samples, freq_bins,time_steps, 2)`` if ``output_format='Complex'", "== 3 else target loss = (pred - target).pow(2).sum(-2).mean() return", "trainable_CQT=False, output_format='Complex', earlydownsample=True, verbose=True): super().__init__() self.norm = norm # Now", "verbose=True): super().__init__() # Trying to make the default setting same", "back to waveforms based on the \"fast Griffin-Lim\"[1]. This Griffin", "the n_fft # Use extend_fbins function to get back another", "same as ``torch.nn.Module``. 
This alogrithm uses the resampling method proposed", "The padding method. Default value is 'reflect'. inverse : bool", "for inverse kernel_sin, kernel_cos, _, _, window_mask = create_fourier_kernels(n_fft, win_length=win_length,", "shape[-1] _, n_freq = mel_basis.shape melspec = melspec.detach().view(-1, n_mels, time)", "super().__init__() # Trying to make the default setting same as", "hop_length self.center = center self.pad_mode = pad_mode self.output_format = output_format", "lowpass_filter[None,None,:]) if verbose==True: print(\"Low pass filter created, time used =", "self.register_buffer('wsin', wsin) self.register_buffer('wcos', wcos) if self.trainable==True: wsin = torch.nn.Parameter(wsin, requires_grad=self.trainable)", "output_format # creating kernels for CQT Q = 1/(2**(1/bins_per_octave)-1) print(\"Creating", "be automatically broadcast to the right shape \"\"\" x =", "to calucate the correct ``fmin`` and ``fmax``. Setting the correct", "min(bins_per_octave, n_bins) self.n_octaves = int(np.ceil(float(n_bins) / bins_per_octave)) # print(\"n_octaves =", "is used, the bin will start at 0Hz and end", "self.bin_list, window_mask = create_fourier_kernels(n_fft, win_length=win_length, freq_bins=freq_bins, window=window, freq_scale=freq_scale, fmin=fmin, fmax=fmax,", "output_format=\"Complex\", verbose=True): super().__init__() # Trying to make the default setting", "self.pad_mode = pad_mode self.n_fft = n_fft self.power = power self.trainable_mel", "str: return 'Mel filter banks size = {}, trainable_mel={}'.format( (*self.mel_basis.shape,),", "spec_imag.pow(2) if self.trainable==True: return torch.sqrt(spec+1e-8) # prevent Nan gradient when", "spectrograms have different time steps if hasattr(self, 'w_sum')==False or refresh_win==True:", "another half if onesided: X = extend_fbins(X) # extend freq", "\"\"\" x = broadcast_dim(x) spec = self.stft(x, output_format='Magnitude')**self.power melspec =", "if amin <= 0: raise ParameterError('amin must be strictly 
positive')", "``None``, which means ``n_fft//2+1`` bins Please make sure the value", "n_fft self.freq_bins = freq_bins self.trainable = trainable self.pad_amount = self.n_fft", "updated during model training. Default value is ``False``. output_format :", "int The total numbers of CQT bins. Default is 84.", "1, n_fft, 1) to support 2D Conv kernel_sin = torch.tensor(kernel_sin,", "# Removing unwanted bottom bins # print(\"downsample_factor = \",self.downsample_factor) #", "= torch.stack((CQT_real,-CQT_imag),-1) if self.norm: CQT = CQT/self.kernel_width*torch.sqrt(self.lenghts.view(-1,1,1)) else: CQT =", "beginning of the iSTFT kernel, if ``True``, the time index", "elif output_format=='Phase': return torch.atan2(-spec_imag+0.0,spec_real) # +0.0 removes -0.0 elements, which", "= conv1d(x_real, self.wcos, stride=self.stride) a2 = conv1d(x_real, self.wsin, stride=self.stride) b1", "FOR MUSIC PROCESSING.” (2010). [2] Brown, <NAME>. and <NAME>. “An", "and negative signs # ifft = e^(+2\\pi*j)*X # ifft(X_real) =", "freq_scale is ``no``, this argument does nothing. sr : int", "to :func:`~nnAudio.Spectrogram.CQT1992v2` for a more computational and memory efficient version.", "to use the ``inverse`` method under the ``STFT`` class to", "in the real part real = a1 - b2 real", "is same as ``torch.nn.Module``. 
Parameters ---------- n_fft : int The", "class CQT1992v2(torch.nn.Module): \"\"\"This function is to calculate the CQT of", "= nn.ConstantPad1d(self.n_fft//2, 0) elif self.pad_mode == 'reflect': padding = nn.ReflectionPad1d(self.n_fft//2)", "'Phase'``; Examples -------- >>> spec_layer = Spectrogram.CQT2010v2() >>> specs =", "normalization angles = angles.div(torch.sqrt(angles.pow(2).sum(-1)).unsqueeze(-1) + 1e-16) # normalizing the phase", "Melspectrogram such as n_fft, n_mels, hop_length, and window Returns -------", "self.register_parameter('cqt_kernels_real', cqt_kernels_real) self.register_parameter('cqt_kernels_imag', cqt_kernels_imag) else: self.register_buffer('cqt_kernels_real', cqt_kernels_real) self.register_buffer('cqt_kernels_imag', cqt_kernels_imag) if", "is 12. trainable_STFT : bool Determine if the time to", "same as the forward STFT. window : str The windowing", "or not. If ``True``, the gradients for CQT kernels will", "shape = ``(num_samples, freq_bins,time_steps, 2)``. The complex number is stored", "recon_stft = self.to_stft(melspec, **mel_inversion_params) recon_audio = self.stft.inverse(recon_stft, **stft_inversion_params) return recon_audio", "Spectrogram.iSTFT() >>> specs = spec_layer(x) \"\"\" def __init__(self, n_fft=2048, win_length=None,", "and end at Nyquist frequency with linear spacing. center :", "float The momentum for the update rule. The default value", "to calculate the Melspectrogram of the input signal. Input signal", "(*self.wsin.shape,), self.iSTFT, self.trainable ) class MelSpectrogram(torch.nn.Module): \"\"\"This function is to", "output_format=='Complex': return torch.stack((CQT_real,CQT_imag),-1) elif output_format=='Phase': phase_real = torch.cos(torch.atan2(CQT_imag,CQT_real)) phase_imag =", ": int The ending frequency for the highest Mel filter", "of the output. 
If your input spectrograms X are of", "1, len_audio)`` It will be automatically broadcast to the right", "torch.matmul(self.mel_basis, spec) return melspec def extra_repr(self) -> str: return 'Mel", "inherits from ``torch.nn.Module``, therefore, the usage is same as ``torch.nn.Module``.", "x): \"\"\" Convert a batch of waveforms to Mel spectrograms.", "= trainable self.output_format = output_format # It will be used", "self.n_fft // 2 self.window = window self.win_length = win_length self.iSTFT", "if sgd_kwargs: default_sgd_kwargs.update(sgd_kwargs) sgd_kwargs = default_sgd_kwargs mel_basis = self.mel_basis.detach() shape", "direct clone from librosa.griffinlim. [1] <NAME>., <NAME>., & <NAME>. “A", "self.register_buffer('cqt_kernels_imag', cqt_kernels_imag) if verbose==True: print(\"CQT kernels created, time used =", "amin=1e-10, top_db=80.0, **kwargs): super().__init__() self.melspec_layer = MelSpectrogram(sr=sr, verbose=verbose, **kwargs) self.m_mfcc", "of bins per octave. Default is 12. trainable_STFT : bool", "can also be changed during the ``forward`` method. verbose :", "is 2048. n_iter=32 : int The number of iterations for", "trainable_mel=False, trainable_STFT=False, verbose=True, **kwargs): super().__init__() self.stride = hop_length self.center =", "not. Default is ``False`` norm : int Normalization for the", "calculate filter_cutoff and creating CQT kernels Q = 1/(2**(1/bins_per_octave)-1) #", ": int The ending frequency for the highest frequency bin.", "early downsampling later if possible # This will be used", "``fmax``. Setting the correct sampling rate is very important for", "is False to save GPU memory. 
fmin : int The", "broadcast_dim(x_real) x_imag = broadcast_dim(x_imag) x_real.transpose_(1,2) # Prepare the right shape", "complex number, shape = ``(num_samples, freq_bins,time_steps, 2)``; ``Phase`` will return", "CQT Amplitude return torch.sqrt(CQT.pow(2).sum(-1)) else: return torch.sqrt(CQT.pow(2).sum(-1)+1e-8) elif output_format=='Complex': return", "pred_stft = torch.zeros(*pred_stft_shape, dtype=torch.float32, device=mel_basis.device).normal_().clamp_(eps) else: pred_stft = (torch.pinverse(mel_basis) @", "n_fft=2048, win_length=None, freq_bins=None, hop_length=None, window='hann', freq_scale='no', center=True, pad_mode='reflect', iSTFT=False, fmin=50,", "self.hop_length = hop_length self.pad_mode = pad_mode self.n_bins = n_bins self.output_format", "model can be used with nn.Parallel # self.kernel_sin = torch.nn.Parameter(self.kernel_sin,", "torch.arange(N, dtype=x.dtype, device=x.device)[None, :] * np.pi / (2 * N)", "= broadcast_dim(x) if self.earlydownsample==True: x = downsampling_by_n(x, self.early_downsample_filter, self.downsample_factor) hop", "Choose which device to initialize this layer. Default value is", "self.output_format self.num_samples = x.shape[-1] x = broadcast_dim(x) if self.center: if", "# print(\"Getting cqt kernel done, n_fft = \",self.n_fft) # Preparing", "CQT = torch.stack((CQT_real,-CQT_imag),-1) if self.norm: CQT = CQT/self.kernel_width*torch.sqrt(self.lenghts.view(-1,1,1)) else: CQT", "= (-b2, b1) a1 = conv1d(x_real, self.wcos, stride=self.stride) a2 =", "function and prepare the shape for batch-wise-time-wise multiplication # Create", "Can be either ``Magnitude`` or ``Complex`` or ``Phase``. Default value", "more or less the same except in the very low", "CQT bin is inferred from the ``n_bins`` and ``bins_per_octave``. 
If", "kernel self.fmin_t = fmin*2**(self.n_octaves-1) remainder = n_bins % bins_per_octave #", "'w_sum')==False or refresh_win==True: self.w_sum = torch_window_sumsquare(self.window_mask.flatten(), X.shape[2], self.stride, self.n_fft).flatten() self.nonzero_indices", "inverse self.register_buffer('window_mask', window_mask.unsqueeze(0).unsqueeze(-1)) if verbose==True: print(\"STFT kernels created, time used", "value is 'cpu' Returns ------- spectrogram : torch.tensor It returns", ": int The hop (or stride) size. Default value is", "initialize this layer. Default value is 'cpu'. Returns ------- spectrogram", "reconstruct the waveforms inverse = torch.istft(S.unsqueeze(-1) * angles, self.n_fft, self.hop_length,", "# if trainable_mel==True: # self.mel_basis = torch.nn.Parameter(self.mel_basis) # if trainable_STFT==True:", "self.win_length=n_fft else: self.win_length=win_length if hop_length==None: self.hop_length = n_fft//4 else: self.hop_length", "b2 = conv2d(X_imag_bc, self.kernel_sin_inv, stride=(1,1)) # compute real and imag", "if hasattr(self, 'w_sum')==False or refresh_win==True: self.w_sum = torch_window_sumsquare(self.window_mask.flatten(), X.shape[2], self.stride,", "output_format=='Complex': return torch.stack((spec_real,-spec_imag), -1) # Remember the minus sign for", "the CQT relative to the downsampled input is equavalent to", "if self.pad_mode == 'constant': padding = nn.ConstantPad1d(self.kernel_width//2, 0) elif self.pad_mode", "correct frequency. trainable : bool Determine if the STFT kenrels", "= padding(x) imag = conv1d(x, self.wsin, stride=self.stride) real = conv1d(x,", "fmin=fmin, fmax=fmax, sr=sr) self.wsin = torch.tensor(wsin, dtype=torch.float) self.wcos = torch.tensor(wcos,", "for the highest Mel filter bank. trainable_mel : bool Determine", "small CQT kernel covering only the top octave. 
Then we", "same mag as 1992 CQT = CQT*self.downsample_factor # Normalize again", "''' log_spec = 10.0 * torch.log10(torch.max(S, self.amin)) log_spec -= 10.0", "variable for downsampling for i in range(self.n_octaves-1): hop = hop//2", "value is ``None`` which is equivalent to ``n_fft//4``. window :", "signal should be in either of the following shapes.\\n 1.", "super().__init__() self.melspec_layer = MelSpectrogram(sr=sr, verbose=verbose, **kwargs) self.m_mfcc = n_mfcc #", "spec_layer = Spectrogram.STFT() >>> specs = spec_layer(x) \"\"\" def __init__(self,", "will also be calculated and the Mel filter banks will", "n_bins=84, bins_per_octave=12, norm=True, basis_norm=1, window='hann', pad_mode='reflect', trainable_STFT=False, trainable_CQT=False, output_format='Complex', earlydownsample=True,", "print(\"iSTFT kernels created, time used = {:.4f} seconds\".format(time()-start)) else: pass", "in calculating phase def inverse(self, X, onesided=True, length=None, refresh_win=True): \"\"\"", "broadcast_dim(x) if self.earlydownsample==True: x = downsampling_by_n(x, self.early_downsample_filter, self.downsample_factor) hop =", "W_r and W_i trainable here k = - torch.arange(N, dtype=x.dtype,", "self.fmin_t, n_filters, bins_per_octave, norm=basis_norm, topbin_check=False) # This is for the", "= n_mfcc # attributes that will be used for _power_to_db", "sgd_kwargs=None, eps=1e-12, return_extras=False, verbose=None): \"\"\" Best-attempt spectrogram inversion \"\"\" def", "for the highest CQT bin. 
Default is ``None``, therefore the", "* sr / freqs) lenghts = torch.tensor(lenghts).float() self.register_buffer('lenghts', lenghts) self.basis", "= Spectrogram.iSTFT() >>> specs = spec_layer(x) \"\"\" def __init__(self, n_fft=2048,", "Spectrogram.CQT1992v2() >>> specs = spec_layer(x) \"\"\" def __init__(self, sr=22050, hop_length=512,", "for CQT Q = 1/(2**(1/bins_per_octave)-1) print(\"Creating CQT kernels ...\", end='\\r')", "self.register_buffer('ref', ref) self.top_db = top_db self.n_mfcc = n_mfcc def _power_to_db(self,", "bins_per_octave, norm=basis_norm, topbin_check=False) # This is for the normalization in", "bins_per_octave # print(\"remainder = \", remainder) if remainder==0: # Calculate", "0) kernel_cos_inv = torch.cat((kernel_cos, kernel_cos[1:-1].flip(0)), 0) if iSTFT: self.register_buffer('kernel_sin_inv', kernel_sin_inv.unsqueeze(-1))", "start = time() sr, self.hop_length, self.downsample_factor, early_downsample_filter, \\ self.earlydownsample =", "refer to the :func:`~nnAudio.Spectrogram.CQT1992v2` documentation\"\"\" pass # The section below", "pass # The section below is for developing purpose #", "``Magnitude``, ``Complex``, or ``Phase``. 
The output_format can also be changed", "shape = ``(num_samples, freq_bins,time_steps)`` if ``output_format='Magnitude'``; shape = ``(num_samples, freq_bins,time_steps,", "cqt_kernels, self.kernel_width, lenghts = create_cqt_kernels(Q, sr, fmin, n_bins, bins_per_octave, norm,", "calculate the Mel-frequency cepstral coefficients (MFCCs) of the input signal.", "output_format='Magnitude')**self.power melspec = torch.matmul(self.mel_basis, spec) return melspec def extra_repr(self) ->", "Decide if the Fourier kernels are trainable if trainable_kernels: #", "eps=1e-12, return_extras=False, verbose=None): \"\"\" Best-attempt spectrogram inversion \"\"\" def loss_fn(pred,", "{}, n_filters = {}\".format(Q, self.fmin_t, n_filters)) basis, self.n_fft, _ =", "= torch.tensor([amin]) ref = torch.abs(torch.tensor([ref])) self.register_buffer('amin', amin) self.register_buffer('ref', ref) self.top_db", "-> str: return 'Mel filter banks size = {}, trainable_mel={}'.format(", "Getting CQT Amplitude return torch.sqrt(CQT.pow(2).sum(-1)) elif output_format=='Complex': return CQT elif", "# This is for the normalization in the end freqs", "norm self.output_format = output_format # creating kernels for CQT Q", "n_filters, bins_per_octave, norm=basis_norm, topbin_check=False) # For normalization in the end", "The section below is for developing purpose # Please don't", "In this way, the inverse kernel and the forward kernel", "batch of waveforms to spectrums. Parameters ---------- x : torch", "# compute real and imag part. signal lies in the", ":] * np.pi / (2 * N) W_r = torch.cos(k)", "= mel_inversion_params or {} stft_inversion_params = stft_inversion_params or {} if", "[1] <NAME>. “CONSTANT-Q TRANSFORM TOOLBOX FOR MUSIC PROCESSING.” (2010). [2]", "if self.top_db is not None: if self.top_db < 0: raise", "[2] Brown, <NAME>. and <NAME>. 
“An efficient algorithm for the", "= torch.nn.Parameter(self.wcos) def forward(self, x): \"\"\" Convert a batch of", "self.verbose = verbose # Preparing for the stft layer. No", "n_bins=84, bins_per_octave=12, norm=True, basis_norm=1, window='hann', pad_mode='reflect', earlydownsample=True, trainable=False, output_format='Magnitude', verbose=True):", "kernel # n_octaves determines how many resampling requires for the", "hop_length, and window Returns ------- MFCCs : torch.tensor It returns", ": int The sampling rate for the input audio. It", "of spectrogram to be return. Can be either ``Magnitude`` or", "10e-8 # fudge factor for normalization ### --------------------------- Spectrogram Classes", "= basis # These cqt_kernel is already in the frequency", "for i in range(self.n_octaves-1): hop = hop//2 x_down = downsampling_by_2(x_down,", "are required. if self.pad_mode == 'constant': self.padding = nn.ConstantPad1d(self.n_fft//2, 0)", "be inferred autommatically if the input follows these 3 shapes.", "if the input follows these 3 shapes. Most of the", "# prevent Nan gradient when sqrt(0) due to output=0 else:", "the center of the iSTFT kernel. Default value if ``True``.", "= {**default_mel_inversion_params, **mel_inversion_params} if stft_inversion_params: stft_inversion_params = {**default_stft_inversion_params, **stft_inversion_params} recon_stft", "trainable_mel: # Making everything nn.Parameter, so that this model can", "attributes that will be used for _power_to_db if amin <=", "highest frequency bin. If freq_scale is ``no``, this argument does", "window_mask = create_fourier_kernels(n_fft, win_length=win_length, freq_bins=freq_bins, window=window, freq_scale=freq_scale, fmin=fmin, fmax=fmax, sr=sr,", "used with nn.Parallel # self.kernel_sin = torch.nn.Parameter(self.kernel_sin, requires_grad=self.trainable) # self.kernel_cos", "1-4), Oct. 2013. 
Parameters ---------- n_fft : int The window", "= torch.randn(*S.shape, device=self.device) angles = torch.empty((*S.shape,2), device=self.device) angles[:, :,:,0] =", "need to be windowed window_mask = torch.tensor(window_mask).unsqueeze(0).unsqueeze(-1) # kernel_sin and", "= X[:, :, :, 0], X[:, :, :, 1] #", "top_db self.n_mfcc = n_mfcc def _power_to_db(self, S): ''' Refer to", "= angles.div(torch.sqrt(angles.pow(2).sum(-1)).unsqueeze(-1) + 1e-16) # normalizing the phase # Using", "stft_inversion_params or {} if mel_inversion_params: mel_inversion_params = {**default_mel_inversion_params, **mel_inversion_params} if", "rate is very important for calculating the correct frequency. n_fft", "pad_mode='reflect', trainable=False, output_format='Magnitude', verbose=True): super().__init__() # norm arg is not", "* sr / freqs) lenghts = torch.tensor(lenghts).float() self.register_buffer('lenghts', lenghts) self.basis=basis", "= X_real.unsqueeze(1) X_imag_bc = X_imag.unsqueeze(1) a1 = conv2d(X_real_bc, self.kernel_cos, stride=(1,1))", "# padding = nn.ConstantPad1d(self.n_fft//2, 0) # elif self.pad_mode == 'reflect':", "same as the forward STFT. sr : int The sampling", "the time index is the beginning of the STFT kernel,", "else: # Calculate the top bin frequency fmax_t = self.fmin_t*2**((remainder-1)/bins_per_octave)", "of bins per octave. Default is 12. norm : int", "\"\"\" If your spectrograms only have ``n_fft//2+1`` frequency bins, please", "is 2048. freq_bins : int Number of frequency bins. Default", "__init__(self, sr=22050, hop_length=512, fmin=32.70, fmax=None, n_bins=84, bins_per_octave=12, norm=True, basis_norm=1, window='hann',", "the convention from librosa. 
This class inherits from ``torch.nn.Module``, therefore,", "torch.istft(S.unsqueeze(-1) * angles, self.n_fft, self.hop_length, win_length=self.win_length, window=self.w, center=self.center) # wav2spec", "transitionBandwidth=0.001)) lowpass_filter = torch.tensor(create_lowpass_filter( band_center = 0.50, kernelLength=256, transitionBandwidth=0.001) )", "fmin=50, fmax=6000, sr=22050, trainable_kernels=False, trainable_window=False, verbose=True, refresh_win=True): super().__init__() # Trying", "hop_length self.pad_mode = pad_mode self.n_bins = n_bins self.output_format = output_format", "be calculated automatically. Default is ``None`` n_bins : int The", "requires_grad=trainable_mel) self.register_parameter('mel_basis', mel_basis) else: self.register_buffer('mel_basis', mel_basis) # if trainable_mel==True: #", "type. 'Magnitude' will return the magnitude of the STFT result,", "self.cqt_kernels_imag, stride=self.hop_length) # Getting CQT Amplitude CQT = torch.sqrt(CQT_real.pow(2)+CQT_imag.pow(2)) return", "# self.lowpass_filter = torch.tensor( # create_lowpass_filter( # band_center = 0.50,", "center=True, pad_mode='reflect', power=2.0, htk=False, fmin=0.0, fmax=None, norm=1, trainable_mel=False, trainable_STFT=False, verbose=True,", "part elif output_format=='Phase': return torch.atan2(-spec_imag+0.0,spec_real) # +0.0 removes -0.0 elements,", "return CQT elif output_format=='Complex': return torch.stack((CQT_real,CQT_imag),-1) elif output_format=='Phase': phase_real =", "kernel_cos, self.bins2freq, self.bin_list, window_mask = create_fourier_kernels(n_fft, win_length=win_length, freq_bins=freq_bins, window=window, freq_scale=freq_scale,", ": int The window size. Default value is 2048. n_iter=32", "Mel scale is logarithmic. The default value is ``False``. fmin", "freq_bins : int Number of frequency bins. Default is ``None``,", "import torch.nn as nn from torch.nn.functional import conv1d, conv2d, fold", "this layer. 
Default value is 'cpu' \"\"\" def __init__(self, n_fft,", "else: self.downsample_factor=1. # Preparing CQT kernels if verbose==True: print(\"Creating CQT", "``(batch, n_fft//2+1, timesteps)`` \"\"\" assert S.dim()==3 , \"Please make sure", "right shape \"\"\" output_format = output_format or self.output_format x =", "Decide if the window function is trainable if trainable_window: window_mask", "bin spacing can be controlled by ``fmin`` and ``fmax``. If", "make it same mag as 1992 CQT = CQT*self.downsample_factor if", "Amplitude return torch.sqrt(CQT.pow(2).sum(-1)) else: return torch.sqrt(CQT.pow(2).sum(-1)+1e-8) elif output_format=='Complex': return CQT", "obtain the correct inverse. If trainability is not required, it", "length : int To make sure the inverse STFT has", "+ 1e-16) # normalizing the phase # Using the final", "value is ``False``. trainable_window : bool Determine if the window", "= time() # Create the window function and prepare the", "you have an input with fixed number of timesteps, you", "torch.cos(torch.atan2(CQT[:,:,:,1],CQT[:,:,:,0])) phase_imag = torch.sin(torch.atan2(CQT[:,:,:,1],CQT[:,:,:,0])) return torch.stack((phase_real,phase_imag), -1) class CQT(CQT1992v2): \"\"\"An", "final MFCCs. Therefore, the Mel spectrogram part can be made", "n_bins : int The total numbers of CQT bins. Default", "you have the magnitude spectrograms, please use :func:`~nnAudio.Spectrogram.Griffin_Lim`. Parameters ----------", "# norm arg is not functioning self.trainable = trainable self.hop_length", "= torch.tensor( # create_lowpass_filter( # band_center = 0.50, # kernelLength=256,", "stride=(1,1)) # compute real and imag part. signal lies in", "with a gigantic CQT kernel covering the full frequency spectrum,", "= {:.4f} seconds\".format(time()-start)) # print(\"Getting cqt kernel done, n_fft =", "a batch of waveforms to MFCC. Parameters ---------- x :", "is equivalent to ``n_fft//4``. 
Please make sure the value is", "Using the final phase to reconstruct the waveforms inverse =", "= ``(num_samples, freq_bins,time_steps)`` if ``output_format='Magnitude'``; shape = ``(num_samples, freq_bins,time_steps, 2)``", "torch.stack((CQT_real,-CQT_imag),-1) if self.norm: CQT = CQT/self.kernel_width*torch.sqrt(self.lenghts.view(-1,1,1)) else: CQT = CQT*torch.sqrt(self.lenghts.view(-1,1,1))", "kernels for stft # self.cqt_kernels_real*=lenghts.unsqueeze(1)/self.kernel_width # Trying to normalize as", ":length] return real class Griffin_Lim(torch.nn.Module): \"\"\" Converting Magnitude spectrograms back", "sure your input is in the shape of (batch, freq_bins,", "windows for stft wsin, wcos, self.bins2freq = create_fourier_kernels(n_fft=n_fft, freq_bins=n_fft, window=window,", "model training. Default value is ``False``. trainable_STFT : bool Determine", "proposed in [1]. Instead of convoluting the STFT results with", "It is used to calucate the correct ``fmin`` and ``fmax``.", "the lowest frequency bin. If freq_scale is ``no``, this argument", "norm=True, basis_norm=1, window='hann', pad_mode='reflect', trainable_STFT=False, trainable_CQT=False, output_format='Complex', earlydownsample=True, verbose=True): super().__init__()", "is ``False``. 
fmin : int The starting frequency for the", "number of Mel-frequency cepstral coefficients norm : string The default", "= rebuilt[:,:,:] - (self.momentum / (1 + self.momentum)) * tprev[:,:,:]", "= create_fourier_kernels(self.kernel_width, window='ones', freq_scale='no') # Converting kernels from numpy arrays", "\"\"\"This function is to calculate the Mel-frequency cepstral coefficients (MFCCs)", "hop_length=512, window='hann', center=True, pad_mode='reflect', power=2.0, htk=False, fmin=0.0, fmax=None, norm=1, trainable_mel=False,", "is the beginning of the STFT kernel, if ``True``, the", "pred_stft = pred_stft.view((*shape[:-2], n_freq, time)) if return_extras: return pred_stft, pred_mel.detach(),", "the very low frequency region where freq < 40Hz. \"\"\"", "to the note C0. fmax : float The frequency for", "fmax=None, n_bins=84, bins_per_octave=12, norm=1, window='hann', center=True, pad_mode='reflect', trainable=False, output_format='Magnitude', verbose=True):", "right shape output_format : str Control the type of spectrogram", "output_format = output_format or self.output_format self.num_samples = x.shape[-1] x =", "np.ceil(Q * sr / freqs) lenghts = torch.tensor(lenghts).float() self.register_buffer('lenghts', lenghts)", "please use with extra care. 
Parameters ---------- n_fft : int", "= int(np.ceil(float(n_bins) / bins_per_octave)) if verbose==True: print(\"num_octave = \", self.n_octaves)", "return torch.sqrt(spec+1e-8) # prevent Nan gradient when sqrt(0) due to", "from numpy arrays to torch tensors wsin = torch.tensor(kernel_sin *", "Normalization for DCT basis **kwargs Other arguments for Melspectrogram such", "CQT2010v2(torch.nn.Module): \"\"\"This function is to calculate the CQT of the", "torch.tensor(cqt_kernels.real.astype(np.float32)) cqt_kernels_imag = torch.tensor(cqt_kernels.imag.astype(np.float32)) if trainable_STFT: wsin = torch.nn.Parameter(wsin, requires_grad=trainable_kernels)", "will be inferred automatically if the input follows these 3", "part real = a1 - b2 real = real.squeeze(-2)*self.window_mask #", "= nn.ReflectionPad1d(self.n_fft//2) def forward(self,x, output_format=None): \"\"\" Convert a batch of", "lenghts) self.basis = basis # These cqt_kernel is already in", "pad_mode='reflect', fmin=50, fmax=6000, sr=22050): super().__init__() self.stride = hop_length self.center =", "[1] Schörkhuber, Christian. “CONSTANT-Q TRANSFORM TOOLBOX FOR MUSIC PROCESSING.” (2010).", "class to save GPU/RAM memory. When ``trainable=True`` and ``freq_scale!='no'``, there", "__init__(self, sr=22050, hop_length=512, fmin=32.70, fmax=None, n_bins=84, bins_per_octave=12, norm=1, window='hann', center=True,", "fftbins=True) # For inverse, the Fourier kernels do not need", "{}, trainable_mel={}'.format( (*self.mel_basis.shape,), self.trainable_mel, self.trainable_STFT ) def to_stft(self, melspec, max_steps=1000,", "shapes.\\n 1. ``(len_audio)``\\n 2. ``(num_audio, len_audio)``\\n 3. ``(num_audio, 1, len_audio)``", "torch.cos(2 * np.pi * rand_phase) angles[:,:,:,1] = torch.sin(2 * np.pi", "STFT. 
It uses ``scipy.signal.get_window``, please refer to scipy documentation for", "norm=1, trainable_mel=False, trainable_STFT=False, verbose=True, **kwargs): super().__init__() self.stride = hop_length self.center", "= torch.sin(torch.atan2(CQT[:,:,:,1],CQT[:,:,:,0])) return torch.stack((phase_real,phase_imag), -1) def extra_repr(self) -> str: return", "\"\"\" def __init__(self, n_fft, n_iter=32, hop_length=None, win_length=None, window='hann', center=True, pad_mode='reflect',", "= n_fft # Create filter windows for stft wsin, wcos,", "freq_bins=None, hop_length=hop_length, window=window, freq_scale='no', center=center, pad_mode=pad_mode, sr=sr, trainable=trainable_STFT, output_format=\"Magnitude\", verbose=verbose,", "trainable self.pad_amount = self.n_fft // 2 self.window = window self.win_length", ": int Normalization for the CQT kernels. ``1`` means L1", "using the resampling method proposed in [1]. Instead of convoluting", "* 2 V = 2 * V return V.permute(0,2,1) #", "is already in the frequency domain cqt_kernels_real = torch.tensor(basis.real.astype(np.float32)).unsqueeze(1) cqt_kernels_imag", "for the CQT n_filters = min(bins_per_octave, n_bins) self.n_octaves = int(np.ceil(float(n_bins)", "output_format=='Magnitude': if self.trainable==False: # Getting CQT Amplitude CQT = torch.sqrt(CQT_real.pow(2)+CQT_imag.pow(2))", "-> str: return 'STFT kernel size = {}, CQT kernel", "don't use the following classes # class DFT(torch.nn.Module): \"\"\" Experimental", "to calculate the short-time Fourier transform (STFT) of the input", "The default value is 'ortho'. 
Normalization for DCT basis **kwargs", "self.center = center self.pad_mode = pad_mode self.n_fft = n_fft self.freq_bins", "= n_bins self.output_format = output_format self.earlydownsample = earlydownsample # TODO:", "trainable if trainable_kernels: # Making all these variables trainable kernel_sin", "if verbose==True: print(\"Creating early downsampling filter ...\", end='\\r') start =", ": torch.tensor It returns a tensor of MFCCs. shape =", "CQT x_down = x # Preparing a new variable for", "already in the frequency domain cqt_kernels_real = torch.tensor(basis.real.astype(np.float32)).unsqueeze(1) cqt_kernels_imag =", "= norm self.output_format = output_format # creating kernels for CQT", "result with and without early downsampling are more or less", "moment. Input signal should be in either of the following", "freq_scale='no', center=center, pad_mode=pad_mode, sr=sr, trainable=trainable_STFT, output_format=\"Magnitude\", verbose=verbose, **kwargs) # Create", "conv1d(x, self.wsin, stride=self.stride) spec_real = conv1d(x, self.wcos, stride=self.stride) # Doing", "the 1992 alogrithm [2] [1] <NAME>. “CONSTANT-Q TRANSFORM TOOLBOX FOR", "be put in the middle, and paddings at the beginning", "Stopping optimization.\") break if grad_threshold and pred_stft.grad.max() < grad_threshold: if", "covering the full frequency spectrum, we make a small CQT", "window) wcos = torch.tensor(kernel_cos * window) cqt_kernels_real = torch.tensor(cqt_kernels.real.astype(np.float32)) cqt_kernels_imag", "# Normalize the amplitude with n_fft real /= (self.n_fft) #", "self.n_octaves) # Calculate the lowest frequency bin for the top", "usage is same as ``torch.nn.Module``. Parameters ---------- sr : int", "wcos = kernel_cos * window_mask if self.trainable==False: self.register_buffer('wsin', wsin) self.register_buffer('wcos',", "windowing functions. 
The default value is 'hann' pad_mode : str", "This class inherits from ``torch.nn.Module``, therefore, the usage is same", "it shows layer information. If ``False``, it suppresses all prints.", "to save GPU/RAM memory. When ``trainable=True`` and ``freq_scale!='no'``, there is", "Default value is 'reflect'. trainable : bool Determine if the", ": torch.tensor It returns a tensor of spectrograms. ``shape =", "broadcast_dim(x) if self.center: if self.pad_mode == 'constant': padding = nn.ConstantPad1d(self.kernel_width//2,", "self.output_format = output_format self.earlydownsample = earlydownsample # TODO: activate early", "from torch.nn.functional import conv1d, conv2d, fold import numpy as np", "which device to initialize this layer. Default value is 'cpu'", "= time() kernel_sin, kernel_cos, self.bins2freq, _, window = create_fourier_kernels(self.n_fft, window='ones',", "iSTFT self.trainable = trainable start = time() # Create filter", ": bool Recalculating the window sum square. If you have", "the default setting same as librosa if win_length==None: win_length =", "class MelSpectrogram(torch.nn.Module): \"\"\"This function is to calculate the Melspectrogram of", "DCT at the moment. Input signal should be in either", "output_format=='Phase': return torch.atan2(-spec_imag+0.0,spec_real) # +0.0 removes -0.0 elements, which leads", "prints device : str Choose which device to initialize this", "as your intended waveform length. By default, ``length=None``, which will", "n_freq, time)) if return_extras: return pred_stft, pred_mel.detach(), losses return pred_stft", "returns a tensor of spectrograms. shape = ``(num_samples, freq_bins,time_steps)``. 
Examples", "higest CQT bin is inferred from the ``n_bins`` and ``bins_per_octave``.", "used = {:.4f} seconds\".format(time()-start)) # Calculate num of filter requires", "spec_real = spec_real[:, :self.freq_bins, :] spec_imag = spec_imag[:, :self.freq_bins, :]", "spec_layer = Spectrogram.CQT1992v2() >>> specs = spec_layer(x) \"\"\" def __init__(self,", "if self.norm: CQT = CQT/self.n_fft*torch.sqrt(self.lenghts.view(-1,1,1)) else: CQT = CQT*torch.sqrt(self.lenghts.view(-1,1,1)) #", "factor is to downsample the input audio to reduce the", "the STFT window will be put in the middle, and", "``(num_samples, freq_bins,time_steps, 2)``; ``Phase`` will return the phase of the", "if the CQT kernels are trainable or not. If ``True``,", "and ``trainable_STFT``. It only support type-II DCT at the moment.", "2013. Parameters ---------- n_fft : int The window size. Default", "in the frequency domain cqt_kernels_real = torch.tensor(basis.real.astype(np.float32)).unsqueeze(1) cqt_kernels_imag = torch.tensor(basis.imag.astype(np.float32)).unsqueeze(1)", "the Melspectrogram of the input signal. Input signal should be", "def forward(self, S): \"\"\" Convert a batch of magnitude spectrograms", "full frequency spectrum, we make a small CQT kernel covering", "kernel. Everytime the input audio is downsampled, the CQT relative", "``fmax`` is not ``None``, then the argument ``n_bins`` will be", "real.squeeze(-2)*self.window_mask # Normalize the amplitude with n_fft real /= (self.n_fft)", "bank. trainable_mel : bool Determine if the Mel filter banks", "refresh_win=True): \"\"\" This function is same as the :func:`~nnAudio.Spectrogram.iSTFT` class,", "frequency for the highest frequency bin. If freq_scale is ``no``,", "n_bins) self.n_octaves = int(np.ceil(float(n_bins) / bins_per_octave)) # print(\"n_octaves = \",", "self.wsin, stride=self.stride) imag = a2+b1 real = a1-b2 return (real/self.n_fft,", "If freq_scale is ``no``, this argument does nothing. 
Please make", "will return the STFT result in complex number, shape =", "bins # print(\"downsample_factor = \",self.downsample_factor) # print(CQT.shape) # print(self.lenghts.view(-1,1).shape) #", "self.amin)) log_spec -= 10.0 * torch.log10(torch.max(self.amin, self.ref)) if self.top_db is", "relative to the downsampled input is equivalent to the next", "self.wsin, stride=self.hop_length) # CQT CQT_real, CQT_imag = complex_mul((self.cqt_kernels_real, self.cqt_kernels_imag), (fourier_real,", "use ``onesided=True``, else use ``onesided=False`` length : int To make", "if the frequency domain CQT kernel is trainable or not.", "are more or less the same except in the very", "default value is 'hann'. Please make sure the value is", "(self.n_fft) # Overlap and Add algorithm to connect all the", "str Determine the return type. ``Magnitude`` will return the magnitude", "alogrithm [2] [1] <NAME>. “CONSTANT-Q TRANSFORM TOOLBOX FOR MUSIC PROCESSING.”", "batch_size, n_mels, time = shape[0], shape[-2], shape[-1] _, n_freq =", "Else please keep ``refresh_win=True`` \"\"\" if (hasattr(self, 'kernel_sin_inv') != True)", "else target loss = (pred - target).pow(2).sum(-2).mean() return loss verbose", "def __init__(self, n_fft=2048, win_length=None, freq_bins=None, hop_length=None, window='hann', freq_scale='no', center=True, fmin=50,", ": str The windowing function for STFT. It uses ``scipy.signal.get_window``,", "-1) # Remember the minus sign for imaginary part elif", "Putting the STFT keneral at the center of the time-step", "nn.Parameter, so that this model can support nn.DataParallel mel_basis =", "length=None, refresh_win=None): \"\"\" If your spectrograms only have ``n_fft//2+1`` frequency", "x_imag : torch tensor Imaginary part of the signal. \"\"\"", "Default value is 512. 
window : str The windowing function", "n_bins, bins_per_octave, norm, window, fmax) self.register_buffer('lenghts', lenghts) cqt_kernels_real = torch.tensor(cqt_kernels.real).unsqueeze(1)", "CQT Amplitude CQT = torch.sqrt(CQT_real.pow(2)+CQT_imag.pow(2)) return CQT*torch.sqrt(self.lenghts.view(-1,1)) class CQT2010v2(torch.nn.Module): \"\"\"This", "_, window = create_fourier_kernels(self.kernel_width, window='ones', freq_scale='no') # Converting kernels from", "n_bins, bins_per_octave, norm, window, fmax) self.register_buffer('lenghts', lenghts) cqt_kernels = fft(cqt_kernels)[:,:self.kernel_width//2+1]", "# swapping back the time axis and freq axis def", "STFT fourier_real = conv1d(x, self.wcos, stride=self.hop_length) fourier_imag = conv1d(x, self.wsin,", "= torch_window_sumsquare(self.window_mask.flatten(), X.shape[2], self.stride, self.n_fft).flatten() self.nonzero_indices = (self.w_sum>1e-10) else: pass", "= {:.4f} seconds\".format(time()-start)) else: pass def forward(self, X, onesided=False, length=None,", "MFCCs : torch.tensor It returns a tensor of MFCCs. shape", "stride) size. Default value is 512. window : str The", "start = time() kernel_sin, kernel_cos, self.bins2freq, _, window = create_fourier_kernels(self.kernel_width,", "and Add algorithm to connect all the frames real =", "onesided : bool If your spectrograms only have ``n_fft//2+1`` frequency", "{:.4f} seconds\".format(time()-start)) else: pass if trainable_mel: # Making everything nn.Parameter,", "self.n_fft // 2 self.refresh_win = refresh_win start = time() #", "Use extend_fbins function to get back another half if onesided:", "is to convert spectrograms back to waveforms. It only works", "= trainable_STFT self.verbose = verbose # Preparing for the stft", "is no guarantee that the inverse is perfect, please use", "the moment. Input signal should be in either of the", "of waveforms to spectrograms. 
Parameters ---------- x : torch tensor", "= get_cqt_complex(x_down, self.wcos, self.wsin, hop, self.padding) CQT = torch.cat((CQT1, CQT),1)", "hop (or stride) size. Default value is 512. window :", "'reflect'. htk : bool When ``False`` is used, the Mel", "= kernel_sin * window wcos = kernel_cos * window wsin", "= trainable self.stride = hop_length self.center = center self.pad_mode =", "x_imag = broadcast_dim(x_imag) x_real.transpose_(1,2) # Prepare the right shape to", ": int The window size for the STFT. Default value", "can be broadcasted batch_wise_max = log_spec.flatten(1).max(1)[0].unsqueeze(1).unsqueeze(1) log_spec = torch.max(log_spec, batch_wise_max", "and ending are required. if self.pad_mode == 'constant': self.padding =", "if self.top_db < 0: raise ParameterError('top_db must be non-negative') #", "(batch_size, n_freq, time) pred_stft = torch.zeros(*pred_stft_shape, dtype=torch.float32, device=mel_basis.device).normal_().clamp_(eps) else: pred_stft", "CQT Q = 1/(2**(1/bins_per_octave)-1) if verbose==True: print(\"Creating CQT kernels ...\",", "librosa. window : str The windowing function for CQT. It", "STFT result in complex number, shape = ``(num_samples, freq_bins,time_steps, 2)``;", "output_format self.trainable = trainable self.stride = hop_length self.center = center", "str The padding method. Default value is 'reflect'. htk :", "the 1992 algorithm. Therefore, we can reuse the code from", "default value is 'hann'. center : bool Putting the CQT", "pred.unsqueeze(1) if pred.ndim == 3 else pred target = target.unsqueeze(1)", "to calculate the Mel-frequency cepstral coefficients (MFCCs) of the input", "only works for 1 single frame. i.e. 
input shape =", "elif output_format=='Complex': return torch.stack((CQT_real,CQT_imag),-1) elif output_format=='Phase': phase_real = torch.cos(torch.atan2(CQT_imag,CQT_real)) phase_imag", "section below is for developing purpose # Please don't use", "the time index is the center of the iSTFT kernel.", "documentation for possible windowing functions. The default value is 'hann'.", "shape \"\"\" x = broadcast_dim(x) spec = self.stft(x, output_format='Magnitude')**self.power melspec", "axis, since dct applies to the frequency axis x_shape =", "``False``, the time index is the beginning of the STFT", "STFT result in complex number, shape = ``(num_samples, freq_bins, time_steps,", "``(num_samples, n_mfcc, time_steps)``. Examples -------- >>> spec_layer = Spectrogram.MFCC() >>>", "\"\"\"This class is to convert spectrograms back to waveforms. It", "tprev = rebuilt # Saving previous rebuilt magnitude spec #", "the input audio is trainable or not. Default is ``False``", "padding = nn.ConstantPad1d(self.n_fft//2, 0) elif self.pad_mode == 'reflect': padding =", "do inverse # if self.center: # if self.pad_mode == 'constant':", "wcos) else: self.register_buffer('wsin', wsin) self.register_buffer('wcos', wcos) if trainable_CQT: cqt_kernels_real =", "== 'reflect': padding = nn.ReflectionPad1d(self.kernel_width//2) x = padding(x) # CQT", "\\ time used = {:.4f} seconds\".format(time()-start)) else: self.downsample_factor=1. # Preparing", "log_spec -= 10.0 * torch.log10(torch.max(self.amin, self.ref)) if self.top_db is not", "return CQT elif output_format=='Phase': phase_real = torch.cos(torch.atan2(CQT[:,:,:,1],CQT[:,:,:,0])) phase_imag = torch.sin(torch.atan2(CQT[:,:,:,1],CQT[:,:,:,0]))", "verbose==True: print(\"iSTFT kernels created, time used = {:.4f} seconds\".format(time()-start)) else:", "as the forward STFT. window : str The windowing function", "if norm == 'ortho': V[:, :, 0] /= np.sqrt(N) *", "method. Default value is 'reflect'. 
trainable : bool Determine if", "self.register_parameter('cqt_kernels_imag', cqt_kernels_imag) else: self.register_buffer('cqt_kernels_real', cqt_kernels_real) self.register_buffer('cqt_kernels_imag', cqt_kernels_imag) print(\"STFT kernels created,", "will be updated during model training. Default value is ``False``.", "for imaginary part elif output_format=='Phase': return torch.atan2(-spec_imag+0.0,spec_real) # +0.0 removes", "torch.tensor(create_lowpass_filter( band_center = 0.50, kernelLength=256, transitionBandwidth=0.001) ) # Broadcast the", "\"Inverse iSTFT only works for complex number,\" \\ \"make sure", "specs = spec_layer(x) \"\"\" def __init__(self, sr=22050, n_fft=2048, n_mels=128, hop_length=512,", "device='cpu'): super().__init__() self.n_fft = n_fft self.win_length = win_length self.n_iter =", "downsampled input is equivalent to the next lower octave. The", "kernels will be updated during model training. Default value is", "Applications of Signal Processing to Audio and Acoustics (pp. 1-4),", "frequency with linear spacing. center : bool Putting the STFT", "later in inverse self.register_buffer('window_mask', window_mask.unsqueeze(0).unsqueeze(-1)) if verbose==True: print(\"STFT kernels created,", "the STFT keneral at the center of the time-step or", "When `linear` or `log` is used, the bin spacing can", "equivalent to ``n_fft//4``. Please make sure the value is the", "top bin frequency fmax_t = self.fmin_t*2**((bins_per_octave-1)/bins_per_octave) else: # Calculate the", "onesided=False, length=None, refresh_win=None): \"\"\" If your spectrograms only have ``n_fft//2+1``", "this window once to save time # Unless the input", "the resampling method proposed in [1]. Instead of convoluting the", "Convert a batch of magnitude spectrograms to waveforms. 
Parameters ----------", "= conv2d(X_imag_bc, self.kernel_sin, stride=(1,1)) # compute real and imag part.", "self.kernel_cos, stride=(1,1)) b2 = conv2d(X_imag_bc, self.kernel_sin, stride=(1,1)) # compute real", "if self.center: real = real[:, self.pad_amount:-self.pad_amount] else: if self.center: real", "top octave kernel self.fmin_t = fmin*2**(self.n_octaves-1) remainder = n_bins %", "stft # self.cqt_kernels_real*=lenghts.unsqueeze(1)/self.kernel_width # Trying to normalize as librosa #", "= earlydownsample # We will activate early downsampling later if", "= torch.nn.Parameter(self.wsin) # self.wcos = torch.nn.Parameter(self.wcos) def forward(self, x): \"\"\"", "CQT bins. Default is 84. Will be ignored if ``fmax``", "calculating the correct frequency. n_mfcc : int The number of", "does nothing. Please make sure the value is the same", "``n_fft//2`` samples from the start and the end of the", "the following shapes.\\n 1. ``(len_audio)``\\n 2. ``(num_audio, len_audio)``\\n 3. ``(num_audio,", "n_mfcc def _power_to_db(self, S): ''' Refer to https://librosa.github.io/librosa/_modules/librosa/core/spectrum.html#power_to_db for the", "torch.nn as nn from torch.nn.functional import conv1d, conv2d, fold import", "of the n_fft # Use extend_fbins function to get back", "trainable self.hop_length = hop_length self.center = center self.pad_mode = pad_mode", "output_format=='Complex': return CQT elif output_format=='Phase': phase_real = torch.cos(torch.atan2(CQT[:,:,:,1],CQT[:,:,:,0])) phase_imag =", "(real, -imag) def inverse(self,x_real,x_imag): \"\"\" Convert a batch of waveforms", "“An efficient algorithm for the calculation of a constant Q", "with the downsampling factor, 2**(self.n_octaves-1) # is make it same", "S : torch tensor Spectrogram of the shape ``(batch, n_fft//2+1,", "self.pad_mode = pad_mode self.n_fft = n_fft self.freq_bins = freq_bins self.trainable", "torch.tensor(wcos, dtype=torch.float) def forward(self,x): \"\"\" Convert a batch of waveforms", "STFT 
kernels ...\", end='\\r') start = time() kernel_sin, kernel_cos, self.bins2freq,", "algorithm first extracts Mel spectrograms from the audio clips, then", "elif self.pad_mode == 'reflect': self.padding = nn.ReflectionPad1d(self.n_fft//2) def forward(self,x, output_format=None):", "covering only the top octave. Then we keep downsampling the", "amin <= 0: raise ParameterError('amin must be strictly positive') amin", "transformation kernel for the input audio is trainable or not.", "make the dim same as log_spec so that it can", "input with fixed number of timesteps, you can increase the", "trainable: cqt_kernels_real = torch.nn.Parameter(cqt_kernels_real, requires_grad=trainable_kernels) cqt_kernels_imag = torch.nn.Parameter(cqt_kernels_imag, requires_grad=trainable_kernels) self.register_parameter('cqt_kernels_real',", "center self.pad_mode = pad_mode self.n_fft = n_fft self.freq_bins = freq_bins", "a torch tensor if verbose==True: print(\"Creating low pass filter ...\",", "= shape[0], shape[-2], shape[-1] _, n_freq = mel_basis.shape melspec =", "verbose = verbose or self.verbose # SGD arguments default_sgd_kwargs =", "return torch.sqrt(CQT.pow(2).sum(-1)) elif output_format=='Complex': return CQT elif output_format=='Phase': phase_real =", "3 else target loss = (pred - target).pow(2).sum(-2).mean() return loss", "to obtain the correct inverse. If trainability is not required,", "84. Will be ignored if ``fmax`` is not ``None``. bins_per_octave", "Examples -------- >>> spec_layer = Spectrogram.CQT2010v2() >>> specs = spec_layer(x)", "the top minium bins if fmax_t > sr/2: raise ValueError('The", "Default is 12. 
trainable_STFT : bool Determine if the time", "mel spectrogram start = time() mel_basis = mel(sr, n_fft, n_mels,", "= torch.cos(torch.atan2(CQT[:,:,:,1],CQT[:,:,:,0])) phase_imag = torch.sin(torch.atan2(CQT[:,:,:,1],CQT[:,:,:,0])) return torch.stack((phase_real,phase_imag), -1) class CQT(CQT1992v2):", "torch.tensor(kernel_cos, dtype=torch.float).unsqueeze(-1) # Decide if the Fourier kernels are trainable", "spec) return melspec def extra_repr(self) -> str: return 'Mel filter", "torch.randn(*S.shape, device=self.device) angles = torch.empty((*S.shape,2), device=self.device) angles[:, :,:,0] = torch.cos(2", "if possible # This will be used to calculate filter_cutoff", "CQT elif output_format=='Phase': phase_real = torch.cos(torch.atan2(CQT_imag,CQT_real)) phase_imag = torch.sin(torch.atan2(CQT_imag,CQT_real)) return", "# These cqt_kernel is already in the frequency domain cqt_kernels_real", "if target.ndim == 3 else target loss = (pred -", "time used = {:.4f} seconds\".format(time()-start)) else: pass if trainable_mel: #", "for Mel filter banks will also be calculated and the", "window wsin = torch.tensor(wsin) wcos = torch.tensor(wcos) if verbose==True: print(\"STFT", "norm='ortho')[:,:self.m_mfcc,:] return x def extra_repr(self) -> str: return 'n_mfcc =", "hop//2 x_down = downsampling_by_2(x_down, self.lowpass_filter) CQT1 = get_cqt_complex(x_down, self.cqt_kernels_real, self.cqt_kernels_imag,", "signal. 
\"\"\" x_real = broadcast_dim(x_real) x_imag = broadcast_dim(x_imag) x_real.transpose_(1,2) #", "Getting CQT Amplitude CQT = torch.sqrt(CQT_real.pow(2)+CQT_imag.pow(2)) return CQT*torch.sqrt(self.lenghts.view(-1,1)) class CQT2010v2(torch.nn.Module):", "else: CQT = CQT*torch.sqrt(self.lenghts.view(-1,1,1)) # Normalizing the output with the", "cqt_kernels_real = torch.tensor(fft_basis.real.astype(np.float32)) cqt_kernels_imag = torch.tensor(fft_basis.imag.astype(np.float32)) if verbose==True: print(\"CQT kernels", "same result as librosa CQT = CQT*torch.sqrt(self.lenghts.view(-1,1,1)) if output_format=='Magnitude': if", "or not. By default, it is False to save GPU", "def extra_repr(self) -> str: return 'Mel filter banks size =", "of (batch, freq_bins, timesteps, 2).\"\\ \"\\nIf you have a magnitude", "rebuilt magnitude spectrogram rebuilt = torch.zeros(*angles.shape, device=self.device) for _ in", "# Phase update rule angles[:,:,:] = rebuilt[:,:,:] - (self.momentum /", "not. By default, it is False to save GPU memory.", "must be strictly positive') amin = torch.tensor([amin]) ref = torch.abs(torch.tensor([ref]))", "be used to calculate filter_cutoff and creating CQT kernels Q", "torch.cat([x[:, :, ::2], x[:, :, 1::2].flip([2])], dim=2) Vc = torch.rfft(v,", "'constant': self.padding = nn.ConstantPad1d(self.n_fft//2, 0) elif self.pad_mode == 'reflect': self.padding", "of the signal. x_imag : torch tensor Imaginary part of", "= self.stft(x, output_format='Magnitude')**self.power melspec = torch.matmul(self.mel_basis, spec) return melspec def", "we can reuse the code from the 1992 alogrithm [2]", "self.basis = basis # These cqt_kernel is already in the", "``(num_samples, freq_bins,time_steps)``. Examples -------- >>> spec_layer = Spectrogram.MelSpectrogram() >>> specs", "# Trying to normalize as librosa # self.cqt_kernels_imag*=lenghts.unsqueeze(1)/self.kernel_width print(\"Creating STFT", "[1]. Instead of convoluting the STFT results with a gigantic", "to waveforms. 
Parameters ---------- S : torch tensor Spectrogram of", "\"\"\" Convert a batch of waveforms to CQT spectrograms. Parameters", "freqs) lenghts = torch.tensor(lenghts).float() self.register_buffer('lenghts', lenghts) self.basis=basis fft_basis = fft(basis)[:,:self.n_fft//2+1]", "= torch.tensor(mel_basis) if verbose==True: print(\"STFT filter created, time used =", "- (self.momentum / (1 + self.momentum)) * tprev[:,:,:] # Phase", "grad_threshold=1e-7, random_start=False, sgd_kwargs=None, eps=1e-12, return_extras=False, verbose=None): \"\"\" Best-attempt spectrogram inversion", "The parameters (e.g. n_fft, window) need to be the same", "= ``(num_samples, freq_bins,time_steps)``; ``Complex`` will return the STFT result in", "Creating kernel for mel spectrogram start = time() mel_basis =", "default value is 'hann'. center : bool Putting the STFT", "function is to calculate the CQT of the input signal.", "# print(\"Q = {}, fmin_t = {}, n_filters = {}\".format(Q,", "factor, 2**(self.n_octaves-1) is make it # same mag as 1992", "kernel_sin) self.register_parameter('kernel_cos', kernel_cos) else: self.register_buffer('kernel_sin', kernel_sin) self.register_buffer('kernel_cos', kernel_cos) # Decide", "method. verbose : bool If ``True``, it shows layer information.", "spectrograms. Parameters ---------- x : torch tensor Input signal should", "a tensor of spectrograms. shape = ``(num_samples, freq_bins,time_steps)``. Examples --------", "Everytime the input audio is downsampled, the CQT relative to", "window wcos = kernel_cos * window wsin = torch.tensor(wsin) wcos", "< 40Hz. 
\"\"\" def __init__(self, sr=22050, hop_length=512, fmin=32.70, fmax=None, n_bins=84,", "# self.kernel_cos = torch.nn.Parameter(self.kernel_cos, requires_grad=self.trainable) # Applying window functions to", "if self.earlydownsample==True: x = downsampling_by_n(x, self.early_downsample_filter, self.downsample_factor) hop = self.hop_length", "ifft(X_imag)*1j = (b1, b2)*1j # = (-b2, b1) a1 =", "Input signal should be in either of the following shapes.\\n", "alogrithm uses the resampling method proposed in [1]. Instead of", "= fft(cqt_kernels)[:,:self.kernel_width//2+1] print(\"CQT kernels created, time used = {:.4f} seconds\".format(time()-start))", "\"\"\" Convert a batch of waveforms to spectrograms. Parameters ----------", "the CQT kernels. ``1`` means L1 normalization, and ``2`` means", "region where freq < 40Hz. Parameters ---------- sr : int", "2 * V return V.permute(0,2,1) # swapping back the time", "the right shape \"\"\" x = self.melspec_layer(x) x = self._power_to_db(x)", "to downsample the input audio to reduce the CQT kernel", "as librosa if win_length==None: win_length = n_fft if hop_length==None: hop_length", "the iSTFT module by setting `iSTFT=True` if you want to", "early downsampling are more or less the same except in", "same as ``torch.nn.Module``. Parameters ---------- sr : int The sampling", "of waveforms to CQT spectrograms. Parameters ---------- x_real : torch", "int(self.win_length), fftbins=True), device=device).float() def forward(self, S): \"\"\" Convert a batch", "shape[0], shape[-2], shape[-1] _, n_freq = mel_basis.shape melspec = melspec.detach().view(-1,", "your spectrograms only have ``n_fft//2+1`` frequency bins, please use ``onesided=True``,", "update rule. The default value is ``0.99``. device : str", "verbose : bool If ``True``, it shows layer information. 
If", "kernels for Short-Time Fourier Transform (STFT) # We set the", ": int The starting frequency for the lowest frequency bin.", "n_bins % bins_per_octave # print(\"remainder = \", remainder) if remainder==0:", "important for calculating the correct frequency. n_mfcc : int The", "STFT has the same output length of the original waveform,", "seconds\".format(time()-start)) # print(\"Getting cqt kernel done, n_fft = \",self.n_fft) #", "# self.cqt_kernels_real*=lenghts.unsqueeze(1)/self.kernel_width # Trying to normalize as librosa # self.cqt_kernels_imag*=lenghts.unsqueeze(1)/self.kernel_width", "window function is trainable if trainable_window: window_mask = torch.nn.Parameter(window_mask, requires_grad=trainable_window)", "__init__(self, n_fft=2048, win_length=None, freq_bins=None, hop_length=None, window='hann', freq_scale='no', center=True, pad_mode='reflect', iSTFT=False,", "wsin = kernel_sin * window_mask wcos = kernel_cos * window_mask", "N = x_shape[-1] v = torch.cat([x[:, :, ::2], x[:, :,", "banks are trainable or not. If ``True``, the gradients for", "CQT kernel covering the full frequency spectrum, we make a", "kernel_sin_inv = torch.cat((kernel_sin, -kernel_sin[1:-1].flip(0)), 0) kernel_cos_inv = torch.cat((kernel_cos, kernel_cos[1:-1].flip(0)), 0)", "= x.permute(0,2,1) # make freq the last axis, since dct", "**kwargs) self.m_mfcc = n_mfcc # attributes that will be used", "and end at Nyquist frequency with linear spacing. Please make", "note C0. fmax : float The frequency for the highest", "torch.nn.Parameter(wsin, requires_grad=trainable_kernels) wcos = torch.nn.Parameter(wcos, requires_grad=trainable_kernels) self.register_parameter('wsin', wsin) self.register_parameter('wcos', wcos)", "for calculating the correct frequency. 
trainable : bool Determine if", "Default is ``False`` norm : int Normalization for the CQT", "`log` is used, the bin spacing can be controlled by", "target loss = (pred - target).pow(2).sum(-2).mean() return loss verbose =", "convoluting the STFT results with a gigantic CQT kernel covering", "< loss_threshold: if verbose: print(f\"Target error of {loss_threshold} reached. Stopping", "real part real = a1 - b2 real = real.squeeze(-2)*self.window_mask", "save GPU/RAM memory. When ``trainable=True`` and ``freq_scale!='no'``, there is no", "very important for calculating the correct frequency. trainable_kernels : bool", "np.sqrt(N) * 2 V[:, :, 1:] /= np.sqrt(N / 2)", "of here. if verbose==True: print(\"Creating STFT kernels ...\", end='\\r') start", "time_steps)``. Examples -------- >>> spec_layer = Spectrogram.MFCC() >>> mfcc =", "a gigantic CQT kernel covering the full frequency spectrum, we", "is trainable if trainable_window: window_mask = torch.nn.Parameter(window_mask, requires_grad=trainable_window) self.register_parameter('window_mask', window_mask)", "'reflect': # padding = nn.ReflectionPad1d(self.n_fft//2) # x_real = padding(x_real) #", "layer. Default value is 'cpu' Returns ------- spectrogram : torch.tensor", "(*self.cqt_kernels_real.shape,) ) class CQT2010(torch.nn.Module): \"\"\" This algorithm is using the", "are trainable or not. If ``True``, the gradients for Mel", "is ``no``, this argument does nothing. Please make sure the", "``forward`` method. verbose : bool If ``True``, it shows layer", "Fourier kernels are trainable if trainable_kernels: # Making all these", "wcos) if self.trainable==True: wsin = torch.nn.Parameter(wsin, requires_grad=self.trainable) wcos = torch.nn.Parameter(wcos,", "is for normalizing basis self.hop_length = hop_length self.pad_mode = pad_mode", "size. Default value is 2048. 
n_iter=32 : int The number", "= torch.tensor(cqt_kernels.imag.astype(np.float32)) if trainable_STFT: wsin = torch.nn.Parameter(wsin, requires_grad=trainable_kernels) wcos =", "earlydownsample # TODO: activate early downsampling later if possible #", "usage is same as ``torch.nn.Module``. This alogrithm uses the method", "output. refresh_win : bool Recalculating the window sum square. If", "ignored and ``n_bins`` will be calculated automatically. Default is ``None``", "window_mask = torch.tensor(window_mask).unsqueeze(0).unsqueeze(-1) # kernel_sin and kernel_cos have the shape", "positive') amin = torch.tensor([amin]) ref = torch.abs(torch.tensor([ref])) self.register_buffer('amin', amin) self.register_buffer('ref',", "it version 2. [1] Brown, <NAME>. and <NAME>. “An efficient", "shape \"\"\" x = broadcast_dim(x) if self.center: if self.pad_mode ==", "already in the frequency domain cqt_kernels_real = torch.tensor(fft_basis.real.astype(np.float32)) cqt_kernels_imag =", "is same as ``torch.nn.Module``. Parameters ---------- sr : int The", "to https://librosa.github.io/librosa/_modules/librosa/core/spectrum.html#power_to_db for the original implmentation. ''' log_spec = 10.0", "(a1, a2) # ifft(X_imag)*1j = (b1, b2)*1j # = (-b2,", "the iSTFT keneral at the center of the time-step or", "iterations for Griffin-Lim. The default value is ``32`` hop_length :", ": bool If ``True``, it shows layer information. If ``False``,", "-= 10.0 * torch.log10(torch.max(self.amin, self.ref)) if self.top_db is not None:", "be caluclated and the CQT kernels will be updated during", "Default value is 'cpu' \"\"\" def __init__(self, n_fft, n_iter=32, hop_length=None,", "and Acoustics (pp. 1-4), Oct. 2013. 
Parameters ---------- n_fft :", ": 'linear', 'log', or 'no' Determine the spacing between each", "power self.trainable_mel = trainable_mel self.trainable_STFT = trainable_STFT self.verbose = verbose", "self.output_format = output_format # creating kernels for CQT Q =", "fixed number of timesteps, you can increase the speed by", "/ freqs) lenghts = torch.tensor(lenghts).float() self.register_buffer('lenghts', lenghts) self.basis=basis fft_basis =", "conv1d self.register_buffer('lowpass_filter', lowpass_filter[None,None,:]) if verbose==True: print(\"Low pass filter created, time", "self.cqt_kernels_imag*=lenghts.unsqueeze(1)/self.kernel_width print(\"Creating STFT kernels ...\", end='\\r') start = time() kernel_sin,", "it a torch tensor if verbose==True: print(\"Creating low pass filter", ": str The padding method. Default value is 'reflect'. trainable", "of waveforms to MFCC. Parameters ---------- x : torch tensor", "is the same as the forward STFT. window : str", "can support nn.DataParallel mel_basis = torch.nn.Parameter(mel_basis, requires_grad=trainable_mel) self.register_parameter('mel_basis', mel_basis) else:", "bin. Default is ``None``, therefore the higest CQT bin is", "the highest frequency bin. If freq_scale is ``no``, this argument", "nn.ReflectionPad1d(self.kernel_width//2) x = padding(x) # STFT fourier_real = conv1d(x, self.wcos,", "for _ in range(self.n_iter): tprev = rebuilt # Saving previous", "# Phase normalization angles = angles.div(torch.sqrt(angles.pow(2).sum(-1)).unsqueeze(-1) + 1e-16) # normalizing", "log_spec = torch.max(log_spec, batch_wise_max - self.top_db) return log_spec def _dct(self,", "\"\"\" def __init__(self, n_fft=2048, freq_bins=None, hop_length=512, window='hann', freq_scale='no', center=True, pad_mode='reflect',", "to reduce the CQT kernel size. 
The result with and", "is used, the bin spacing can be controlled by ``fmin``", "2 V[:, :, 1:] /= np.sqrt(N / 2) * 2", "is not ``None``, then the argument ``n_bins`` will be ignored", "trainable=False, output_format='Magnitude', verbose=True): super().__init__() self.norm = norm # Now norm", "low frequency region where freq < 40Hz. Parameters ---------- sr", "``no``, this argument does nothing. sr : int The sampling", "cqt_kernels_real = torch.tensor(cqt_kernels.real.astype(np.float32)) cqt_kernels_imag = torch.tensor(cqt_kernels.imag.astype(np.float32)) if trainable_STFT: wsin =", "your input spectrograms X are of the same length, please", "Convert a batch of waveforms to spectrums. Parameters ---------- x", "a magnitude spectrogram, please consider using Griffin-Lim.\" if onesided: X", "Preparing for the stft layer. No need for center self.stft", "pass filter created, time used = {:.4f} seconds\".format(time()-start)) # Calculate", "3. ``(num_audio, 1, len_audio)`` The correct shape will be inferred", "sgd_kwargs[\"lr\"] * batch_size optimizer = torch.optim.SGD([pred_stft], **sgd_kwargs) losses = []", "n_mfcc, time_steps)``. Examples -------- >>> spec_layer = Spectrogram.MFCC() >>> mfcc", "* window) cqt_kernels_real = torch.tensor(cqt_kernels.real.astype(np.float32)) cqt_kernels_imag = torch.tensor(cqt_kernels.imag.astype(np.float32)) if trainable_STFT:", "``True``, it shows layer information. If ``False``, it suppresses all", "get_cqt_complex(x_down, self.wcos, self.wsin, hop, self.padding) CQT = torch.cat((CQT1, CQT),1) CQT", "return torch.stack((phase_real,phase_imag), -1) class CQT(CQT1992v2): \"\"\"An abbreviation for :func:`~nnAudio.Spectrogram.CQT1992v2`. 
Please", "if ``output_format='Complex' or 'Phase'``; Examples -------- >>> spec_layer = Spectrogram.CQT2010v2()", "the downsampling factor, 2**(self.n_octaves-1) is make it # same mag", "else: self.register_buffer('mel_basis', mel_basis) # if trainable_mel==True: # self.mel_basis = torch.nn.Parameter(self.mel_basis)", "padding(x) imag = conv1d(x, self.wsin, stride=self.stride) real = conv1d(x, self.wcos,", "(1992). early downsampling factor is to downsample the input audio", "and istft later self.w = torch.tensor(get_window(window, int(self.win_length), fftbins=True), device=device).float() def", "kernel_cos, _, _, window_mask = create_fourier_kernels(n_fft, win_length=win_length, freq_bins=n_fft, window=window, freq_scale=freq_scale,", "for possible windowing functions. The default value is 'hann'. freq_scale", "for possible windowing functions. The default value is 'hann'. Please", "of the input signal. Input signal should be in either", "norm=norm) mel_basis = torch.tensor(mel_basis) if verbose==True: print(\"STFT filter created, time", "2 to convoluting it with the small CQT kernel. Everytime", "S): ''' Refer to https://librosa.github.io/librosa/_modules/librosa/core/spectrum.html#power_to_db for the original implmentation. '''", "self.wsin, stride=self.stride) real = conv1d(x, self.wcos, stride=self.stride) return (real, -imag)", "(batch, freq_bins, timesteps)\" # Initializing Random Phase rand_phase = torch.randn(*S.shape,", "STFT. hop_length : int The hop (or stride) size. Default", "stride=self.stride) return (real, -imag) def inverse(self,x_real,x_imag): \"\"\" Convert a batch", "= \", self.n_octaves) # Calculate the lowest frequency bin for", "to do inverse # if self.center: # if self.pad_mode ==", "``scipy.signal.get_window``, please refer to scipy documentation for possible windowing functions.", "trainable self.stride = hop_length self.center = center self.pad_mode = pad_mode", "order to obtain the correct inverse. 
If trainability is not", "self.hop_length = hop_length self.center = center self.pad_mode = pad_mode self.norm", "Trying to make the default setting same as librosa if", "pred_stft = (torch.pinverse(mel_basis) @ melspec).clamp(eps) pred_stft = nn.Parameter(pred_stft, requires_grad=True) sgd_kwargs[\"lr\"]", "be inferred automatically if the input follows these 3 shapes.", "lenghts = create_cqt_kernels(Q, sr, self.fmin_t, n_filters, bins_per_octave, norm=basis_norm, topbin_check=False) #", "verbose: print(f\"Target max gradient of {grad_threshold} reached. Stopping optimization.\") break", "and window Returns ------- MFCCs : torch.tensor It returns a", "self.earlydownsample = earlydownsample # We will activate early downsampling later", "class DFT(torch.nn.Module): \"\"\" Experimental feature before `torch.fft` was made avaliable.", "verbose==True: print(\"Creating CQT kernels ...\", end='\\r') start = time() #", "imag = a2+b1 real = a1-b2 return (real/self.n_fft, imag/self.n_fft) class", "is still same as the 1992 algorithm. Therefore, we can", "n_mels, time = shape[0], shape[-2], shape[-1] _, n_freq = mel_basis.shape", "torch.log10(torch.max(S, self.amin)) log_spec -= 10.0 * torch.log10(torch.max(self.amin, self.ref)) if self.top_db", "a constant Q transform.” (1992). early downsampling factor is to", "self.stft = STFT(n_fft=n_fft, freq_bins=None, hop_length=hop_length, window=window, freq_scale='no', center=center, pad_mode=pad_mode, sr=sr,", "= n_bins % bins_per_octave # print(\"remainder = \", remainder) if", "output_format # It will be used to calculate filter_cutoff and", "STFT kernel. Default value if ``True``. pad_mode : str The", "= {**default_stft_inversion_params, **stft_inversion_params} recon_stft = self.to_stft(melspec, **mel_inversion_params) recon_audio = self.stft.inverse(recon_stft,", "torch.tensor(kernel_cos, dtype=torch.float) # In this way, the inverse kernel and", "The hop (or stride) size. Default value is 512. 
window", "if verbose==True: print(\"STFT kernels created, time used = {:.4f} seconds\".format(time()-start))", "verbose or self.verbose # SGD arguments default_sgd_kwargs = dict(lr=1e3, momentum=0.9)", "(1992). Early downsampling factor is to downsample the input audio", "# Making all these variables trainable kernel_sin = torch.nn.Parameter(kernel_sin, requires_grad=trainable_kernels)", "\"\"\" x_real = broadcast_dim(x_real) x_imag = broadcast_dim(x_imag) x_real.transpose_(1,2) # Prepare", "``32`` hop_length : int The hop (or stride) size. Default", "kernel done, n_fft = \",self.n_fft) # If center==True, the STFT", "creating CQT kernels Q = 1/(2**(1/bins_per_octave)-1) # Creating lowpass filter", "nothing. Please make sure the value is the same as", "Determine the return type. 'Magnitude' will return the magnitude of", "pad_mode='reflect'): super().__init__() # norm arg is not functioning self.hop_length =", "forward STFT. fmax : int The ending frequency for the", "to mel bins. Default value is 128. hop_length : int", "topbin_check=False) # For normalization in the end freqs = fmin", "'hann'. center : bool Putting the STFT keneral at the", "self.register_buffer('mel_basis', mel_basis) # if trainable_mel==True: # self.mel_basis = torch.nn.Parameter(self.mel_basis) #", "= center self.pad_mode = pad_mode self.n_fft = n_fft self.power =", "0) elif self.pad_mode == 'reflect': self.padding = nn.ReflectionPad1d(self.n_fft//2) def forward(self,x,output_format=None):", "the argument ``n_bins`` will be ignored and ``n_bins`` will be", "time domain to freq domain # These cqt_kernel is already", "real[:, :length] return real def extra_repr(self) -> str: return 'n_fft={},", "nn from torch.nn.functional import conv1d, conv2d, fold import numpy as", "memory... 
kernel_sin_inv = torch.cat((kernel_sin, -kernel_sin[1:-1].flip(0)), 0) kernel_cos_inv = torch.cat((kernel_cos, kernel_cos[1:-1].flip(0)),", "lenghts) cqt_kernels_real = torch.tensor(cqt_kernels.real).unsqueeze(1) cqt_kernels_imag = torch.tensor(cqt_kernels.imag).unsqueeze(1) if trainable: cqt_kernels_real", "the CQT n_filters = min(bins_per_octave, n_bins) self.n_octaves = int(np.ceil(float(n_bins) /", "``trainable_STFT``. It only support type-II DCT at the moment. Input", "be automatically broadcast to the right shape \"\"\" output_format =", "value is 'hann'. center : bool Putting the CQT keneral", "a2+b1 real = a1-b2 return (real/self.n_fft, imag/self.n_fft) class iSTFT(torch.nn.Module): \"\"\"This", "the forward STFT. fmin : int The starting frequency for", "This function is to calculate the CQT of the input", "``None``, therefore the higest CQT bin is inferred from the", "filter banks are trainable or not. If ``True``, the gradients", "center of the time-step or not. If ``False``, the time", "1992 alogrithm [2] [1] <NAME>. “CONSTANT-Q TRANSFORM TOOLBOX FOR MUSIC", "bool Putting the iSTFT keneral at the center of the", "save GPU memory. fmin : int The starting frequency for", "is 32.70Hz, which coresponds to the note C0. fmax :", "= 4 # size of a float epsilon = 10e-8", "during model training. Default value is ``False`` output_format : str", "torch tensor if verbose==True: print(\"Creating low pass filter ...\", end='\\r')", "= torch.max(log_spec, batch_wise_max - self.top_db) return log_spec def _dct(self, x,", "Create the window function and prepare the shape for batch-wise-time-wise", "part of the signal. \"\"\" x_real = broadcast_dim(x_real) x_imag =", "not. Default is ``False`` trainable_CQT : bool Determine if the", "---------------------------### class STFT(torch.nn.Module): \"\"\"This function is to calculate the short-time", "Mel filter banks will also be calculated and the Mel", "for the STFT. 
Default value is 2048 n_mels : int", "index is the beginning of the CQT kernel, if ``True``,", ":func:`~nnAudio.Spectrogram.CQT1992v2`. Please refer to the :func:`~nnAudio.Spectrogram.CQT1992v2` documentation\"\"\" pass # The", "linear spacing. center : bool Putting the STFT keneral at", "Vc = torch.rfft(v, 1, onesided=False) # TODO: Can make the", "Schörkhuber, Christian. “CONSTANT-Q TRANSFORM TOOLBOX FOR MUSIC PROCESSING.” (2010). [2]", "the CQT of the input signal. Input signal should be", "x = self._dct(x, norm='ortho')[:,:self.m_mfcc,:] return x def extra_repr(self) -> str:", "def _power_to_db(self, S): ''' Refer to https://librosa.github.io/librosa/_modules/librosa/core/spectrum.html#power_to_db for the original", "filter and other tensors def __init__(self, sr=22050, hop_length=512, fmin=32.70, fmax=None,", "slightly modify it so that it runs faster than the", "``True`` is used, the Mel scale is logarithmic. The default", "the full frequency spectrum, we make a small CQT kernel", "self.center: real = real[:, self.pad_amount:-self.pad_amount] else: if self.center: real =", "your intended waveform length. By default, ``length=None``, which will remove", "is used to calucate the correct ``fmin`` and ``fmax``. Setting", "mel_basis.shape melspec = melspec.detach().view(-1, n_mels, time) if random_start: pred_stft_shape =", "input shape = (batch, n_fft, 1) \"\"\" def __init__(self, n_fft=2048,", "12. 
trainable_STFT : bool Determine if the time to frequency", "low pass filter ...\", end='\\r') start = time() # self.lowpass_filter", "bool If your spectrograms only have ``n_fft//2+1`` frequency bins, please", "rand_phase) # Initializing the rebuilt magnitude spectrogram rebuilt = torch.zeros(*angles.shape,", "kernel_cos[1:-1].flip(0)), 0) if iSTFT: self.register_buffer('kernel_sin_inv', kernel_sin_inv.unsqueeze(-1)) self.register_buffer('kernel_cos_inv', kernel_cos_inv.unsqueeze(-1)) # Making", "Prepare the window sumsqure for division # Only need to", "= broadcast_dim(x) if self.center: if self.pad_mode == 'constant': padding =", "hop_length self.pad_mode = pad_mode self.n_bins = n_bins self.earlydownsample = earlydownsample", "shape of (batch, freq_bins, timesteps)\" # Initializing Random Phase rand_phase", "sr, fmin, n_bins, bins_per_octave, norm, window, fmax) self.register_buffer('lenghts', lenghts) cqt_kernels_real", "/= np.sqrt(N) * 2 V[:, :, 1:] /= np.sqrt(N /", "inverse(self, X, onesided=True, length=None, refresh_win=True): \"\"\" This function is same", "value is 'hann'. Please make sure the value is the", "by setting ``refresh_win=False``. Else please keep ``refresh_win=True`` \"\"\" if (hasattr(self,", "cqt_kernels_real) self.register_parameter('cqt_kernels_imag', cqt_kernels_imag) else: self.register_buffer('cqt_kernels_real', cqt_kernels_real) self.register_buffer('cqt_kernels_imag', cqt_kernels_imag) if verbose==True:", "of waveforms to Mel spectrograms. Parameters ---------- x : torch", "abbreviation for :func:`~nnAudio.Spectrogram.CQT1992v2`. Please refer to the :func:`~nnAudio.Spectrogram.CQT1992v2` documentation\"\"\" pass", "``(num_samples, freq_bins, time_steps)``; 'Complex' will return the STFT result in", "a small CQT kernel covering only the top octave. Then", "STFT in order to obtain the correct inverse. 
If trainability", "a1-b2 return (real/self.n_fft, imag/self.n_fft) class iSTFT(torch.nn.Module): \"\"\"This class is to", "'constant': # padding = nn.ConstantPad1d(self.n_fft//2, 0) # elif self.pad_mode ==", "torch.nn.Parameter(wcos, requires_grad=self.trainable) self.register_parameter('wsin', wsin) self.register_parameter('wcos', wcos) # Prepare the shape", "inverse x_imag.transpose_(1,2) # Prepare the right shape to do inverse", "https://librosa.github.io/librosa/_modules/librosa/core/spectrum.html#power_to_db for the original implmentation. ''' log_spec = 10.0 *", "dict(lr=1e3, momentum=0.9) if sgd_kwargs: default_sgd_kwargs.update(sgd_kwargs) sgd_kwargs = default_sgd_kwargs mel_basis =", "b2 = conv2d(X_imag_bc, self.kernel_sin, stride=(1,1)) # compute real and imag", "Getting the top octave CQT x_down = x # Preparing", "self.fmin_t = fmin*2**(self.n_octaves-1) remainder = n_bins % bins_per_octave # print(\"remainder", "= a1-b2 return (real/self.n_fft, imag/self.n_fft) class iSTFT(torch.nn.Module): \"\"\"This class is", "this layer. Default value is 'cpu'. Returns ------- spectrogram :", "hop_length self.center = center self.pad_mode = pad_mode self.n_fft = n_fft", "bin. When `linear` or `log` is used, the bin spacing", "a1 - b2 real = real.squeeze(-2)*self.window_mask # Normalize the amplitude", "guarantee that the inverse is perfect, please use with extra", "= int(np.ceil(float(n_bins) / bins_per_octave)) # print(\"n_octaves = \", self.n_octaves) #", "= create_cqt_kernels(Q, sr, fmin, n_bins, bins_per_octave, norm, window, fmax) self.register_buffer('lenghts',", "in the middle, and paddings at the beginning # and", "freq_scale is ``no``, this argument does nothing. Please make sure", "The padding method. Default value is 'reflect'. htk : bool", "normalization. Default is ``1``, which is same as the normalization", "function only works for 1 single frame. i.e. 
input shape", "self.trainable = trainable self.output_format = output_format # It will be", ": str The windowing function for CQT. It uses ``scipy.signal.get_window``,", ") class CQT1992v2(torch.nn.Module): \"\"\"This function is to calculate the CQT", "and paddings at the beginning # and ending are required.", "\"\"\" Convert a batch of waveforms to Mel spectrograms. Parameters", "Default value if ``True``. pad_mode : str The padding method.", "torch.tensor(kernel_sin, dtype=torch.float).unsqueeze(-1) kernel_cos = torch.tensor(kernel_cos, dtype=torch.float).unsqueeze(-1) # Decide if the", "if verbose==True: print(\"Creating CQT kernels ...\", end='\\r') start = time()", "return torch.stack((spec_real,-spec_imag), -1) # Remember the minus sign for imaginary", "are trainable or not. If ``True``, the gradients for CQT", "torch.cos(torch.atan2(CQT_imag,CQT_real)) phase_imag = torch.sin(torch.atan2(CQT_imag,CQT_real)) return torch.stack((phase_real,phase_imag), -1) def forward_manual(self,x): \"\"\"", "to obtain the final MFCCs. Therefore, the Mel spectrogram part", "* W_r - Vc[:, :, :, 1] * W_i if", "self.register_buffer('window_mask', window_mask) if verbose==True: print(\"iSTFT kernels created, time used =", "creation process is still same as the 1992 algorithm. Therefore,", "cqt_kernels_real) self.register_parameter('cqt_kernels_imag', cqt_kernels_imag) else: self.register_buffer('cqt_kernels_real', cqt_kernels_real) self.register_buffer('cqt_kernels_imag', cqt_kernels_imag) # If", "Griffin-Lim.\" if onesided: X = extend_fbins(X) # extend freq X_real,", "then the argument ``n_bins`` will be ignored and ``n_bins`` will", "the same as the forward STFT. center : bool Putting", "if the Mel filter banks are trainable or not. If", "the shape that fits conv1d self.register_buffer('lowpass_filter', lowpass_filter[None,None,:]) if verbose==True: print(\"Low", "is the same as the forward STFT. 
fmin : int", "target.unsqueeze(1) if target.ndim == 3 else target loss = (pred", "end at Nyquist frequency with linear spacing. center : bool", "self.hop_length CQT = get_cqt_complex(x, self.wcos, self.wsin, hop, self.padding) # Getting", "= get_cqt_complex(x, self.wcos, self.wsin, hop, self.padding) # Getting the top", "# For normalization in the end freqs = fmin *", "`iSTFT=True` if you want to use `inverse`\") assert X.dim()==4 ,", "wsin = torch.tensor(kernel_sin * window) wcos = torch.tensor(kernel_cos * window)", "basis_norm : int Normalization for the CQT kernels. ``1`` means", "= x.shape N = x_shape[-1] v = torch.cat([x[:, :, ::2],", "= output_format # It will be used to calculate filter_cutoff", "htk=htk, norm=norm) mel_basis = torch.tensor(mel_basis) if verbose==True: print(\"STFT filter created,", "filter and make it a torch tensor if verbose==True: print(\"Creating", "spectrogram : torch.tensor It returns a batch of waveforms. Examples", "requires_grad=trainable_kernels) wcos = torch.nn.Parameter(wcos, requires_grad=trainable_kernels) self.register_parameter('wsin', wsin) self.register_parameter('wcos', wcos) else:", "is ``32`` hop_length : int The hop (or stride) size.", "sr=22050, n_mfcc=20, norm='ortho', verbose=True, ref=1.0, amin=1e-10, top_db=80.0, **kwargs): super().__init__() self.melspec_layer", "spectrogram output type, either ``Magnitude``, ``Complex``, or ``Phase``. The output_format", "cqt_kernels_imag = torch.tensor(fft_basis.imag.astype(np.float32)) if verbose==True: print(\"CQT kernels created, time used", "create_fourier_kernels(n_fft=n_fft, freq_bins=n_fft, window=window, freq_scale=freq_scale, fmin=fmin, fmax=fmax, sr=sr) self.wsin = torch.tensor(wsin,", "as n_fft, n_mels, hop_length, and window Returns ------- MFCCs :", "as nn from torch.nn.functional import conv1d, conv2d, fold import numpy", "The momentum for the update rule. 
The default value is", "the last axis, since dct applies to the frequency axis", "for normalizing basis self.hop_length = hop_length self.pad_mode = pad_mode self.n_bins", "else: pass if trainable_mel: # Making everything nn.Parameter, so that", "the value is the same as the forward STFT. sr", "real[:, self.pad_amount:self.pad_amount + length] else: real = real[:, :length] return", "the frequency domain cqt_kernels_real = torch.tensor(basis.real.astype(np.float32)).unsqueeze(1) cqt_kernels_imag = torch.tensor(basis.imag.astype(np.float32)).unsqueeze(1) if", "self.fmin_t, n_filters)) basis, self.n_fft, _ = create_cqt_kernels(Q, sr, self.fmin_t, n_filters,", "Only need to create this window once to save time", "default value is 'hann'. freq_scale : 'linear', 'log', or 'no'", "Examples -------- >>> spec_layer = Spectrogram.STFT() >>> specs = spec_layer(x)", "win_length self.stride = hop_length self.center = center self.pad_amount = self.n_fft", "a new variable for downsampling for i in range(self.n_octaves-1): hop", "nn.ReflectionPad1d(self.n_fft//2) x = padding(x) imag = conv1d(x, self.wsin, stride=self.stride) real", "torch.stack((CQT_real,CQT_imag),-1) elif output_format=='Phase': phase_real = torch.cos(torch.atan2(CQT_imag,CQT_real)) phase_imag = torch.sin(torch.atan2(CQT_imag,CQT_real)) return", "argument ``n_bins`` will be ignored and ``n_bins`` will be calculated", "Mel scale is quasi-logarithmic. When ``True`` is used, the Mel", "torch.log10(torch.max(self.amin, self.ref)) if self.top_db is not None: if self.top_db <", "log_spec so that it can be broadcasted batch_wise_max = log_spec.flatten(1).max(1)[0].unsqueeze(1).unsqueeze(1)", "the arguments follow the convention from librosa. 
This class inherits", "= torch.tensor(kernel_sin, dtype=torch.float) kernel_cos = torch.tensor(kernel_cos, dtype=torch.float) # In this", "Parameters ---------- onesided : bool If your spectrograms only have", "= pad_mode self.output_format = output_format # creating kernels for CQT", "sure the value is the same as the forward STFT.", "model training. Default value is ``False``. trainable_window : bool Determine", "calculate the correct ``fmin`` and ``fmax``. Setting the correct sampling", ":, 0], X[:, :, :, 1] # broadcast dimensions to", "in inverse self.register_buffer('window_mask', window_mask.unsqueeze(0).unsqueeze(-1)) if verbose==True: print(\"STFT kernels created, time", "``None`` which is equivalent to ``n_fft//4``. Please make sure the", "``None``, which means ``n_fft//2+1`` bins. hop_length : int The hop", "n_bins=84, trainable_STFT=False, trainable_CQT=False, bins_per_octave=12, output_format='Complex', norm=1, window='hann', center=True, pad_mode='reflect'): super().__init__()", "that this model can support nn.DataParallel mel_basis = torch.nn.Parameter(mel_basis, requires_grad=trainable_mel)", "which is to convert spectrograms back to waveforms. It only", "so that this model can support nn.DataParallel mel_basis = torch.nn.Parameter(mel_basis,", "n_fft self.win_length = win_length self.n_iter = n_iter self.center = center", "with nn.Parallel # self.kernel_sin = torch.nn.Parameter(self.kernel_sin, requires_grad=self.trainable) # self.kernel_cos =", "if self.center: if self.pad_mode == 'constant': padding = nn.ConstantPad1d(self.kernel_width//2, 0)", "pad_mode : str The padding method. Default value is 'reflect'.", "<= 0: raise ParameterError('amin must be strictly positive') amin =", "the Mel filter banks are trainable or not. If ``True``,", "to be return. 
Can be either ``Magnitude`` or ``Complex`` or", "kernels are trainable if trainable_kernels: # Making all these variables", "used for _power_to_db if amin <= 0: raise ParameterError('amin must", "x_down = x # Preparing a new variable for downsampling", "self._power_to_db(x) x = self._dct(x, norm='ortho')[:,:self.m_mfcc,:] return x def extra_repr(self) ->", "please keep ``refresh_win=True`` \"\"\" if (hasattr(self, 'kernel_sin_inv') != True) or", "used to normalize the final CQT result by dividing n_fft", "alogrithm [2] [1] Schörkhuber, Christian. “CONSTANT-Q TRANSFORM TOOLBOX FOR MUSIC", "sgd_kwargs[\"lr\"] = sgd_kwargs[\"lr\"] * batch_size optimizer = torch.optim.SGD([pred_stft], **sgd_kwargs) losses", "CQT bin. Default is 32.70Hz, which coresponds to the note", "- torch.arange(N, dtype=x.dtype, device=x.device)[None, :] * np.pi / (2 *", "the calculation of a constant Q transform.” (1992). early downsampling", "bins Please make sure the value is the same as", "---------- x : torch tensor Input signal should be in", "(np.r_[0:n_bins] / np.float(bins_per_octave)) lenghts = np.ceil(Q * sr / freqs)", "size of a float epsilon = 10e-8 # fudge factor", "if ``True``. Please make sure the value is the same", "freq_bins, timesteps, 2)\" # If the input spectrogram contains only", "the window sum square. If you have an input with", "training. Default value is ``False`` output_format : str Control the", "n_fft # Use extend_fbins function to get back another half", "# We will activate early downsampling later if possible self.trainable", "please use :func:`~nnAudio.Spectrogram.Griffin_Lim`. 
Parameters ---------- onesided : bool If your", "fmax_t > sr/2: raise ValueError('The top bin {}Hz has exceeded", "algorithm to connect all the frames real = overlap_add(real, self.stride)", "mel_basis = torch.nn.Parameter(mel_basis, requires_grad=trainable_mel) self.register_parameter('mel_basis', mel_basis) else: self.register_buffer('mel_basis', mel_basis) #", "filter ...\", end='\\r') start = time() sr, self.hop_length, self.downsample_factor, early_downsample_filter,", "between each frequency bin. When `linear` or `log` is used,", "hop = self.hop_length CQT = get_cqt_complex(x, self.cqt_kernels_real, self.cqt_kernels_imag, hop, self.padding)", "once to save time # Unless the input spectrograms have", "back the time axis and freq axis def forward(self, x):", "bottom bins # print(\"downsample_factor = \",self.downsample_factor) # print(CQT.shape) # print(self.lenghts.view(-1,1).shape)", "def _dct(self, x, norm=None): ''' Refer to https://github.com/zh217/torch-dct for the", "'hann'. Please make sure the value is the same as", "b1 = conv1d(x_imag, self.wcos, stride=self.stride) b2 = conv1d(x_imag, self.wsin, stride=self.stride)", "_, window_mask = create_fourier_kernels(n_fft, win_length=win_length, freq_bins=n_fft, window=window, freq_scale=freq_scale, fmin=fmin, fmax=fmax,", "kernels. ``1`` means L1 normalization, and ``2`` means L2 normalization.", "the STFT result, shape = ``(num_samples, freq_bins, time_steps)``; 'Complex' will", "results with a gigantic CQT kernel covering the full frequency", "'reflect': padding = nn.ReflectionPad1d(self.n_fft//2) x = padding(x) imag = conv1d(x,", "works for the complex value spectrograms. If you have the", "torch.stack((phase_real,phase_imag), -1) def extra_repr(self) -> str: return 'STFT kernel size", "X[:, :, :, 0], X[:, :, :, 1] # broadcast", "the time index is the center of the CQT kernel.", "[2] [1] Schörkhuber, Christian. “CONSTANT-Q TRANSFORM TOOLBOX FOR MUSIC PROCESSING.”", "speed by setting ``refresh_win=False``. 
Else please keep ``refresh_win=True`` \"\"\" if", "elements, which leads to error in calculating phase def inverse(self,", "win_length=win_length, freq_bins=n_fft, window=window, freq_scale=freq_scale, fmin=fmin, fmax=fmax, sr=sr, verbose=False) window_mask =", "used to calucate the correct ``fmin`` and ``fmax``. Setting the", "for the calculation of a constant Q transform.” (1992). Parameters", "downsampling if this argument is True if verbose==True: print(\"Creating early", "usage is same as ``torch.nn.Module``. Parameters ---------- n_fft : int", "print(\"Creating CQT kernels ...\", end='\\r') start = time() # print(\"Q", "Audio and Acoustics (pp. 1-4), Oct. 2013. Parameters ---------- n_fft", "phase_real = torch.cos(torch.atan2(CQT_imag,CQT_real)) phase_imag = torch.sin(torch.atan2(CQT_imag,CQT_real)) return torch.stack((phase_real,phase_imag), -1) def", "Control the spectrogram output type, either ``Magnitude``, ``Complex``, or ``Phase``.", "make freq the last axis, since dct applies to the", "CQT elif output_format=='Phase': phase_real = torch.cos(torch.atan2(CQT[:,:,:,1],CQT[:,:,:,0])) phase_imag = torch.sin(torch.atan2(CQT[:,:,:,1],CQT[:,:,:,0])) return", "`linear` or `log` is used, the bin spacing can be", "'ortho'. Normalization for DCT basis **kwargs Other arguments for Melspectrogram", "sr=22050, trainable=False, output_format=\"Complex\", verbose=True): super().__init__() # Trying to make the", "type. ``Magnitude`` will return the magnitude of the STFT result,", "either ``Magnitude``, ``Complex``, or ``Phase``. 
The output_format can also be", "specs = spec_layer(x) \"\"\" # To DO: # need to", "self.lowpass_filter) CQT1 = get_cqt_complex(x_down, self.wcos, self.wsin, hop, self.padding) CQT =", "sr=22050, n_fft=2048, n_mels=128, hop_length=512, window='hann', center=True, pad_mode='reflect', power=2.0, htk=False, fmin=0.0,", "self.n_iter = n_iter self.center = center self.pad_mode = pad_mode self.momentum", "create_fourier_kernels(self.kernel_width, window='ones', freq_scale='no') # Converting kernels from numpy arrays to", "-1) def forward_manual(self,x): \"\"\" Method for debugging \"\"\" x =", "onesided=True, length=None, refresh_win=True): \"\"\" This function is same as the", "= ``(num_samples, freq_bins,time_steps, 2)``. The complex number is stored as", "band_center = 0.50, kernelLength=256, transitionBandwidth=0.001) ) # Broadcast the tensor", "is ``False``. trainable_STFT : bool Determine if the STFT kenrels", "time index is the beginning of the iSTFT kernel, if", "= (pred - target).pow(2).sum(-2).mean() return loss verbose = verbose or", "to the Fourier kernels window_mask = torch.tensor(window_mask) wsin = kernel_sin", "n_filters = min(bins_per_octave, n_bins) self.n_octaves = int(np.ceil(float(n_bins) / bins_per_octave)) #", "tensor of spectrograms. ``shape = (num_samples, freq_bins,time_steps)`` if ``output_format='Magnitude'``; ``shape", "# = (-b2, b1) a1 = conv1d(x_real, self.wcos, stride=self.stride) a2", "torch.nn.Parameter(wcos, requires_grad=trainable_kernels) self.register_parameter('wsin', wsin) self.register_parameter('wcos', wcos) else: self.register_buffer('wsin', wsin) self.register_buffer('wcos',", "kernel_sin * window_mask wcos = kernel_cos * window_mask if self.trainable==False:", "STFT. 
fmax : int The ending frequency for the highest", "= mel_basis @ pred_stft loss = loss_fn(pred_mel, melspec) losses.append(loss.item()) loss.backward()", "mel_inversion_params = mel_inversion_params or {} stft_inversion_params = stft_inversion_params or {}", "function for STFT. It uses ``scipy.signal.get_window``, please refer to scipy", "forward STFT. sr : int The sampling rate for the", "Mel -> STFT optimization\") if loss_threshold and loss < loss_threshold:", "# Saving previous rebuilt magnitude spec # spec2wav conversion #", ", \"Please make sure your input is in the shape", "if pred.ndim == 3 else pred target = target.unsqueeze(1) if", "for normalization ### --------------------------- Spectrogram Classes ---------------------------### class STFT(torch.nn.Module): \"\"\"This", "done, n_fft = \",self.n_fft) # Preparing kernels for Short-Time Fourier", "class CQT(CQT1992v2): \"\"\"An abbreviation for :func:`~nnAudio.Spectrogram.CQT1992v2`. Please refer to the", "return real def extra_repr(self) -> str: return 'n_fft={}, Fourier Kernel", "(hasattr(self, 'kernel_cos_inv') != True): raise NameError(\"Please activate the iSTFT module", "the shape of (batch, freq_bins, timesteps, 2)\" # If the", "rebuilt magnitude spec # spec2wav conversion # print(f'win_length={self.win_length}\\tw={self.w.shape}') inverse =", ": str Determine the return type. ``Magnitude`` will return the", "reuse the code from the 1992 alogrithm [2] [1] <NAME>.", "print(\"Creating CQT kernels ...\", end='\\r') start = time() basis, self.n_fft,", "window='hann', freq_scale='no', center=True, fmin=50, fmax=6000, sr=22050, trainable_kernels=False, trainable_window=False, verbose=True, refresh_win=True):", "``torch.nn.Module``, therefore, the usage is same as ``torch.nn.Module``. This alogrithm", "get_early_downsample_params(sr, hop_length, fmax_t, Q, self.n_octaves, verbose) self.register_buffer('early_downsample_filter', early_downsample_filter) if verbose==True:", "hop (or stride) size. 
Default value is 512. fmin :", "or ``Phase``. The output_format can also be changed during the", "= center self.pad_mode = pad_mode self.n_fft = n_fft # Create", "if ``output_format='Complex' or 'Phase'``; Examples -------- >>> spec_layer = Spectrogram.CQT1992v2()", "L1 normalization, and ``2`` means L2 normalization. Default is ``1``,", "= CQT*self.downsample_factor # Normalize again to get same result as", "basis **kwargs Other arguments for Melspectrogram such as n_fft, n_mels,", "self.register_buffer('wcos', wcos) if trainable_CQT: cqt_kernels_real = torch.nn.Parameter(cqt_kernels_real, requires_grad=trainable_kernels) cqt_kernels_imag =", "* tprev[:,:,:] # Phase normalization angles = angles.div(torch.sqrt(angles.pow(2).sum(-1)).unsqueeze(-1) + 1e-16)", "broadcast to the right shape \"\"\" x = broadcast_dim(x) if", "self.cqt_kernels_real, self.cqt_kernels_imag, hop, self.padding) # Getting the top octave CQT", "print(\"Q = {}, fmin_t = {}, n_filters = {}\".format(Q, self.fmin_t,", "self.nonzero_indices] = real[:,self.nonzero_indices].div(self.w_sum[self.nonzero_indices]) # Remove padding if length is None:", "= x.shape[-1] x = broadcast_dim(x) if self.center: if self.pad_mode ==", "downsampled input is equavalent to the next lower octave. The", "momentum for the update rule. The default value is ``0.99``.", "self.wsin = torch.nn.Parameter(self.wsin) # self.wcos = torch.nn.Parameter(self.wcos) def forward(self, x):", "1 single frame. i.e. 
input shape = (batch, n_fft, 1)", "normalizing the phase # Using the final phase to reconstruct", "= padding(x) # STFT fourier_real = conv1d(x, self.wcos, stride=self.hop_length) fourier_imag", "No need for center self.stft = STFT(n_fft=n_fft, freq_bins=None, hop_length=hop_length, window=window,", "'n_mfcc = {}'.format( (self.n_mfcc) ) class CQT1992(torch.nn.Module): \"\"\" This alogrithm", "input spectrograms have different time steps if hasattr(self, 'w_sum')==False or", "create_fourier_kernels(self.n_fft, window='ones', freq_scale='no') wsin = kernel_sin * window wcos =", "= pad_mode self.n_bins = n_bins self.output_format = output_format self.earlydownsample =", "If the input spectrogram contains only half of the n_fft", "frequency bins. Default is ``None``, which means ``n_fft//2+1`` bins Please", "to https://github.com/zh217/torch-dct for the original implmentation. ''' x = x.permute(0,2,1)", "<NAME>. “CONSTANT-Q TRANSFORM TOOLBOX FOR MUSIC PROCESSING.” (2010). [2] Brown,", "algorithm, that is why I call it version 2. [1]", "time # Unless the input spectrograms have different time steps", "max gradient of {grad_threshold} reached. Stopping optimization.\") break pred_stft =", "kernelLength=256, transitionBandwidth=0.001 ) ) # Broadcast the tensor to the", "freq_bins,time_steps, 2)``; ``Phase`` will return the phase of the STFT", "range(max_steps): optimizer.zero_grad() pred_mel = mel_basis @ pred_stft loss = loss_fn(pred_mel,", "X_real, X_imag = X[:, :, :, 0], X[:, :, :,", "n_mels, hop_length, and window Returns ------- MFCCs : torch.tensor It", "angles, self.n_fft, self.hop_length, win_length=self.win_length, window=self.w, center=self.center) # wav2spec conversion rebuilt", "starting frequency for the lowest Mel filter bank. fmax :", "torch.sin(torch.atan2(CQT[:,:,:,1],CQT[:,:,:,0])) return torch.stack((phase_real,phase_imag), -1) def extra_repr(self) -> str: return 'STFT", "STFT. 
freq_scale : 'linear', 'log', or 'no' Determine the spacing", "= win_length self.stride = hop_length self.center = center self.pad_amount =", "initialize this layer. Default value is 'cpu' Returns ------- spectrogram", "if hop_length==None: hop_length = int(win_length // 4) self.n_fft = n_fft", "default_mel_inversion_params = {} default_stft_inversion_params = {} mel_inversion_params = mel_inversion_params or", "window : str The windowing function for CQT. It uses", "Brown, <NAME>. and <NAME>. “An efficient algorithm for the calculation", "{}\".format(Q, self.fmin_t, n_filters)) basis, self.n_fft, _ = create_cqt_kernels(Q, sr, self.fmin_t,", "loss = loss_fn(pred_mel, melspec) losses.append(loss.item()) loss.backward() optimizer.step() # Check conditions", "verbose==True: print(\"CQT kernels created, time used = {:.4f} seconds\".format(time()-start)) #", "spec_layer(x) \"\"\" def __init__(self, sr=22050, hop_length=512, fmin=220, fmax=None, n_bins=84, trainable_STFT=False,", "the STFT result, shape = ``(num_samples, freq_bins,time_steps)``; ``Complex`` will return", "int To make sure the inverse STFT has the same", "\"\"\" def __init__(self, sr=22050, n_fft=2048, n_mels=128, hop_length=512, window='hann', center=True, pad_mode='reflect',", "PROCESSING.” (2010). [2] Brown, <NAME>. and <NAME>. “An efficient algorithm", "torch.tensor It returns a tensor of spectrograms. ``shape = (num_samples,", "same as librosa if win_length==None: win_length = n_fft if hop_length==None:", "forward STFT. momentum : float The momentum for the update", "= padding(x_real) # x_imag = padding(x_imag) # Watch out for", "CQT Amplitude return torch.sqrt(CQT.pow(2).sum(-1)) elif output_format=='Complex': return CQT elif output_format=='Phase':", "`inverse`\") assert X.dim()==4 , \"Inverse iSTFT only works for complex", "next lower octave. 
The kernel creation process is still same", "ref = torch.abs(torch.tensor([ref])) self.register_buffer('amin', amin) self.register_buffer('ref', ref) self.top_db = top_db", "it can be used later in inverse self.register_buffer('window_mask', window_mask.unsqueeze(0).unsqueeze(-1)) if", "for the update rule. The default value is ``0.99``. device", "n_fft : int The window size for the STFT. Default", "shape \"\"\" output_format = output_format or self.output_format x = broadcast_dim(x)", "CQT*torch.sqrt(self.lenghts.view(-1,1,1)) if output_format=='Magnitude': # Getting CQT Amplitude return torch.sqrt(CQT.pow(2).sum(-1)) elif", "print(f\"Target max gradient of {grad_threshold} reached. Stopping optimization.\") break pred_stft", "win_length=win_length, freq_bins=freq_bins, window=window, freq_scale=freq_scale, fmin=fmin, fmax=fmax, sr=sr, verbose=verbose) kernel_sin =", "Amplitude CQT = torch.sqrt(CQT_real.pow(2)+CQT_imag.pow(2)) return CQT*torch.sqrt(self.lenghts.view(-1,1)) class CQT2010v2(torch.nn.Module): \"\"\"This function", "which is equivalent to ``n_fft//4``. window : str The windowing", "either of the following shapes.\\n 1. ``(len_audio)``\\n 2. ``(num_audio, len_audio)``\\n", "bin. If freq_scale is ``no``, this argument does nothing. sr", "int The number of Mel filter banks. The filter banks", "rebuilt = torch.zeros(*angles.shape, device=self.device) for _ in range(self.n_iter): tprev =", "end='\\r') start = time() cqt_kernels, self.kernel_width, lenghts = create_cqt_kernels(Q, sr,", "stft_inversion_params = {**default_stft_inversion_params, **stft_inversion_params} recon_stft = self.to_stft(melspec, **mel_inversion_params) recon_audio =", "and freq axis def forward(self, x): \"\"\" Convert a batch", "do not share the same memory... kernel_sin_inv = torch.cat((kernel_sin, -kernel_sin[1:-1].flip(0)),", "window = create_fourier_kernels(self.kernel_width, window='ones', freq_scale='no') # Converting kernels from numpy", "reduce the CQT kernel size. 
The result with and without", "x # Preparing a new variable for downsampling for i", "\"\"\" This function is same as the :func:`~nnAudio.Spectrogram.iSTFT` class, which", "sr=22050, hop_length=512, fmin=220, fmax=None, n_bins=84, trainable_STFT=False, trainable_CQT=False, bins_per_octave=12, output_format='Complex', norm=1,", "single frame. i.e. input shape = (batch, n_fft, 1) \"\"\"", "value is the same as the forward STFT. freq_scale :", "real def extra_repr(self) -> str: return 'n_fft={}, Fourier Kernel size={},", "window=window, freq_scale=freq_scale, fmin=fmin, fmax=fmax, sr=sr, verbose=False) window_mask = get_window(window,int(win_length), fftbins=True)", "the final CQT result by dividing n_fft # basis_norm is", "highest Mel filter bank. trainable_mel : bool Determine if the", "= conv1d(x, self.cqt_kernels_real, stride=self.hop_length) CQT_imag = conv1d(x, self.cqt_kernels_imag, stride=self.hop_length) #", "possible self.trainable = trainable self.output_format = output_format # It will", "(self.momentum / (1 + self.momentum)) * tprev[:,:,:] # Phase normalization", ": int The number of Mel-frequency cepstral coefficients norm :", "discrete cosine transform is calcuated to obtain the final MFCCs.", "def extra_repr(self) -> str: return 'n_fft={}, Fourier Kernel size={}, iSTFT={},", "the Mel spectrogram part can be made trainable using ``trainable_mel``", "-kernel_sin[1:-1].flip(0)), 0) kernel_cos_inv = torch.cat((kernel_cos, kernel_cos[1:-1].flip(0)), 0) if iSTFT: self.register_buffer('kernel_sin_inv',", "self.wcos = torch.tensor(wcos, dtype=torch.float) def forward(self,x): \"\"\" Convert a batch", "+ self.momentum)) * tprev[:,:,:] # Phase normalization angles = angles.div(torch.sqrt(angles.pow(2).sum(-1)).unsqueeze(-1)", "(-b2, b1) a1 = conv1d(x_real, self.wcos, stride=self.stride) a2 = conv1d(x_real,", "\"\"\"This function is to calculate the Melspectrogram of the input", "to ``n_fft//4``. 
Please make sure the value is the same", "= pred_stft.view((*shape[:-2], n_freq, time)) if return_extras: return pred_stft, pred_mel.detach(), losses", "to make the default setting same as librosa if win_length==None:", "frequency range in the CQT filter instead of here. if", "x = padding(x) # STFT fourier_real = conv1d(x, self.wcos, stride=self.hop_length)", "int The starting frequency for the lowest frequency bin. If", "``False`` output_format : str Determine the return type. 'Magnitude' will", "of 2 to convoluting it with the small CQT kernel.", "pad_mode self.n_fft = n_fft # Create filter windows for stft", "'constant': padding = nn.ConstantPad1d(self.kernel_width//2, 0) elif self.pad_mode == 'reflect': padding", "if length is None: if self.center: real = real[:, self.pad_amount:-self.pad_amount]", "CQT kernels Q = 1/(2**(1/bins_per_octave)-1) # Creating lowpass filter and", "the ``inverse`` method under the ``STFT`` class to save GPU/RAM", "the right shape to do inverse # if self.center: #", "of iterations for Griffin-Lim. The default value is ``32`` hop_length", "Vc[:, :, :, 1] * W_i if norm == 'ortho':", "# If the input spectrogram contains only half of the", "int The number of Mel-frequency cepstral coefficients norm : string", "bin frequency fmax_t = self.fmin_t*2**((bins_per_octave-1)/bins_per_octave) else: # Calculate the top", "prints. device : str Choose which device to initialize this", "in range(self.n_octaves-1): hop = hop//2 x_down = downsampling_by_2(x_down, self.lowpass_filter) CQT1", "everything nn.Parameter, so that this model can support nn.DataParallel mel_basis", "self.register_buffer('lowpass_filter', lowpass_filter[None,None,:]) if verbose==True: print(\"Low pass filter created, time used", "fmax_t/2**(1-1/bins_per_octave) # Adjusting the top minium bins if fmax_t >", "class inherits from ``torch.nn.Module``, therefore, the usage is same as", ": bool To activate the iSTFT module or not. 
By", "W_i trainable here k = - torch.arange(N, dtype=x.dtype, device=x.device)[None, :]", "the iSTFT kernel. Default value if ``True``. Please make sure", "these variables trainable kernel_sin = torch.nn.Parameter(kernel_sin, requires_grad=trainable_kernels) kernel_cos = torch.nn.Parameter(kernel_cos,", "int The number of iterations for Griffin-Lim. The default value", "or {} if mel_inversion_params: mel_inversion_params = {**default_mel_inversion_params, **mel_inversion_params} if stft_inversion_params:", "is 12. norm : int Normalization for the CQT kernels.", "ending frequency for the highest Mel filter bank. trainable_mel :", "fmin, fmax, htk=htk, norm=norm) mel_basis = torch.tensor(mel_basis) if verbose==True: print(\"STFT", "the time to frequency domain transformation kernel for the input", "magnitude spectrograms to waveforms. Parameters ---------- S : torch tensor", "== 'reflect': self.padding = nn.ReflectionPad1d(self.n_fft//2) def forward(self,x,output_format=None): \"\"\" Convert a", "= downsampling_by_n(x, self.early_downsample_filter, self.downsample_factor) hop = self.hop_length CQT = get_cqt_complex(x,", "was made avaliable. The inverse function only works for 1", "of frequency bins. Default is ``None``, which means ``n_fft//2+1`` bins", "angles[:,:,:,1] = torch.sin(2 * np.pi * rand_phase) # Initializing the", "\\ \"make sure our tensor is in the shape of", "Mel spectrogram part can be made trainable using ``trainable_mel`` and", "or ``Complex`` or ``Phase``. Default value is ``Complex``. \"\"\" output_format", "in range(max_steps): optimizer.zero_grad() pred_mel = mel_basis @ pred_stft loss =", "``(num_audio, 1, len_audio)`` It will be automatically broadcast to the", "be updated during model training. Default value is ``False``. 
trainable_STFT", "iSTFT kernel, if ``True``, the time index is the center", "the CQT keneral at the center of the time-step or", "make sure the value is the same as the forward", "window=self.w, center=self.center) # wav2spec conversion rebuilt = torch.stft(inverse, self.n_fft, self.hop_length,", "band_center = 0.50, # kernelLength=256, # transitionBandwidth=0.001)) lowpass_filter = torch.tensor(create_lowpass_filter(", "x[:, :, 1::2].flip([2])], dim=2) Vc = torch.rfft(v, 1, onesided=False) #", "process is still same as the 1992 algorithm. Therefore, we", "x_down = downsampling_by_2(x_down, self.lowpass_filter) CQT1 = get_cqt_complex(x_down, self.cqt_kernels_real, self.cqt_kernels_imag, hop,", "0.5 pred_stft = pred_stft.view((*shape[:-2], n_freq, time)) if return_extras: return pred_stft,", "will be used to calculate filter_cutoff and creating CQT kernels", "This will be used to calculate filter_cutoff and creating CQT", "{}, CQT kernel size = {}'.format( (*self.wcos.shape,), (*self.cqt_kernels_real.shape,) ) class", "CQT1992v2(torch.nn.Module): \"\"\"This function is to calculate the CQT of the", "= {:.4f} seconds\".format(time()-start)) else: self.downsample_factor=1. # Preparing CQT kernels if", "== 3 else pred target = target.unsqueeze(1) if target.ndim ==", "Control the type of spectrogram to be return. Can be", "time index is the center of the STFT kernel. Default", "bins. Default is 84. Will be ignored if ``fmax`` is", "self.n_octaves, verbose) self.register_buffer('early_downsample_filter', early_downsample_filter) if verbose==True: print(\"Early downsampling filter created,", ":, :, 1] * W_i if norm == 'ortho': V[:,", "how many resampling requires for the CQT n_filters = min(bins_per_octave,", "conv2d(X_imag_bc, self.kernel_sin_inv, stride=(1,1)) # compute real and imag part. signal", "to Audio and Acoustics (pp. 1-4), Oct. 2013. 
Parameters ----------", "epsilon = 10e-8 # fudge factor for normalization ### ---------------------------", ":, :, 0], X[:, :, :, 1] # broadcast dimensions", "2.0 ** (np.r_[0:n_bins] / np.float(bins_per_octave)) lenghts = np.ceil(Q * sr", "the final phase to reconstruct the waveforms inverse = torch.istft(S.unsqueeze(-1)", "= create_fourier_kernels(self.n_fft, window='ones', freq_scale='no') wsin = kernel_sin * window wcos", "= torch.tensor(get_window(window, int(self.win_length), fftbins=True), device=device).float() def forward(self, S): \"\"\" Convert", "value is 'reflect'. trainable : bool Determine if the CQT", "Most of the arguments follow the convention from librosa. This", "domain transformation kernel for the input audio is trainable or", "of the shape ``(batch, n_fft//2+1, timesteps)`` \"\"\" assert S.dim()==3 ,", "stride=self.stride) real = conv1d(x, self.wcos, stride=self.stride) return (real, -imag) def", "if refresh_win==None: refresh_win=self.refresh_win assert X.dim()==4 , \"Inverse iSTFT only works", "= \",self.n_fft) # Preparing kernels for Short-Time Fourier Transform (STFT)", ":, 1:] /= np.sqrt(N / 2) * 2 V =", "\"\"\" This algorithm is using the resampling method proposed in", "= earlydownsample # TODO: activate early downsampling later if possible", "``n_bins`` and ``bins_per_octave``. If ``fmax`` is not ``None``, then the", "calculation of a constant Q transform.” (1992). Parameters ---------- sr", "It returns a tensor of MFCCs. shape = ``(num_samples, n_mfcc,", "imag)`` in the last axis. Default value is 'Magnitude'. verbose", "will be automatically broadcast to the right shape output_format :", "value spectrograms. 
If you have the magnitude spectrograms, please use", "x def extra_repr(self) -> str: return 'n_mfcc = {}'.format( (self.n_mfcc)", "the shape ``(batch, n_fft//2+1, timesteps)`` \"\"\" assert S.dim()==3 , \"Please", "will return the phase of the STFT reuslt, shape =", "self.hop_length, win_length=self.win_length, window=self.w, pad_mode=self.pad_mode) # Phase update rule angles[:,:,:] =", "multiplication # Create filter windows for inverse kernel_sin, kernel_cos, _,", "wsin = torch.nn.Parameter(wsin, requires_grad=self.trainable) wcos = torch.nn.Parameter(wcos, requires_grad=self.trainable) self.register_parameter('wsin', wsin)", "torch.tensor(wsin, dtype=torch.float) self.wcos = torch.tensor(wcos, dtype=torch.float) def forward(self,x): \"\"\" Convert", "extra care. Parameters ---------- n_fft : int The window size.", "trainable_window=False, verbose=True, refresh_win=True): super().__init__() # Trying to make the default", "norm : int Normalization for the CQT kernels. ``1`` means", "// 2 self.refresh_win = refresh_win start = time() # Create", "self.freq_bins = freq_bins self.trainable = trainable self.pad_amount = self.n_fft //", "torch.empty((*S.shape,2), device=self.device) angles[:, :,:,0] = torch.cos(2 * np.pi * rand_phase)", "the frequency axis x_shape = x.shape N = x_shape[-1] v", "real[:,self.nonzero_indices].div(self.w_sum[self.nonzero_indices]) # Remove padding if length is None: if self.center:", "# Check conditions if not loss.isfinite(): raise OverflowError(\"Overflow encountered in", "from time import time from nnAudio.librosa_functions import * from nnAudio.utils", "can be controlled by ``fmin`` and ``fmax``. If 'no' is", "filter banks. The filter banks maps the n_fft to mel", "CQT),1) CQT = CQT[:,-self.n_bins:,:] # Removing unwanted bottom bins #", "STFT kernels will be updated during model training. Default value", "kernel for the input audio is trainable or not. 
Default", "CQT*torch.sqrt(self.lenghts.view(-1,1)) class CQT2010v2(torch.nn.Module): \"\"\"This function is to calculate the CQT", "= torch.tensor(wcos, dtype=torch.float) def forward(self,x): \"\"\" Convert a batch of", "STFT. Default value is 2048 n_mels : int The number", "print(\"Creating low pass filter ...\", end='\\r') start = time() #", "The result with and without early downsampling are more or", ": int Number of frequency bins. Default is ``None``, which", "Removing unwanted top bins if self.norm: CQT = CQT/self.n_fft*torch.sqrt(self.lenghts.view(-1,1,1)) else:", "calculated automatically. Default is ``None`` n_bins : int The total", "X_real_bc = X_real.unsqueeze(1) X_imag_bc = X_imag.unsqueeze(1) a1 = conv2d(X_real_bc, self.kernel_cos,", "extend_fbins(X) # extend freq X_real, X_imag = X[:, :, :,", "norm == 'ortho': V[:, :, 0] /= np.sqrt(N) * 2", "Preparing CQT kernels if verbose==True: print(\"Creating CQT kernels ...\", end='\\r')", "else: return torch.sqrt(spec) elif output_format=='Complex': return torch.stack((spec_real,-spec_imag), -1) # Remember", "self.register_buffer('cqt_kernels_real', cqt_kernels_real) self.register_buffer('cqt_kernels_imag', cqt_kernels_imag) # If center==True, the STFT window", "Number of bins per octave. Default is 12. norm :", "\"\"\"This function is to calculate the CQT of the input", "hop_length=None, window='hann', freq_scale='no', center=True, pad_mode='reflect', iSTFT=False, fmin=50, fmax=6000, sr=22050, trainable=False,", "calculate the Melspectrogram of the input signal. Input signal should", "part of the signal. x_imag : torch tensor Imaginary part", "<NAME>. and <NAME>. 
“An efficient algorithm for the calculation of", "contains only half of the n_fft # Use extend_fbins function", ": bool Determine if the frequency domain CQT kernel is", "def forward(self, x): \"\"\" Convert a batch of waveforms to", "optimizer.step() # Check conditions if not loss.isfinite(): raise OverflowError(\"Overflow encountered", "return x def extra_repr(self) -> str: return 'n_mfcc = {}'.format(", "without early downsampling are more or less the same except", "here. if verbose==True: print(\"Creating STFT kernels ...\", end='\\r') start =", "freq_bins=None, hop_length=None, window='hann', freq_scale='no', center=True, pad_mode='reflect', iSTFT=False, fmin=50, fmax=6000, sr=22050,", "# need to deal with the filter and other tensors", "and <NAME>. “An efficient algorithm for the calculation of a", "trainable_kernels : bool Determine if the STFT kenrels are trainable", "under the ``STFT`` class to save GPU/RAM memory. When ``trainable=True``", "= {}, n_filters = {}\".format(Q, self.fmin_t, n_filters)) basis, self.n_fft, _", "= ``(num_samples, freq_bins,time_steps, 2)`` if ``output_format='Complex' or 'Phase'``; Examples --------", "int The window size. Default value is 2048. n_iter=32 :", "kernel_sin = torch.tensor(kernel_sin, dtype=torch.float) kernel_cos = torch.tensor(kernel_cos, dtype=torch.float) # In", "else: pass def forward(self, x, output_format=None): \"\"\" Convert a batch", "'reflect': self.padding = nn.ReflectionPad1d(self.n_fft//2) def forward(self,x, output_format=None): \"\"\" Convert a", "= create_fourier_kernels(n_fft, win_length=win_length, freq_bins=n_fft, window=window, freq_scale=freq_scale, fmin=fmin, fmax=fmax, sr=sr, verbose=False)", "< grad_threshold: if verbose: print(f\"Target max gradient of {grad_threshold} reached.", "verbose==True: print(\"Creating low pass filter ...\", end='\\r') start = time()", "value is the same as the forward STFT. window :", "are trainable or not. 
If ``True``, the gradients for STFT", "shape = ``(num_samples, freq_bins, time_steps)``; 'Complex' will return the STFT", "used = {:.4f} seconds\".format(time()-start)) else: pass if trainable_mel: # Making", "center : bool Putting the iSTFT keneral at the center", "cqt_kernels = fft(cqt_kernels)[:,:self.kernel_width//2+1] print(\"CQT kernels created, time used = {:.4f}", "torch.tensor(basis.real.astype(np.float32)).unsqueeze(1) cqt_kernels_imag = torch.tensor(basis.imag.astype(np.float32)).unsqueeze(1) if trainable: cqt_kernels_real = torch.nn.Parameter(cqt_kernels_real, requires_grad=trainable_kernels)", "or self.output_format x = broadcast_dim(x) if self.earlydownsample==True: x = downsampling_by_n(x,", "num of filter requires for the kernel # n_octaves determines", "either ``Magnitude`` or ``Complex`` or ``Phase``. Default value is ``Complex``.", "earlydownsample # We will activate early downsampling later if possible", "the start and the end of the output. refresh_win :", "None: if self.center: real = real[:, self.pad_amount:-self.pad_amount] else: if self.center:", "kernels ...\", end='\\r') start = time() kernel_sin, kernel_cos, self.bins2freq, _,", "time steps if hasattr(self, 'w_sum')==False or refresh_win==True: self.w_sum = torch_window_sumsquare(self.window_mask.flatten(),", "freq domain # These cqt_kernel is already in the frequency", "``True``, the gradients for CQT kernels will also be caluclated", "= freq_bins self.trainable = trainable self.pad_amount = self.n_fft // 2", "\"\"\" x = self.melspec_layer(x) x = self._power_to_db(x) x = self._dct(x,", "is the center of the CQT kernel. 
Default value if", "torch tensor Input signal should be in either of the", "phase to reconstruct the waveforms inverse = torch.istft(S.unsqueeze(-1) * angles,", "# same mag as 1992 CQT = CQT*self.downsample_factor # Normalize", "torch.nn.Parameter(mel_basis, requires_grad=trainable_mel) self.register_parameter('mel_basis', mel_basis) else: self.register_buffer('mel_basis', mel_basis) # if trainable_mel==True:", "if ``fmax`` is not ``None``. bins_per_octave : int Number of", "center=center, pad_mode=pad_mode, sr=sr, trainable=trainable_STFT, output_format=\"Magnitude\", verbose=verbose, **kwargs) # Create filter", "norm : bool Normalization for the CQT result. basis_norm :", "hop_length # Creating window function for stft and istft later", "please use ``refresh_win=None`` to increase computational speed. \"\"\" if refresh_win==None:", "hop_length==None: self.hop_length = n_fft//4 else: self.hop_length = hop_length # Creating", "window size. Default value is 2048. freq_bins : int Number", "= torch.tensor(lenghts).float() self.register_buffer('lenghts', lenghts) self.basis=basis fft_basis = fft(basis)[:,:self.n_fft//2+1] # Convert", "class Griffin_Lim(torch.nn.Module): \"\"\" Converting Magnitude spectrograms back to waveforms based", "n_filters)) basis, self.n_fft, _ = create_cqt_kernels(Q, sr, self.fmin_t, n_filters, bins_per_octave,", "Prepare the right shape to do inverse x_imag.transpose_(1,2) # Prepare", "2. [1] Brown, <NAME>. and <NAME>. 
“An efficient algorithm for", "same as the :func:`~nnAudio.Spectrogram.iSTFT` class, which is to convert spectrograms", "# Create the window function and prepare the shape for", "= torch.sqrt(CQT_real.pow(2)+CQT_imag.pow(2)) return CQT*torch.sqrt(self.lenghts.view(-1,1)) class CQT2010v2(torch.nn.Module): \"\"\"This function is to", "self.bins2freq, _, window = create_fourier_kernels(self.kernel_width, window='ones', freq_scale='no') # Converting kernels", "return_extras=False, verbose=None): \"\"\" Best-attempt spectrogram inversion \"\"\" def loss_fn(pred, target):", "sure our tensor is in the shape of (batch, freq_bins,", "= torch.istft(S.unsqueeze(-1) * angles, self.n_fft, self.hop_length, win_length=self.win_length, window=self.w, center=self.center) #", "axis x_shape = x.shape N = x_shape[-1] v = torch.cat([x[:,", "all these variables nn.Parameter, so that the model can be", "the forward STFT. window : str The windowing function for", "spectrograms. shape = ``(num_samples, freq_bins,time_steps)``. Examples -------- >>> spec_layer =", "The complex number is stored as ``(real, imag)`` in the", "downsampling later if possible # This will be used to", "verbose==True: print(\"Creating early downsampling filter ...\", end='\\r') start = time()", "length=None, refresh_win=True): \"\"\" This function is same as the :func:`~nnAudio.Spectrogram.iSTFT`", "self.early_downsample_filter, self.downsample_factor) hop = self.hop_length CQT = get_cqt_complex(x, self.cqt_kernels_real, self.cqt_kernels_imag,", "if stft_inversion_params: stft_inversion_params = {**default_stft_inversion_params, **stft_inversion_params} recon_stft = self.to_stft(melspec, **mel_inversion_params)", "padding(x) spec_imag = conv1d(x, self.wsin, stride=self.stride) spec_real = conv1d(x, self.wcos,", "the magnitude spectrograms, please use :func:`~nnAudio.Spectrogram.Griffin_Lim`. The parameters (e.g. 
n_fft,", "magnitude of the STFT result, shape = ``(num_samples, freq_bins, time_steps)``;", "bool When ``False`` is used, the Mel scale is quasi-logarithmic.", "{:.4f} seconds\".format(time()-start)) else: self.downsample_factor=1. # Preparing CQT kernels if verbose==True:", "self.melspec_layer(x) x = self._power_to_db(x) x = self._dct(x, norm='ortho')[:,:self.m_mfcc,:] return x", "return 'n_mfcc = {}'.format( (self.n_mfcc) ) class CQT1992(torch.nn.Module): \"\"\" This", "the input signal. Input signal should be in either of", "please set `length` as your intended waveform length. By default,", "self.padding = nn.ReflectionPad1d(self.n_fft//2) def forward(self,x, output_format=None): \"\"\" Convert a batch", "half of the n_fft # Use extend_fbins function to get", "updated during model training. Default value is ``False``. verbose :", "if output_format=='Magnitude': # Getting CQT Amplitude return torch.sqrt(CQT.pow(2).sum(-1)) elif output_format=='Complex':", "be updated during model training. Default value is ``False`` output_format", "self.trainable = trainable self.stride = hop_length self.center = center self.pad_mode", "else: if self.center: real = real[:, self.pad_amount:self.pad_amount + length] else:", "for calculating the correct frequency. n_mfcc : int The number", "does nothing. sr : int The sampling rate for the", "return CQT*torch.sqrt(self.lenghts.view(-1,1)) class CQT2010v2(torch.nn.Module): \"\"\"This function is to calculate the", "n_fft to mel bins. Default value is 128. hop_length :", "at the moment. Input signal should be in either of", "= output_format or self.output_format self.num_samples = x.shape[-1] x = broadcast_dim(x)", "debugging \"\"\" x = broadcast_dim(x) if self.center: if self.pad_mode ==", "self.momentum)) * tprev[:,:,:] # Phase normalization angles = angles.div(torch.sqrt(angles.pow(2).sum(-1)).unsqueeze(-1) +", "ending are required. if self.pad_mode == 'constant': self.padding = nn.ConstantPad1d(self.n_fft//2,", "information. 
If ``False``, it suppresses all prints Returns ------- spectrogram", "(batch, freq_bins, timesteps, 2)\" # If the input spectrogram contains", "used to calculate filter_cutoff and creating CQT kernels Q =", "torch.tensor(window_mask).unsqueeze(0).unsqueeze(-1) # kernel_sin and kernel_cos have the shape (freq_bins, 1,", "time() cqt_kernels, self.kernel_width, lenghts = create_cqt_kernels(Q, sr, fmin, n_bins, bins_per_octave,", "CQT2010(torch.nn.Module): \"\"\" This algorithm is using the resampling method proposed", "self.wcos, stride=self.stride) a2 = conv1d(x_real, self.wsin, stride=self.stride) b1 = conv1d(x_imag,", "is 'cpu' \"\"\" def __init__(self, n_fft, n_iter=32, hop_length=None, win_length=None, window='hann',", "imaginary part elif output_format=='Phase': return torch.atan2(-spec_imag+0.0,spec_real) # +0.0 removes -0.0", "if ``True``, the time index is the center of the", "(num_samples, freq_bins,time_steps, 2)`` if ``output_format='Complex' or 'Phase'``; Examples -------- >>>", "Examples -------- >>> spec_layer = Spectrogram.MelSpectrogram() >>> specs = spec_layer(x)", "len_audio)`` The correct shape will be inferred autommatically if the", "complex number, shape = ``(num_samples, freq_bins, time_steps, 2)``; 'Phase' will", ":func:`~nnAudio.Spectrogram.Griffin_Lim`. The parameters (e.g. 
n_fft, window) need to be the", "cqt kernel done, n_fft = \",self.n_fft) # If center==True, the", "Spectrogram.STFT() >>> specs = spec_layer(x) \"\"\" def __init__(self, n_fft=2048, win_length=None,", "norm=basis_norm, topbin_check=False) # For normalization in the end freqs =", "self.refresh_win = refresh_win start = time() # Create the window", "fmax_t, Q, self.n_octaves, verbose) self.register_buffer('early_downsample_filter', early_downsample_filter) if verbose==True: print(\"Early downsampling", "torch.nn.Parameter(cqt_kernels_real, requires_grad=trainable_kernels) cqt_kernels_imag = torch.nn.Parameter(cqt_kernels_imag, requires_grad=trainable_kernels) self.register_parameter('cqt_kernels_real', cqt_kernels_real) self.register_parameter('cqt_kernels_imag', cqt_kernels_imag)", "the time index is the beginning of the iSTFT kernel,", "compute real and imag part. signal lies in the real", "= torch.cos(torch.atan2(CQT_imag,CQT_real)) phase_imag = torch.sin(torch.atan2(CQT_imag,CQT_real)) return torch.stack((phase_real,phase_imag), -1) def forward_manual(self,x):", "\"\"\" Best-attempt spectrogram inversion \"\"\" def loss_fn(pred, target): pred =", "hop_length=512, fmin=32.70, fmax=None, n_bins=84, bins_per_octave=12, norm=True, basis_norm=1, window='hann', pad_mode='reflect', trainable_STFT=False,", "torch.sin(torch.atan2(CQT_imag,CQT_real)) return torch.stack((phase_real,phase_imag), -1) def extra_repr(self) -> str: return 'STFT", "1:] /= np.sqrt(N / 2) * 2 V = 2", "the higest CQT bin is inferred from the ``n_bins`` and", "shape to do inverse # if self.center: # if self.pad_mode", "trainable_STFT self.verbose = verbose # Preparing for the stft layer.", "@ pred_stft loss = loss_fn(pred_mel, melspec) losses.append(loss.item()) loss.backward() optimizer.step() #", "our tensor is in the shape of (batch, freq_bins, timesteps,", "win_length = n_fft if hop_length==None: hop_length = int(win_length // 4)", "i.e. 
input shape = (batch, n_fft, 1) \"\"\" def __init__(self,", "# ifft(X_imag)*1j = (b1, b2)*1j # = (-b2, b1) a1", "domain cqt_kernels_real = torch.tensor(fft_basis.real.astype(np.float32)) cqt_kernels_imag = torch.tensor(fft_basis.imag.astype(np.float32)) if verbose==True: print(\"CQT", "if trainable_kernels: # Making all these variables trainable kernel_sin =", "prevent Nan gradient when sqrt(0) due to output=0 else: return", "= fmax_t/2**(1-1/bins_per_octave) # Adjusting the top minium bins if fmax_t", "pass filter ...\", end='\\r') start = time() # self.lowpass_filter =", "int The window size for the STFT. Default value is", "windowing function for STFT. It uses ``scipy.signal.get_window``, please refer to", "cqt_kernels_real) self.register_parameter('cqt_kernels_imag', cqt_kernels_imag) else: self.register_buffer('cqt_kernels_real', cqt_kernels_real) self.register_buffer('cqt_kernels_imag', cqt_kernels_imag) print(\"STFT kernels", "works for complex number,\" \\ \"make sure our tensor is", "It returns a tensor of spectrograms. shape = ``(num_samples, freq_bins,time_steps)``", "ref) self.top_db = top_db self.n_mfcc = n_mfcc def _power_to_db(self, S):", "frequency domain CQT kernel is trainable or not. Default is", "1/(2**(1/bins_per_octave)-1) print(\"Creating CQT kernels ...\", end='\\r') start = time() cqt_kernels,", "sum square. If you have an input with fixed number", "model training. Default value is ``False`` output_format : str Control", "output_format='Complex', norm=1, window='hann', center=True, pad_mode='reflect'): super().__init__() # norm arg is", "updated during model training. Default value is ``False`` output_format :", "that will be used for _power_to_db if amin <= 0:", "the signal. 
x_imag : torch tensor Imaginary part of the", "be windowed window_mask = torch.tensor(window_mask).unsqueeze(0).unsqueeze(-1) # kernel_sin and kernel_cos have", "Spectrogram Classes ---------------------------### class STFT(torch.nn.Module): \"\"\"This function is to calculate", "if self.center: # if self.pad_mode == 'constant': # padding =", "bins. Default value is 128. hop_length : int The hop", "class STFT(torch.nn.Module): \"\"\"This function is to calculate the short-time Fourier", "training. Default value is ``False``. output_format : str Determine the", "input follows these 3 shapes. Most of the arguments follow", "hop_length==None: hop_length = int(win_length // 4) self.output_format = output_format self.trainable", "self.wcos, stride=self.stride) # Doing STFT by using conv1d # remove", "return torch.sqrt(spec) elif output_format=='Complex': return torch.stack((spec_real,-spec_imag), -1) # Remember the", "batch_wise_max - self.top_db) return log_spec def _dct(self, x, norm=None): '''", ": bool Determine if the window function is trainable or", "trainable={}'.format( self.n_fft, (*self.wsin.shape,), self.iSTFT, self.trainable ) class MelSpectrogram(torch.nn.Module): \"\"\"This function", "``fmin`` and ``fmax``. If 'no' is used, the bin will", "512. fmin : float The frequency for the lowest CQT", "for stft wsin, wcos, self.bins2freq = create_fourier_kernels(n_fft=n_fft, freq_bins=n_fft, window=window, freq_scale=freq_scale,", "from the 1992 alogrithm [2] [1] <NAME>. “CONSTANT-Q TRANSFORM TOOLBOX", "= iSTFT self.trainable = trainable start = time() # Create", "stft_inversion_params=None): default_mel_inversion_params = {} default_stft_inversion_params = {} mel_inversion_params = mel_inversion_params", "parameters (e.g. n_fft, window) need to be the same as", "shape \"\"\" x = self.melspec_layer(x) x = self._power_to_db(x) x =", "the real part real = a1 - b2 real =", "the complex value spectrograms. 
If you have the magnitude spectrograms,", "seconds\".format(time()-start)) if trainable_STFT: wsin = torch.nn.Parameter(wsin, requires_grad=trainable_kernels) wcos = torch.nn.Parameter(wcos,", "with and without early downsampling are more or less the", "raise ParameterError('top_db must be non-negative') # make the dim same", "will be used for _power_to_db if amin <= 0: raise", "= n_fft self.freq_bins = freq_bins self.trainable = trainable self.pad_amount =", "2 self.refresh_win = refresh_win start = time() # Create the", "downsampling factor is to downsample the input audio to reduce", "in the shape of (batch, freq_bins, timesteps)\" # Initializing Random", "self.early_downsample_filter, self.downsample_factor) hop = self.hop_length CQT = get_cqt_complex(x, self.wcos, self.wsin,", "= broadcast_dim(x) spec = self.stft(x, output_format='Magnitude')**self.power melspec = torch.matmul(self.mel_basis, spec)", "conv2d(X_real_bc, self.kernel_cos, stride=(1,1)) b2 = conv2d(X_imag_bc, self.kernel_sin, stride=(1,1)) # compute", "of the signal. \"\"\" x_real = broadcast_dim(x_real) x_imag = broadcast_dim(x_imag)", "if the STFT kenrels are trainable or not. If ``True``,", "self.wcos, self.wsin, hop, self.padding) # Getting the top octave CQT", "the same as the forward STFT. fmax : int The", "self.register_parameter('wsin', wsin) self.register_parameter('wcos', wcos) else: self.register_buffer('wsin', wsin) self.register_buffer('wcos', wcos) if", "with extra care. Parameters ---------- n_fft : int The window", "is already in the frequency domain cqt_kernels_real = torch.tensor(fft_basis.real.astype(np.float32)) cqt_kernels_imag", "class iSTFT(torch.nn.Module): \"\"\"This class is to convert spectrograms back to", "padding(x) # CQT CQT_real = conv1d(x, self.cqt_kernels_real, stride=self.hop_length) CQT_imag =", "during the ``forward`` method. 
verbose : bool If ``True``, it", "'constant': padding = nn.ConstantPad1d(self.n_fft//2, 0) elif self.pad_mode == 'reflect': padding", "n_freq = mel_basis.shape melspec = melspec.detach().view(-1, n_mels, time) if random_start:", "<NAME>. “An efficient algorithm for the calculation of a constant", "if the window function is trainable if trainable_window: window_mask =", ": bool Normalization for the CQT result. basis_norm : int", "x_shape[-1] v = torch.cat([x[:, :, ::2], x[:, :, 1::2].flip([2])], dim=2)", ":, 1] # broadcast dimensions to support 2D convolution X_real_bc", "convert spectrograms back to waveforms. It only works for the", "bin frequency fmax_t = self.fmin_t*2**((remainder-1)/bins_per_octave) self.fmin_t = fmax_t/2**(1-1/bins_per_octave) # Adjusting", "# It will be used to calculate filter_cutoff and creating", "the inverse is perfect, please use with extra care. Parameters", "in complex number, shape = ``(num_samples, freq_bins,time_steps, 2)``; ``Phase`` will", "mel_inversion_params or {} stft_inversion_params = stft_inversion_params or {} if mel_inversion_params:", "else: self.register_buffer('window_mask', window_mask) if verbose==True: print(\"iSTFT kernels created, time used", "self.hop_length = hop_length self.center = center self.pad_mode = pad_mode self.output_format", "for the input audio. It is used to calucate the", "n_freq, time) pred_stft = torch.zeros(*pred_stft_shape, dtype=torch.float32, device=mel_basis.device).normal_().clamp_(eps) else: pred_stft =", "the usage is same as ``torch.nn.Module``. 
This alogrithm uses the", "It will be used to calculate filter_cutoff and creating CQT", "= {}, CQT kernel size = {}'.format( (*self.wcos.shape,), (*self.cqt_kernels_real.shape,) )", "= {}'.format( (self.n_mfcc) ) class CQT1992(torch.nn.Module): \"\"\" This alogrithm uses", "self.downsample_factor, early_downsample_filter, \\ self.earlydownsample = get_early_downsample_params(sr, hop_length, fmax_t, Q, self.n_octaves,", "calculate the short-time Fourier transform (STFT) of the input signal." ]
[ "= get_kobart_tokenizer() else: self.tok = tok self.num_workers = num_workers @staticmethod", "pytorch_lightning import loggers as pl_loggers from torch.utils.data import DataLoader, Dataset", "'step', 'frequency': 1} return [optimizer], [lr_scheduler] class KoBARTConditionalGeneration(Base): def __init__(self,", "batch_idx): outs = self(batch) loss = outs['loss'] return (loss) def", "help='batch size for training (default: 96)') parser.add_argument('--lr', type=float, default=3e-5, help='The", "(default: 96)') parser.add_argument('--lr', type=float, default=3e-5, help='The initial learning rate') parser.add_argument('--warmup_ratio',", "for nd in no_decay)], 'weight_decay': 0.01}, {'params': [p for n,", "argparse.ArgumentParser( parents=[parent_parser], add_help=False) parser.add_argument('--batch-size', type=int, default=14, help='batch size for training", "parser = Base.add_model_specific_args(parser) parser = ArgsBase.add_model_specific_args(parser) parser = KobartSummaryModule.add_model_specific_args(parser) parser", "self.hparams = hparams @staticmethod def add_model_specific_args(parent_parser): # add model specific", "train def val_dataloader(self): val = DataLoader(self.test, batch_size=self.batch_size, num_workers=self.num_workers, shuffle=False) return", "batch_size=8, num_workers=5): super().__init__() self.batch_size = batch_size self.max_len = max_len self.train_file_path", "{num_workers}, data length {data_len}') num_train_steps = int(data_len / (self.hparams.batch_size *", "dirpath=args.default_root_dir, filename='model_chp/{epoch:02d}-{val_loss:.3f}', verbose=True, save_last=True, mode='min', save_top_k=-1, prefix='kobart_translation') tb_logger = pl_loggers.TensorBoardLogger(os.path.join(args.default_root_dir,", "pl_loggers from torch.utils.data import DataLoader, Dataset from dataset import KoBARTSummaryDataset", "max_len=args.max_len, batch_size=args.batch_size, num_workers=args.num_workers) checkpoint_callback = 
pl.callbacks.ModelCheckpoint(monitor='val_loss', dirpath=args.default_root_dir, filename='model_chp/{epoch:02d}-{val_loss:.3f}', verbose=True, save_last=True,", "learning rate') parser.add_argument('--warmup_ratio', type=float, default=0.1, help='warmup ratio') parser.add_argument('--model_path', type=str, default=None,", "# Prepare optimizer param_optimizer = list(self.model.named_parameters()) no_decay = ['bias', 'LayerNorm.bias',", "1) * (self.hparams.num_nodes if self.hparams.num_nodes is not None else 1)", "super().__init__() self.batch_size = batch_size self.max_len = max_len self.train_file_path = train_file", "(self.hparams.gpus if self.hparams.gpus is not None else 1) * (self.hparams.num_nodes", "'<s>' self.eos_token = '</s>' self.pad_token_id = 0 self.tokenizer = get_kobart_tokenizer()", "parser.add_argument('--batch_size', type=int, default=28, help='') parser.add_argument('--max_len', type=int, default=512, help='max seq len')", "optimizer = AdamW(optimizer_grouped_parameters, lr=self.hparams.lr, correct_bias=False) # warm up lr num_workers", "0 self.tokenizer = get_kobart_tokenizer() def forward(self, inputs): attention_mask = inputs['input_ids'].ne(self.pad_token_id).float()", "(self.hparams.batch_size * num_workers) * self.hparams.max_epochs) logging.info(f'num_train_steps : {num_train_steps}') num_warmup_steps =", "logger.setLevel(logging.INFO) class ArgsBase(): @staticmethod def add_model_specific_args(parent_parser): parser = argparse.ArgumentParser( parents=[parent_parser],", "default=28, help='') parser.add_argument('--max_len', type=int, default=512, help='max seq len') return parser", "def test_dataloader(self): test = DataLoader(self.test, batch_size=self.batch_size, num_workers=self.num_workers, shuffle=False) return test", "param_optimizer = list(self.model.named_parameters()) no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight'] optimizer_grouped_parameters =", "in no_decay)], 'weight_decay': 0.0} ] optimizer = 
AdamW(optimizer_grouped_parameters, lr=self.hparams.lr, correct_bias=False)", "in no_decay)], 'weight_decay': 0.01}, {'params': [p for n, p in", "logging.info(f'num_train_steps : {num_train_steps}') num_warmup_steps = int(num_train_steps * self.hparams.warmup_ratio) logging.info(f'num_warmup_steps :", "argparse.ArgumentParser(description='KoBART translation') parser.add_argument('--checkpoint_path', type=str, help='checkpoint path') logger = logging.getLogger() logger.setLevel(logging.INFO)", "= argparse.ArgumentParser( parents=[parent_parser], add_help=False) parser.add_argument('--num_workers', type=int, default=5, help='num of worker", "def val_dataloader(self): val = DataLoader(self.test, batch_size=self.batch_size, num_workers=self.num_workers, shuffle=False) return val", "model = KoBARTConditionalGeneration(args) dm = KobartSummaryModule(args.train_file, args.test_file, None, max_len=args.max_len, batch_size=args.batch_size,", "loss in outputs: losses.append(loss) self.log('val_loss', torch.stack(losses).mean(), prog_bar=True) if __name__ ==", "ratio') parser.add_argument('--model_path', type=str, default=None, help='kobart model path') return parser def", "= BartForConditionalGeneration.from_pretrained(get_pytorch_kobart_model()) self.model.train() self.bos_token = '<s>' self.eos_token = '</s>' self.pad_token_id", "hparams @staticmethod def add_model_specific_args(parent_parser): # add model specific args parser", "def add_model_specific_args(parent_parser): parser = argparse.ArgumentParser( parents=[parent_parser], add_help=False) parser.add_argument('--num_workers', type=int, default=5,", "= argparse.ArgumentParser( parents=[parent_parser], add_help=False) parser.add_argument('--batch-size', type=int, default=14, help='batch size for", "training (default: 96)') parser.add_argument('--lr', type=float, default=3e-5, help='The initial learning rate')", "argparse import logging import os import numpy as np import", "self.tok, self.max_len) def 
train_dataloader(self): train = DataLoader(self.train, batch_size=self.batch_size, num_workers=self.num_workers, shuffle=True)", "batch_size=self.batch_size, num_workers=self.num_workers, shuffle=False) return test class Base(pl.LightningModule): def __init__(self, hparams,", "import BartForConditionalGeneration, PreTrainedTokenizerFast from transformers.optimization import AdamW, get_cosine_schedule_with_warmup from kobart", "checkpoint_callback = pl.callbacks.ModelCheckpoint(monitor='val_loss', dirpath=args.default_root_dir, filename='model_chp/{epoch:02d}-{val_loss:.3f}', verbose=True, save_last=True, mode='min', save_top_k=-1, prefix='kobart_translation')", "param_optimizer if not any( nd in n for nd in", "= logging.getLogger() logger.setLevel(logging.INFO) class ArgsBase(): @staticmethod def add_model_specific_args(parent_parser): parser =", "ArgsBase(): @staticmethod def add_model_specific_args(parent_parser): parser = argparse.ArgumentParser( parents=[parent_parser], add_help=False) parser.add_argument('--train_file',", "shuffle=False) return test class Base(pl.LightningModule): def __init__(self, hparams, **kwargs) ->", "scheduler = get_cosine_schedule_with_warmup( optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_train_steps) lr_scheduler = {'scheduler': scheduler,", "not any( nd in n for nd in no_decay)], 'weight_decay':", "def validation_step(self, batch, batch_idx): outs = self(batch) loss = outs['loss']", "AdamW, get_cosine_schedule_with_warmup from kobart import get_pytorch_kobart_model, get_kobart_tokenizer parser = argparse.ArgumentParser(description='KoBART", "= get_kobart_tokenizer() def forward(self, inputs): attention_mask = inputs['input_ids'].ne(self.pad_token_id).float() decoder_attention_mask =", "type=str, default=None, help='kobart model path') return parser def configure_optimizers(self): #", "train_dataloader(self): train = DataLoader(self.train, batch_size=self.batch_size, num_workers=self.num_workers, shuffle=True) 
return train def", "<gh_stars>10-100 import argparse import logging import os import numpy as", "super(KoBARTConditionalGeneration, self).__init__(hparams, **kwargs) self.model = BartForConditionalGeneration.from_pretrained(get_pytorch_kobart_model()) self.model.train() self.bos_token = '<s>'", "data length {data_len}') num_train_steps = int(data_len / (self.hparams.batch_size * num_workers)", "losses.append(loss) self.log('val_loss', torch.stack(losses).mean(), prog_bar=True) if __name__ == '__main__': parser =", "import logging import os import numpy as np import pandas", "type=float, default=0.1, help='warmup ratio') parser.add_argument('--model_path', type=str, default=None, help='kobart model path')", "{'params': [p for n, p in param_optimizer if not any(", "parents=[parent_parser], add_help=False) parser.add_argument('--train_file', type=str, default='data/train.tsv', help='train file') parser.add_argument('--test_file', type=str, default='data/test.tsv',", "setup(self, stage): # split dataset self.train = KoBARTSummaryDataset(self.train_file_path, self.tok, self.max_len)", "DataLoader, Dataset from dataset import KoBARTSummaryDataset from transformers import BartForConditionalGeneration,", "model path') return parser def configure_optimizers(self): # Prepare optimizer param_optimizer", "as pl import torch from pytorch_lightning import loggers as pl_loggers", "for loss in outputs: losses.append(loss) self.log('val_loss', torch.stack(losses).mean(), prog_bar=True) if __name__", "= list(self.model.named_parameters()) no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight'] optimizer_grouped_parameters = [", "parser # OPTIONAL, called for every GPU/machine (assigning state is", "'weight_decay': 0.0} ] optimizer = AdamW(optimizer_grouped_parameters, lr=self.hparams.lr, correct_bias=False) # warm", "hparams, **kwargs) -> None: super(Base, self).__init__() self.hparams = hparams @staticmethod", "'weight_decay': 0.01}, {'params': [p for n, p in param_optimizer if", 
"= argparse.ArgumentParser( parents=[parent_parser], add_help=False) parser.add_argument('--train_file', type=str, default='data/train.tsv', help='train file') parser.add_argument('--test_file',", "transformers import BartForConditionalGeneration, PreTrainedTokenizerFast from transformers.optimization import AdamW, get_cosine_schedule_with_warmup from", "outs['loss'] return (loss) def validation_epoch_end(self, outputs): losses = [] for", "== '__main__': parser = Base.add_model_specific_args(parser) parser = ArgsBase.add_model_specific_args(parser) parser =", "parser.add_argument('--test_file', type=str, default='data/test.tsv', help='test file') parser.add_argument('--batch_size', type=int, default=28, help='') parser.add_argument('--max_len',", "verbose=True, save_last=True, mode='min', save_top_k=-1, prefix='kobart_translation') tb_logger = pl_loggers.TensorBoardLogger(os.path.join(args.default_root_dir, 'tb_logs')) lr_logger", "num_warmup_steps = int(num_train_steps * self.hparams.warmup_ratio) logging.info(f'num_warmup_steps : {num_warmup_steps}') scheduler =", "def __init__(self, hparams, **kwargs): super(KoBARTConditionalGeneration, self).__init__(hparams, **kwargs) self.model = BartForConditionalGeneration.from_pretrained(get_pytorch_kobart_model())", "# OPTIONAL, called for every GPU/machine (assigning state is OK)", "prog_bar=True) if __name__ == '__main__': parser = Base.add_model_specific_args(parser) parser =", "called for every GPU/machine (assigning state is OK) def setup(self,", "batch_size=self.batch_size, num_workers=self.num_workers, shuffle=False) return val def test_dataloader(self): test = DataLoader(self.test,", "self.pad_token_id = 0 self.tokenizer = get_kobart_tokenizer() def forward(self, inputs): attention_mask", "tok is None: self.tok = get_kobart_tokenizer() else: self.tok = tok", "tb_logger = pl_loggers.TensorBoardLogger(os.path.join(args.default_root_dir, 'tb_logs')) lr_logger = pl.callbacks.LearningRateMonitor() trainer = 
pl.Trainer.from_argparse_args(args,", "def forward(self, inputs): attention_mask = inputs['input_ids'].ne(self.pad_token_id).float() decoder_attention_mask = inputs['decoder_input_ids'].ne(self.pad_token_id).float() return", "# warm up lr num_workers = (self.hparams.gpus if self.hparams.gpus is", "loss = outs['loss'] return (loss) def validation_epoch_end(self, outputs): losses =", "inputs['decoder_input_ids'].ne(self.pad_token_id).float() return self.model(input_ids=inputs['input_ids'], attention_mask=attention_mask, decoder_input_ids=inputs['decoder_input_ids'], decoder_attention_mask=decoder_attention_mask, labels=inputs['labels'], return_dict=True) def training_step(self,", "parser.add_argument('--num_workers', type=int, default=5, help='num of worker for dataloader') return parser", "def add_model_specific_args(parent_parser): parser = argparse.ArgumentParser( parents=[parent_parser], add_help=False) parser.add_argument('--train_file', type=str, default='data/train.tsv',", "no_decay)], 'weight_decay': 0.01}, {'params': [p for n, p in param_optimizer", "logging import os import numpy as np import pandas as", "if not any( nd in n for nd in no_decay)],", ": {num_train_steps}') num_warmup_steps = int(num_train_steps * self.hparams.warmup_ratio) logging.info(f'num_warmup_steps : {num_warmup_steps}')", "self.train_file_path = train_file self.test_file_path = test_file if tok is None:", "parser.add_argument('--model_path', type=str, default=None, help='kobart model path') return parser def configure_optimizers(self):", "= batch_size self.max_len = max_len self.train_file_path = train_file self.test_file_path =", "losses = [] for loss in outputs: losses.append(loss) self.log('val_loss', torch.stack(losses).mean(),", "KoBARTConditionalGeneration(args) dm = KobartSummaryModule(args.train_file, args.test_file, None, max_len=args.max_len, batch_size=args.batch_size, num_workers=args.num_workers) checkpoint_callback", "None: self.tok = get_kobart_tokenizer() else: self.tok 
= tok self.num_workers =", "nd in no_decay)], 'weight_decay': 0.01}, {'params': [p for n, p", "[optimizer], [lr_scheduler] class KoBARTConditionalGeneration(Base): def __init__(self, hparams, **kwargs): super(KoBARTConditionalGeneration, self).__init__(hparams,", "else 1) data_len = len(self.train_dataloader().dataset) logging.info(f'number of workers {num_workers}, data", "= DataLoader(self.test, batch_size=self.batch_size, num_workers=self.num_workers, shuffle=False) return test class Base(pl.LightningModule): def", "def __init__(self, train_file, test_file, tok, max_len=512, batch_size=8, num_workers=5): super().__init__() self.batch_size", "__init__(self, hparams, **kwargs): super(KoBARTConditionalGeneration, self).__init__(hparams, **kwargs) self.model = BartForConditionalGeneration.from_pretrained(get_pytorch_kobart_model()) self.model.train()", "worker for dataloader') return parser # OPTIONAL, called for every", "decoder_attention_mask = inputs['decoder_input_ids'].ne(self.pad_token_id).float() return self.model(input_ids=inputs['input_ids'], attention_mask=attention_mask, decoder_input_ids=inputs['decoder_input_ids'], decoder_attention_mask=decoder_attention_mask, labels=inputs['labels'], return_dict=True)", "self.log('train_loss', loss, prog_bar=True) return loss def validation_step(self, batch, batch_idx): outs", "= 0 self.tokenizer = get_kobart_tokenizer() def forward(self, inputs): attention_mask =", "for dataloader') return parser # OPTIONAL, called for every GPU/machine", "in outputs: losses.append(loss) self.log('val_loss', torch.stack(losses).mean(), prog_bar=True) if __name__ == '__main__':", "optimizer_grouped_parameters = [ {'params': [p for n, p in param_optimizer", "help='') parser.add_argument('--max_len', type=int, default=512, help='max seq len') return parser class", "parser def configure_optimizers(self): # Prepare optimizer param_optimizer = list(self.model.named_parameters()) no_decay", "lr_scheduler = {'scheduler': scheduler, 
'monitor': 'loss', 'interval': 'step', 'frequency': 1}", "for nd in no_decay)], 'weight_decay': 0.0} ] optimizer = AdamW(optimizer_grouped_parameters,", "= tok self.num_workers = num_workers @staticmethod def add_model_specific_args(parent_parser): parser =", "logging.info(f'num_warmup_steps : {num_warmup_steps}') scheduler = get_cosine_schedule_with_warmup( optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_train_steps) lr_scheduler", "Prepare optimizer param_optimizer = list(self.model.named_parameters()) no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']", "parser.add_argument('--train_file', type=str, default='data/train.tsv', help='train file') parser.add_argument('--test_file', type=str, default='data/test.tsv', help='test file')", "nd in no_decay)], 'weight_decay': 0.0} ] optimizer = AdamW(optimizer_grouped_parameters, lr=self.hparams.lr,", "def __init__(self, hparams, **kwargs) -> None: super(Base, self).__init__() self.hparams =", "mode='min', save_top_k=-1, prefix='kobart_translation') tb_logger = pl_loggers.TensorBoardLogger(os.path.join(args.default_root_dir, 'tb_logs')) lr_logger = pl.callbacks.LearningRateMonitor()", "{'params': [p for n, p in param_optimizer if any( nd", "from dataset import KoBARTSummaryDataset from transformers import BartForConditionalGeneration, PreTrainedTokenizerFast from", "= test_file if tok is None: self.tok = get_kobart_tokenizer() else:", "[ {'params': [p for n, p in param_optimizer if not", "self.num_workers = num_workers @staticmethod def add_model_specific_args(parent_parser): parser = argparse.ArgumentParser( parents=[parent_parser],", "default=0.1, help='warmup ratio') parser.add_argument('--model_path', type=str, default=None, help='kobart model path') return", "{'scheduler': scheduler, 'monitor': 'loss', 'interval': 'step', 'frequency': 1} return [optimizer],", "OK) def setup(self, stage): # split dataset self.train = KoBARTSummaryDataset(self.train_file_path,", "96)') 
parser.add_argument('--lr', type=float, default=3e-5, help='The initial learning rate') parser.add_argument('--warmup_ratio', type=float,", "path') return parser def configure_optimizers(self): # Prepare optimizer param_optimizer =", "self.max_len) def train_dataloader(self): train = DataLoader(self.train, batch_size=self.batch_size, num_workers=self.num_workers, shuffle=True) return", "(assigning state is OK) def setup(self, stage): # split dataset", "= int(num_train_steps * self.hparams.warmup_ratio) logging.info(f'num_warmup_steps : {num_warmup_steps}') scheduler = get_cosine_schedule_with_warmup(", "default='data/train.tsv', help='train file') parser.add_argument('--test_file', type=str, default='data/test.tsv', help='test file') parser.add_argument('--batch_size', type=int,", "get_pytorch_kobart_model, get_kobart_tokenizer parser = argparse.ArgumentParser(description='KoBART translation') parser.add_argument('--checkpoint_path', type=str, help='checkpoint path')", "batch, batch_idx): outs = self(batch) loss = outs['loss'] return (loss)", "0.0} ] optimizer = AdamW(optimizer_grouped_parameters, lr=self.hparams.lr, correct_bias=False) # warm up", "attention_mask = inputs['input_ids'].ne(self.pad_token_id).float() decoder_attention_mask = inputs['decoder_input_ids'].ne(self.pad_token_id).float() return self.model(input_ids=inputs['input_ids'], attention_mask=attention_mask, decoder_input_ids=inputs['decoder_input_ids'],", "self).__init__() self.hparams = hparams @staticmethod def add_model_specific_args(parent_parser): # add model", "num_workers=5): super().__init__() self.batch_size = batch_size self.max_len = max_len self.train_file_path =", "torch.stack(losses).mean(), prog_bar=True) if __name__ == '__main__': parser = Base.add_model_specific_args(parser) parser", "logging.getLogger() logger.setLevel(logging.INFO) class ArgsBase(): @staticmethod def add_model_specific_args(parent_parser): parser = argparse.ArgumentParser(", "length {data_len}') num_train_steps 
= int(data_len / (self.hparams.batch_size * num_workers) *", "of worker for dataloader') return parser # OPTIONAL, called for", "save_last=True, mode='min', save_top_k=-1, prefix='kobart_translation') tb_logger = pl_loggers.TensorBoardLogger(os.path.join(args.default_root_dir, 'tb_logs')) lr_logger =", "KoBARTSummaryDataset from transformers import BartForConditionalGeneration, PreTrainedTokenizerFast from transformers.optimization import AdamW,", "parser = argparse.ArgumentParser(description='KoBART translation') parser.add_argument('--checkpoint_path', type=str, help='checkpoint path') logger =", "= DataLoader(self.train, batch_size=self.batch_size, num_workers=self.num_workers, shuffle=True) return train def val_dataloader(self): val", "1} return [optimizer], [lr_scheduler] class KoBARTConditionalGeneration(Base): def __init__(self, hparams, **kwargs):", "['bias', 'LayerNorm.bias', 'LayerNorm.weight'] optimizer_grouped_parameters = [ {'params': [p for n,", "type=str, help='checkpoint path') logger = logging.getLogger() logger.setLevel(logging.INFO) class ArgsBase(): @staticmethod", "[lr_scheduler] class KoBARTConditionalGeneration(Base): def __init__(self, hparams, **kwargs): super(KoBARTConditionalGeneration, self).__init__(hparams, **kwargs)", "argparse.ArgumentParser( parents=[parent_parser], add_help=False) parser.add_argument('--train_file', type=str, default='data/train.tsv', help='train file') parser.add_argument('--test_file', type=str,", "__init__(self, hparams, **kwargs) -> None: super(Base, self).__init__() self.hparams = hparams", "def train_dataloader(self): train = DataLoader(self.train, batch_size=self.batch_size, num_workers=self.num_workers, shuffle=True) return train", "= max_len self.train_file_path = train_file self.test_file_path = test_file if tok", "max_len=512, batch_size=8, num_workers=5): super().__init__() self.batch_size = batch_size self.max_len = max_len", "import argparse import logging import os import numpy as np", "parser = 
pl.Trainer.add_argparse_args(parser) args = parser.parse_args() logging.info(args) model = KoBARTConditionalGeneration(args)", "state is OK) def setup(self, stage): # split dataset self.train", "in param_optimizer if any( nd in n for nd in", "training_step(self, batch, batch_idx): outs = self(batch) loss = outs.loss self.log('train_loss',", "= ['bias', 'LayerNorm.bias', 'LayerNorm.weight'] optimizer_grouped_parameters = [ {'params': [p for", "None else 1) data_len = len(self.train_dataloader().dataset) logging.info(f'number of workers {num_workers},", "DataLoader(self.test, batch_size=self.batch_size, num_workers=self.num_workers, shuffle=False) return test class Base(pl.LightningModule): def __init__(self,", "class KoBARTConditionalGeneration(Base): def __init__(self, hparams, **kwargs): super(KoBARTConditionalGeneration, self).__init__(hparams, **kwargs) self.model", "save_top_k=-1, prefix='kobart_translation') tb_logger = pl_loggers.TensorBoardLogger(os.path.join(args.default_root_dir, 'tb_logs')) lr_logger = pl.callbacks.LearningRateMonitor() trainer", "lr=self.hparams.lr, correct_bias=False) # warm up lr num_workers = (self.hparams.gpus if", "pl import torch from pytorch_lightning import loggers as pl_loggers from", "int(num_train_steps * self.hparams.warmup_ratio) logging.info(f'num_warmup_steps : {num_warmup_steps}') scheduler = get_cosine_schedule_with_warmup( optimizer,", "args.test_file, None, max_len=args.max_len, batch_size=args.batch_size, num_workers=args.num_workers) checkpoint_callback = pl.callbacks.ModelCheckpoint(monitor='val_loss', dirpath=args.default_root_dir, filename='model_chp/{epoch:02d}-{val_loss:.3f}',", "as np import pandas as pd import pytorch_lightning as pl", "parents=[parent_parser], add_help=False) parser.add_argument('--batch-size', type=int, default=14, help='batch size for training (default:", "decoder_attention_mask=decoder_attention_mask, labels=inputs['labels'], return_dict=True) def training_step(self, batch, batch_idx): 
outs = self(batch)", "KobartSummaryModule.add_model_specific_args(parser) parser = pl.Trainer.add_argparse_args(parser) args = parser.parse_args() logging.info(args) model =", "= outs['loss'] return (loss) def validation_epoch_end(self, outputs): losses = []", "parser = argparse.ArgumentParser( parents=[parent_parser], add_help=False) parser.add_argument('--train_file', type=str, default='data/train.tsv', help='train file')", "self.max_len) self.test = KoBARTSummaryDataset(self.test_file_path, self.tok, self.max_len) def train_dataloader(self): train =", "val_dataloader(self): val = DataLoader(self.test, batch_size=self.batch_size, num_workers=self.num_workers, shuffle=False) return val def", "'frequency': 1} return [optimizer], [lr_scheduler] class KoBARTConditionalGeneration(Base): def __init__(self, hparams,", "pandas as pd import pytorch_lightning as pl import torch from", "split dataset self.train = KoBARTSummaryDataset(self.train_file_path, self.tok, self.max_len) self.test = KoBARTSummaryDataset(self.test_file_path,", "for every GPU/machine (assigning state is OK) def setup(self, stage):", "validation_epoch_end(self, outputs): losses = [] for loss in outputs: losses.append(loss)", "inputs['input_ids'].ne(self.pad_token_id).float() decoder_attention_mask = inputs['decoder_input_ids'].ne(self.pad_token_id).float() return self.model(input_ids=inputs['input_ids'], attention_mask=attention_mask, decoder_input_ids=inputs['decoder_input_ids'], decoder_attention_mask=decoder_attention_mask, labels=inputs['labels'],", "import numpy as np import pandas as pd import pytorch_lightning", "stage): # split dataset self.train = KoBARTSummaryDataset(self.train_file_path, self.tok, self.max_len) self.test", "parser.add_argument('--batch-size', type=int, default=14, help='batch size for training (default: 96)') parser.add_argument('--lr',", "get_cosine_schedule_with_warmup from kobart import get_pytorch_kobart_model, get_kobart_tokenizer parser = 
argparse.ArgumentParser(description='KoBART translation')", "return parser class KobartSummaryModule(pl.LightningDataModule): def __init__(self, train_file, test_file, tok, max_len=512,", "import KoBARTSummaryDataset from transformers import BartForConditionalGeneration, PreTrainedTokenizerFast from transformers.optimization import", "any( nd in n for nd in no_decay)], 'weight_decay': 0.0}", "if any( nd in n for nd in no_decay)], 'weight_decay':", "(loss) def validation_epoch_end(self, outputs): losses = [] for loss in", "torch from pytorch_lightning import loggers as pl_loggers from torch.utils.data import", "nd in n for nd in no_decay)], 'weight_decay': 0.01}, {'params':", "= AdamW(optimizer_grouped_parameters, lr=self.hparams.lr, correct_bias=False) # warm up lr num_workers =", "warm up lr num_workers = (self.hparams.gpus if self.hparams.gpus is not", "= KoBARTConditionalGeneration(args) dm = KobartSummaryModule(args.train_file, args.test_file, None, max_len=args.max_len, batch_size=args.batch_size, num_workers=args.num_workers)", "prog_bar=True) return loss def validation_step(self, batch, batch_idx): outs = self(batch)", "@staticmethod def add_model_specific_args(parent_parser): # add model specific args parser =", "**kwargs) self.model = BartForConditionalGeneration.from_pretrained(get_pytorch_kobart_model()) self.model.train() self.bos_token = '<s>' self.eos_token =", "**kwargs) -> None: super(Base, self).__init__() self.hparams = hparams @staticmethod def", "add_help=False) parser.add_argument('--batch-size', type=int, default=14, help='batch size for training (default: 96)')", "= ArgsBase.add_model_specific_args(parser) parser = KobartSummaryModule.add_model_specific_args(parser) parser = pl.Trainer.add_argparse_args(parser) args =", "if tok is None: self.tok = get_kobart_tokenizer() else: self.tok =", "in n for nd in no_decay)], 'weight_decay': 0.01}, {'params': [p", "KoBARTSummaryDataset(self.test_file_path, self.tok, self.max_len) def 
train_dataloader(self): train = DataLoader(self.train, batch_size=self.batch_size, num_workers=self.num_workers,", "logging.info(args) model = KoBARTConditionalGeneration(args) dm = KobartSummaryModule(args.train_file, args.test_file, None, max_len=args.max_len,", "{data_len}') num_train_steps = int(data_len / (self.hparams.batch_size * num_workers) * self.hparams.max_epochs)", "parents=[parent_parser], add_help=False) parser.add_argument('--num_workers', type=int, default=5, help='num of worker for dataloader')", "BartForConditionalGeneration, PreTrainedTokenizerFast from transformers.optimization import AdamW, get_cosine_schedule_with_warmup from kobart import", "self.test_file_path = test_file if tok is None: self.tok = get_kobart_tokenizer()", "{num_warmup_steps}') scheduler = get_cosine_schedule_with_warmup( optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_train_steps) lr_scheduler = {'scheduler':", "self.model(input_ids=inputs['input_ids'], attention_mask=attention_mask, decoder_input_ids=inputs['decoder_input_ids'], decoder_attention_mask=decoder_attention_mask, labels=inputs['labels'], return_dict=True) def training_step(self, batch, batch_idx):", "[] for loss in outputs: losses.append(loss) self.log('val_loss', torch.stack(losses).mean(), prog_bar=True) if", "numpy as np import pandas as pd import pytorch_lightning as", "pytorch_lightning as pl import torch from pytorch_lightning import loggers as", ": {num_warmup_steps}') scheduler = get_cosine_schedule_with_warmup( optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_train_steps) lr_scheduler =", "= KobartSummaryModule.add_model_specific_args(parser) parser = pl.Trainer.add_argparse_args(parser) args = parser.parse_args() logging.info(args) model", "from torch.utils.data import DataLoader, Dataset from dataset import KoBARTSummaryDataset from", "argparse.ArgumentParser( parents=[parent_parser], add_help=False) parser.add_argument('--num_workers', type=int, default=5, 
help='num of worker for", "PreTrainedTokenizerFast from transformers.optimization import AdamW, get_cosine_schedule_with_warmup from kobart import get_pytorch_kobart_model,", "help='kobart model path') return parser def configure_optimizers(self): # Prepare optimizer", "AdamW(optimizer_grouped_parameters, lr=self.hparams.lr, correct_bias=False) # warm up lr num_workers = (self.hparams.gpus", "self.hparams.num_nodes is not None else 1) data_len = len(self.train_dataloader().dataset) logging.info(f'number", "@staticmethod def add_model_specific_args(parent_parser): parser = argparse.ArgumentParser( parents=[parent_parser], add_help=False) parser.add_argument('--num_workers', type=int,", "__name__ == '__main__': parser = Base.add_model_specific_args(parser) parser = ArgsBase.add_model_specific_args(parser) parser", "parser class KobartSummaryModule(pl.LightningDataModule): def __init__(self, train_file, test_file, tok, max_len=512, batch_size=8,", "parser.add_argument('--lr', type=float, default=3e-5, help='The initial learning rate') parser.add_argument('--warmup_ratio', type=float, default=0.1,", "p in param_optimizer if any( nd in n for nd", "n for nd in no_decay)], 'weight_decay': 0.01}, {'params': [p for", "num_workers=self.num_workers, shuffle=False) return test class Base(pl.LightningModule): def __init__(self, hparams, **kwargs)", "@staticmethod def add_model_specific_args(parent_parser): parser = argparse.ArgumentParser( parents=[parent_parser], add_help=False) parser.add_argument('--train_file', type=str,", "num_workers @staticmethod def add_model_specific_args(parent_parser): parser = argparse.ArgumentParser( parents=[parent_parser], add_help=False) parser.add_argument('--num_workers',", "class Base(pl.LightningModule): def __init__(self, hparams, **kwargs) -> None: super(Base, self).__init__()", "is not None else 1) * (self.hparams.num_nodes if self.hparams.num_nodes is", "help='test file') parser.add_argument('--batch_size', type=int, default=28, help='') 
parser.add_argument('--max_len', type=int, default=512, help='max", "scheduler, 'monitor': 'loss', 'interval': 'step', 'frequency': 1} return [optimizer], [lr_scheduler]", "args parser = argparse.ArgumentParser( parents=[parent_parser], add_help=False) parser.add_argument('--batch-size', type=int, default=14, help='batch", "default=512, help='max seq len') return parser class KobartSummaryModule(pl.LightningDataModule): def __init__(self,", "'monitor': 'loss', 'interval': 'step', 'frequency': 1} return [optimizer], [lr_scheduler] class", "as pd import pytorch_lightning as pl import torch from pytorch_lightning", "dataset import KoBARTSummaryDataset from transformers import BartForConditionalGeneration, PreTrainedTokenizerFast from transformers.optimization", "type=int, default=512, help='max seq len') return parser class KobartSummaryModule(pl.LightningDataModule): def", "add_model_specific_args(parent_parser): parser = argparse.ArgumentParser( parents=[parent_parser], add_help=False) parser.add_argument('--num_workers', type=int, default=5, help='num", "import loggers as pl_loggers from torch.utils.data import DataLoader, Dataset from", "return self.model(input_ids=inputs['input_ids'], attention_mask=attention_mask, decoder_input_ids=inputs['decoder_input_ids'], decoder_attention_mask=decoder_attention_mask, labels=inputs['labels'], return_dict=True) def training_step(self, batch,", "self.hparams.warmup_ratio) logging.info(f'num_warmup_steps : {num_warmup_steps}') scheduler = get_cosine_schedule_with_warmup( optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_train_steps)", "nd in n for nd in no_decay)], 'weight_decay': 0.0} ]", "import get_pytorch_kobart_model, get_kobart_tokenizer parser = argparse.ArgumentParser(description='KoBART translation') parser.add_argument('--checkpoint_path', type=str, help='checkpoint", "else: self.tok = tok self.num_workers = num_workers @staticmethod def add_model_specific_args(parent_parser):", "self.tok, 
self.max_len) self.test = KoBARTSummaryDataset(self.test_file_path, self.tok, self.max_len) def train_dataloader(self): train", "shuffle=True) return train def val_dataloader(self): val = DataLoader(self.test, batch_size=self.batch_size, num_workers=self.num_workers,", "help='train file') parser.add_argument('--test_file', type=str, default='data/test.tsv', help='test file') parser.add_argument('--batch_size', type=int, default=28,", "super(Base, self).__init__() self.hparams = hparams @staticmethod def add_model_specific_args(parent_parser): # add", "up lr num_workers = (self.hparams.gpus if self.hparams.gpus is not None", "= get_cosine_schedule_with_warmup( optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_train_steps) lr_scheduler = {'scheduler': scheduler, 'monitor':", "from transformers import BartForConditionalGeneration, PreTrainedTokenizerFast from transformers.optimization import AdamW, get_cosine_schedule_with_warmup", "shuffle=False) return val def test_dataloader(self): test = DataLoader(self.test, batch_size=self.batch_size, num_workers=self.num_workers,", "return loss def validation_step(self, batch, batch_idx): outs = self(batch) loss", "Base.add_model_specific_args(parser) parser = ArgsBase.add_model_specific_args(parser) parser = KobartSummaryModule.add_model_specific_args(parser) parser = pl.Trainer.add_argparse_args(parser)", "n for nd in no_decay)], 'weight_decay': 0.0} ] optimizer =", "p in param_optimizer if not any( nd in n for", "logging.info(f'number of workers {num_workers}, data length {data_len}') num_train_steps = int(data_len", "max_len self.train_file_path = train_file self.test_file_path = test_file if tok is", "= {'scheduler': scheduler, 'monitor': 'loss', 'interval': 'step', 'frequency': 1} return", "= (self.hparams.gpus if self.hparams.gpus is not None else 1) *", "loss = outs.loss self.log('train_loss', loss, prog_bar=True) return loss def validation_step(self,", "KobartSummaryModule(args.train_file, 
args.test_file, None, max_len=args.max_len, batch_size=args.batch_size, num_workers=args.num_workers) checkpoint_callback = pl.callbacks.ModelCheckpoint(monitor='val_loss', dirpath=args.default_root_dir,", "test class Base(pl.LightningModule): def __init__(self, hparams, **kwargs) -> None: super(Base,", "import pandas as pd import pytorch_lightning as pl import torch", "(self.hparams.num_nodes if self.hparams.num_nodes is not None else 1) data_len =", "self.tok = get_kobart_tokenizer() else: self.tok = tok self.num_workers = num_workers", "= len(self.train_dataloader().dataset) logging.info(f'number of workers {num_workers}, data length {data_len}') num_train_steps", "self.tokenizer = get_kobart_tokenizer() def forward(self, inputs): attention_mask = inputs['input_ids'].ne(self.pad_token_id).float() decoder_attention_mask", "else 1) * (self.hparams.num_nodes if self.hparams.num_nodes is not None else", "self.log('val_loss', torch.stack(losses).mean(), prog_bar=True) if __name__ == '__main__': parser = Base.add_model_specific_args(parser)", "model specific args parser = argparse.ArgumentParser( parents=[parent_parser], add_help=False) parser.add_argument('--batch-size', type=int,", "n, p in param_optimizer if not any( nd in n", "DataLoader(self.train, batch_size=self.batch_size, num_workers=self.num_workers, shuffle=True) return train def val_dataloader(self): val =", "type=int, default=14, help='batch size for training (default: 96)') parser.add_argument('--lr', type=float,", "correct_bias=False) # warm up lr num_workers = (self.hparams.gpus if self.hparams.gpus", "add_model_specific_args(parent_parser): # add model specific args parser = argparse.ArgumentParser( parents=[parent_parser],", "[p for n, p in param_optimizer if any( nd in", "num_workers=self.num_workers, shuffle=False) return val def test_dataloader(self): test = DataLoader(self.test, batch_size=self.batch_size,", "BartForConditionalGeneration.from_pretrained(get_pytorch_kobart_model()) 
self.model.train() self.bos_token = '<s>' self.eos_token = '</s>' self.pad_token_id =", "len(self.train_dataloader().dataset) logging.info(f'number of workers {num_workers}, data length {data_len}') num_train_steps =", "get_kobart_tokenizer() else: self.tok = tok self.num_workers = num_workers @staticmethod def", "GPU/machine (assigning state is OK) def setup(self, stage): # split", "type=int, default=28, help='') parser.add_argument('--max_len', type=int, default=512, help='max seq len') return", "is not None else 1) data_len = len(self.train_dataloader().dataset) logging.info(f'number of", "help='max seq len') return parser class KobartSummaryModule(pl.LightningDataModule): def __init__(self, train_file,", "specific args parser = argparse.ArgumentParser( parents=[parent_parser], add_help=False) parser.add_argument('--batch-size', type=int, default=14,", "[p for n, p in param_optimizer if not any( nd", "import torch from pytorch_lightning import loggers as pl_loggers from torch.utils.data", "import pytorch_lightning as pl import torch from pytorch_lightning import loggers", "in param_optimizer if not any( nd in n for nd", "0.01}, {'params': [p for n, p in param_optimizer if any(", "= int(data_len / (self.hparams.batch_size * num_workers) * self.hparams.max_epochs) logging.info(f'num_train_steps :", "optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_train_steps) lr_scheduler = {'scheduler': scheduler, 'monitor': 'loss', 'interval':", "= inputs['decoder_input_ids'].ne(self.pad_token_id).float() return self.model(input_ids=inputs['input_ids'], attention_mask=attention_mask, decoder_input_ids=inputs['decoder_input_ids'], decoder_attention_mask=decoder_attention_mask, labels=inputs['labels'], return_dict=True) def", "batch, batch_idx): outs = self(batch) loss = outs.loss self.log('train_loss', loss,", "is None: self.tok = get_kobart_tokenizer() else: self.tok = tok self.num_workers", "num_workers = (self.hparams.gpus if self.hparams.gpus is not None 
else 1)", "self.eos_token = '</s>' self.pad_token_id = 0 self.tokenizer = get_kobart_tokenizer() def", "train_file, test_file, tok, max_len=512, batch_size=8, num_workers=5): super().__init__() self.batch_size = batch_size", "'tb_logs')) lr_logger = pl.callbacks.LearningRateMonitor() trainer = pl.Trainer.from_argparse_args(args, logger=tb_logger, callbacks=[checkpoint_callback, lr_logger])", "'LayerNorm.weight'] optimizer_grouped_parameters = [ {'params': [p for n, p in", "add_model_specific_args(parent_parser): parser = argparse.ArgumentParser( parents=[parent_parser], add_help=False) parser.add_argument('--train_file', type=str, default='data/train.tsv', help='train", "prefix='kobart_translation') tb_logger = pl_loggers.TensorBoardLogger(os.path.join(args.default_root_dir, 'tb_logs')) lr_logger = pl.callbacks.LearningRateMonitor() trainer =", "test_file, tok, max_len=512, batch_size=8, num_workers=5): super().__init__() self.batch_size = batch_size self.max_len", "filename='model_chp/{epoch:02d}-{val_loss:.3f}', verbose=True, save_last=True, mode='min', save_top_k=-1, prefix='kobart_translation') tb_logger = pl_loggers.TensorBoardLogger(os.path.join(args.default_root_dir, 'tb_logs'))", "outs = self(batch) loss = outs.loss self.log('train_loss', loss, prog_bar=True) return", "outputs: losses.append(loss) self.log('val_loss', torch.stack(losses).mean(), prog_bar=True) if __name__ == '__main__': parser", "KobartSummaryModule(pl.LightningDataModule): def __init__(self, train_file, test_file, tok, max_len=512, batch_size=8, num_workers=5): super().__init__()", "outs = self(batch) loss = outs['loss'] return (loss) def validation_epoch_end(self,", "self(batch) loss = outs['loss'] return (loss) def validation_epoch_end(self, outputs): losses", "self.hparams.gpus is not None else 1) * (self.hparams.num_nodes if self.hparams.num_nodes", "lr num_workers = (self.hparams.gpus if self.hparams.gpus is not None else", "get_kobart_tokenizer parser = 
argparse.ArgumentParser(description='KoBART translation') parser.add_argument('--checkpoint_path', type=str, help='checkpoint path') logger", "args = parser.parse_args() logging.info(args) model = KoBARTConditionalGeneration(args) dm = KobartSummaryModule(args.train_file,", "DataLoader(self.test, batch_size=self.batch_size, num_workers=self.num_workers, shuffle=False) return val def test_dataloader(self): test =", "parser = KobartSummaryModule.add_model_specific_args(parser) parser = pl.Trainer.add_argparse_args(parser) args = parser.parse_args() logging.info(args)", "val = DataLoader(self.test, batch_size=self.batch_size, num_workers=self.num_workers, shuffle=False) return val def test_dataloader(self):", "= hparams @staticmethod def add_model_specific_args(parent_parser): # add model specific args", "train_file self.test_file_path = test_file if tok is None: self.tok =", "data_len = len(self.train_dataloader().dataset) logging.info(f'number of workers {num_workers}, data length {data_len}')", "= parser.parse_args() logging.info(args) model = KoBARTConditionalGeneration(args) dm = KobartSummaryModule(args.train_file, args.test_file,", "hparams, **kwargs): super(KoBARTConditionalGeneration, self).__init__(hparams, **kwargs) self.model = BartForConditionalGeneration.from_pretrained(get_pytorch_kobart_model()) self.model.train() self.bos_token", "size for training (default: 96)') parser.add_argument('--lr', type=float, default=3e-5, help='The initial", "* (self.hparams.num_nodes if self.hparams.num_nodes is not None else 1) data_len", "import DataLoader, Dataset from dataset import KoBARTSummaryDataset from transformers import", "if self.hparams.gpus is not None else 1) * (self.hparams.num_nodes if", "default=3e-5, help='The initial learning rate') parser.add_argument('--warmup_ratio', type=float, default=0.1, help='warmup ratio')", "= num_workers @staticmethod def add_model_specific_args(parent_parser): parser = argparse.ArgumentParser( parents=[parent_parser], 
add_help=False)", "self.batch_size = batch_size self.max_len = max_len self.train_file_path = train_file self.test_file_path", "Dataset from dataset import KoBARTSummaryDataset from transformers import BartForConditionalGeneration, PreTrainedTokenizerFast", "batch_size=self.batch_size, num_workers=self.num_workers, shuffle=True) return train def val_dataloader(self): val = DataLoader(self.test,", "return parser def configure_optimizers(self): # Prepare optimizer param_optimizer = list(self.model.named_parameters())", "n, p in param_optimizer if any( nd in n for", "optimizer param_optimizer = list(self.model.named_parameters()) no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight'] optimizer_grouped_parameters", "no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight'] optimizer_grouped_parameters = [ {'params': [p", "help='The initial learning rate') parser.add_argument('--warmup_ratio', type=float, default=0.1, help='warmup ratio') parser.add_argument('--model_path',", "type=float, default=3e-5, help='The initial learning rate') parser.add_argument('--warmup_ratio', type=float, default=0.1, help='warmup", "batch_size=args.batch_size, num_workers=args.num_workers) checkpoint_callback = pl.callbacks.ModelCheckpoint(monitor='val_loss', dirpath=args.default_root_dir, filename='model_chp/{epoch:02d}-{val_loss:.3f}', verbose=True, save_last=True, mode='min',", "num_warmup_steps=num_warmup_steps, num_training_steps=num_train_steps) lr_scheduler = {'scheduler': scheduler, 'monitor': 'loss', 'interval': 'step',", "of workers {num_workers}, data length {data_len}') num_train_steps = int(data_len /", "def setup(self, stage): # split dataset self.train = KoBARTSummaryDataset(self.train_file_path, self.tok,", "from kobart import get_pytorch_kobart_model, get_kobart_tokenizer parser = argparse.ArgumentParser(description='KoBART translation') parser.add_argument('--checkpoint_path',", "num_workers=args.num_workers) checkpoint_callback = 
pl.callbacks.ModelCheckpoint(monitor='val_loss', dirpath=args.default_root_dir, filename='model_chp/{epoch:02d}-{val_loss:.3f}', verbose=True, save_last=True, mode='min', save_top_k=-1,", "translation') parser.add_argument('--checkpoint_path', type=str, help='checkpoint path') logger = logging.getLogger() logger.setLevel(logging.INFO) class", "int(data_len / (self.hparams.batch_size * num_workers) * self.hparams.max_epochs) logging.info(f'num_train_steps : {num_train_steps}')", "= outs.loss self.log('train_loss', loss, prog_bar=True) return loss def validation_step(self, batch,", "parser.add_argument('--checkpoint_path', type=str, help='checkpoint path') logger = logging.getLogger() logger.setLevel(logging.INFO) class ArgsBase():", "from transformers.optimization import AdamW, get_cosine_schedule_with_warmup from kobart import get_pytorch_kobart_model, get_kobart_tokenizer", "= pl.Trainer.add_argparse_args(parser) args = parser.parse_args() logging.info(args) model = KoBARTConditionalGeneration(args) dm", "= pl.callbacks.ModelCheckpoint(monitor='val_loss', dirpath=args.default_root_dir, filename='model_chp/{epoch:02d}-{val_loss:.3f}', verbose=True, save_last=True, mode='min', save_top_k=-1, prefix='kobart_translation') tb_logger", "return parser # OPTIONAL, called for every GPU/machine (assigning state", "initial learning rate') parser.add_argument('--warmup_ratio', type=float, default=0.1, help='warmup ratio') parser.add_argument('--model_path', type=str,", "logger = logging.getLogger() logger.setLevel(logging.INFO) class ArgsBase(): @staticmethod def add_model_specific_args(parent_parser): parser", "test_dataloader(self): test = DataLoader(self.test, batch_size=self.batch_size, num_workers=self.num_workers, shuffle=False) return test class", "self.bos_token = '<s>' self.eos_token = '</s>' self.pad_token_id = 0 self.tokenizer", "rate') parser.add_argument('--warmup_ratio', type=float, default=0.1, help='warmup ratio') parser.add_argument('--model_path', type=str, 
default=None, help='kobart", "path') logger = logging.getLogger() logger.setLevel(logging.INFO) class ArgsBase(): @staticmethod def add_model_specific_args(parent_parser):", "for n, p in param_optimizer if not any( nd in", "in n for nd in no_decay)], 'weight_decay': 0.0} ] optimizer", "KoBARTConditionalGeneration(Base): def __init__(self, hparams, **kwargs): super(KoBARTConditionalGeneration, self).__init__(hparams, **kwargs) self.model =", "help='warmup ratio') parser.add_argument('--model_path', type=str, default=None, help='kobart model path') return parser", "ArgsBase.add_model_specific_args(parser) parser = KobartSummaryModule.add_model_specific_args(parser) parser = pl.Trainer.add_argparse_args(parser) args = parser.parse_args()", "np import pandas as pd import pytorch_lightning as pl import", "**kwargs): super(KoBARTConditionalGeneration, self).__init__(hparams, **kwargs) self.model = BartForConditionalGeneration.from_pretrained(get_pytorch_kobart_model()) self.model.train() self.bos_token =", "labels=inputs['labels'], return_dict=True) def training_step(self, batch, batch_idx): outs = self(batch) loss", "if __name__ == '__main__': parser = Base.add_model_specific_args(parser) parser = ArgsBase.add_model_specific_args(parser)", "tok self.num_workers = num_workers @staticmethod def add_model_specific_args(parent_parser): parser = argparse.ArgumentParser(", "self.test = KoBARTSummaryDataset(self.test_file_path, self.tok, self.max_len) def train_dataloader(self): train = DataLoader(self.train,", "kobart import get_pytorch_kobart_model, get_kobart_tokenizer parser = argparse.ArgumentParser(description='KoBART translation') parser.add_argument('--checkpoint_path', type=str,", "not None else 1) * (self.hparams.num_nodes if self.hparams.num_nodes is not", "any( nd in n for nd in no_decay)], 'weight_decay': 0.01},", "validation_step(self, batch, batch_idx): outs = self(batch) loss = outs['loss'] return", "dm = KobartSummaryModule(args.train_file, args.test_file, 
None, max_len=args.max_len, batch_size=args.batch_size, num_workers=args.num_workers) checkpoint_callback =", "parser = argparse.ArgumentParser( parents=[parent_parser], add_help=False) parser.add_argument('--num_workers', type=int, default=5, help='num of", "return [optimizer], [lr_scheduler] class KoBARTConditionalGeneration(Base): def __init__(self, hparams, **kwargs): super(KoBARTConditionalGeneration,", "dataset self.train = KoBARTSummaryDataset(self.train_file_path, self.tok, self.max_len) self.test = KoBARTSummaryDataset(self.test_file_path, self.tok,", "tok, max_len=512, batch_size=8, num_workers=5): super().__init__() self.batch_size = batch_size self.max_len =", "type=int, default=5, help='num of worker for dataloader') return parser #", "if self.hparams.num_nodes is not None else 1) data_len = len(self.train_dataloader().dataset)", "help='checkpoint path') logger = logging.getLogger() logger.setLevel(logging.INFO) class ArgsBase(): @staticmethod def", "def training_step(self, batch, batch_idx): outs = self(batch) loss = outs.loss", "* num_workers) * self.hparams.max_epochs) logging.info(f'num_train_steps : {num_train_steps}') num_warmup_steps = int(num_train_steps", "= self(batch) loss = outs['loss'] return (loss) def validation_epoch_end(self, outputs):", "def configure_optimizers(self): # Prepare optimizer param_optimizer = list(self.model.named_parameters()) no_decay =", "return train def val_dataloader(self): val = DataLoader(self.test, batch_size=self.batch_size, num_workers=self.num_workers, shuffle=False)", "= [] for loss in outputs: losses.append(loss) self.log('val_loss', torch.stack(losses).mean(), prog_bar=True)", "test = DataLoader(self.test, batch_size=self.batch_size, num_workers=self.num_workers, shuffle=False) return test class Base(pl.LightningModule):", "return test class Base(pl.LightningModule): def __init__(self, hparams, **kwargs) -> None:", "= KoBARTSummaryDataset(self.test_file_path, self.tok, self.max_len) def 
train_dataloader(self): train = DataLoader(self.train, batch_size=self.batch_size,", "num_training_steps=num_train_steps) lr_scheduler = {'scheduler': scheduler, 'monitor': 'loss', 'interval': 'step', 'frequency':", "'__main__': parser = Base.add_model_specific_args(parser) parser = ArgsBase.add_model_specific_args(parser) parser = KobartSummaryModule.add_model_specific_args(parser)", "= [ {'params': [p for n, p in param_optimizer if", "file') parser.add_argument('--test_file', type=str, default='data/test.tsv', help='test file') parser.add_argument('--batch_size', type=int, default=28, help='')", "list(self.model.named_parameters()) no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight'] optimizer_grouped_parameters = [ {'params':", "parser = argparse.ArgumentParser( parents=[parent_parser], add_help=False) parser.add_argument('--batch-size', type=int, default=14, help='batch size", "self.train = KoBARTSummaryDataset(self.train_file_path, self.tok, self.max_len) self.test = KoBARTSummaryDataset(self.test_file_path, self.tok, self.max_len)", "return_dict=True) def training_step(self, batch, batch_idx): outs = self(batch) loss =", "torch.utils.data import DataLoader, Dataset from dataset import KoBARTSummaryDataset from transformers", "get_kobart_tokenizer() def forward(self, inputs): attention_mask = inputs['input_ids'].ne(self.pad_token_id).float() decoder_attention_mask = inputs['decoder_input_ids'].ne(self.pad_token_id).float()", "parser = ArgsBase.add_model_specific_args(parser) parser = KobartSummaryModule.add_model_specific_args(parser) parser = pl.Trainer.add_argparse_args(parser) args", "= argparse.ArgumentParser(description='KoBART translation') parser.add_argument('--checkpoint_path', type=str, help='checkpoint path') logger = logging.getLogger()", "= '<s>' self.eos_token = '</s>' self.pad_token_id = 0 self.tokenizer =", "] optimizer = AdamW(optimizer_grouped_parameters, lr=self.hparams.lr, correct_bias=False) # warm up lr", "for n, p in param_optimizer 
if any( nd in n", "= '</s>' self.pad_token_id = 0 self.tokenizer = get_kobart_tokenizer() def forward(self,", "= pl_loggers.TensorBoardLogger(os.path.join(args.default_root_dir, 'tb_logs')) lr_logger = pl.callbacks.LearningRateMonitor() trainer = pl.Trainer.from_argparse_args(args, logger=tb_logger,", "attention_mask=attention_mask, decoder_input_ids=inputs['decoder_input_ids'], decoder_attention_mask=decoder_attention_mask, labels=inputs['labels'], return_dict=True) def training_step(self, batch, batch_idx): outs", "add model specific args parser = argparse.ArgumentParser( parents=[parent_parser], add_help=False) parser.add_argument('--batch-size',", "type=str, default='data/test.tsv', help='test file') parser.add_argument('--batch_size', type=int, default=28, help='') parser.add_argument('--max_len', type=int,", "* self.hparams.max_epochs) logging.info(f'num_train_steps : {num_train_steps}') num_warmup_steps = int(num_train_steps * self.hparams.warmup_ratio)", "def validation_epoch_end(self, outputs): losses = [] for loss in outputs:", "from pytorch_lightning import loggers as pl_loggers from torch.utils.data import DataLoader,", "add_help=False) parser.add_argument('--num_workers', type=int, default=5, help='num of worker for dataloader') return", "return (loss) def validation_epoch_end(self, outputs): losses = [] for loss", "default=None, help='kobart model path') return parser def configure_optimizers(self): # Prepare", "self.hparams.max_epochs) logging.info(f'num_train_steps : {num_train_steps}') num_warmup_steps = int(num_train_steps * self.hparams.warmup_ratio) logging.info(f'num_warmup_steps", "'</s>' self.pad_token_id = 0 self.tokenizer = get_kobart_tokenizer() def forward(self, inputs):", "= Base.add_model_specific_args(parser) parser = ArgsBase.add_model_specific_args(parser) parser = KobartSummaryModule.add_model_specific_args(parser) parser =", "add_help=False) parser.add_argument('--train_file', type=str, default='data/train.tsv', help='train file') 
parser.add_argument('--test_file', type=str, default='data/test.tsv', help='test", "= KobartSummaryModule(args.train_file, args.test_file, None, max_len=args.max_len, batch_size=args.batch_size, num_workers=args.num_workers) checkpoint_callback = pl.callbacks.ModelCheckpoint(monitor='val_loss',", "return val def test_dataloader(self): test = DataLoader(self.test, batch_size=self.batch_size, num_workers=self.num_workers, shuffle=False)", "# split dataset self.train = KoBARTSummaryDataset(self.train_file_path, self.tok, self.max_len) self.test =", "num_workers) * self.hparams.max_epochs) logging.info(f'num_train_steps : {num_train_steps}') num_warmup_steps = int(num_train_steps *", "forward(self, inputs): attention_mask = inputs['input_ids'].ne(self.pad_token_id).float() decoder_attention_mask = inputs['decoder_input_ids'].ne(self.pad_token_id).float() return self.model(input_ids=inputs['input_ids'],", "self.max_len = max_len self.train_file_path = train_file self.test_file_path = test_file if", "self(batch) loss = outs.loss self.log('train_loss', loss, prog_bar=True) return loss def", "__init__(self, train_file, test_file, tok, max_len=512, batch_size=8, num_workers=5): super().__init__() self.batch_size =", "num_train_steps = int(data_len / (self.hparams.batch_size * num_workers) * self.hparams.max_epochs) logging.info(f'num_train_steps", "outs.loss self.log('train_loss', loss, prog_bar=True) return loss def validation_step(self, batch, batch_idx):", "every GPU/machine (assigning state is OK) def setup(self, stage): #", "type=str, default='data/train.tsv', help='train file') parser.add_argument('--test_file', type=str, default='data/test.tsv', help='test file') parser.add_argument('--batch_size',", "test_file if tok is None: self.tok = get_kobart_tokenizer() else: self.tok", "self.model.train() self.bos_token = '<s>' self.eos_token = '</s>' self.pad_token_id = 0", "self).__init__(hparams, **kwargs) self.model = 
BartForConditionalGeneration.from_pretrained(get_pytorch_kobart_model()) self.model.train() self.bos_token = '<s>' self.eos_token", "len') return parser class KobartSummaryModule(pl.LightningDataModule): def __init__(self, train_file, test_file, tok,", "'LayerNorm.bias', 'LayerNorm.weight'] optimizer_grouped_parameters = [ {'params': [p for n, p", "None, max_len=args.max_len, batch_size=args.batch_size, num_workers=args.num_workers) checkpoint_callback = pl.callbacks.ModelCheckpoint(monitor='val_loss', dirpath=args.default_root_dir, filename='model_chp/{epoch:02d}-{val_loss:.3f}', verbose=True,", "transformers.optimization import AdamW, get_cosine_schedule_with_warmup from kobart import get_pytorch_kobart_model, get_kobart_tokenizer parser", "outputs): losses = [] for loss in outputs: losses.append(loss) self.log('val_loss',", "= pl.callbacks.LearningRateMonitor() trainer = pl.Trainer.from_argparse_args(args, logger=tb_logger, callbacks=[checkpoint_callback, lr_logger]) trainer.fit(model, dm)", "KoBARTSummaryDataset(self.train_file_path, self.tok, self.max_len) self.test = KoBARTSummaryDataset(self.test_file_path, self.tok, self.max_len) def train_dataloader(self):", "default=14, help='batch size for training (default: 96)') parser.add_argument('--lr', type=float, default=3e-5,", "import os import numpy as np import pandas as pd", "/ (self.hparams.batch_size * num_workers) * self.hparams.max_epochs) logging.info(f'num_train_steps : {num_train_steps}') num_warmup_steps", "loss def validation_step(self, batch, batch_idx): outs = self(batch) loss =", "loss, prog_bar=True) return loss def validation_step(self, batch, batch_idx): outs =", "default=5, help='num of worker for dataloader') return parser # OPTIONAL,", "decoder_input_ids=inputs['decoder_input_ids'], decoder_attention_mask=decoder_attention_mask, labels=inputs['labels'], return_dict=True) def training_step(self, batch, batch_idx): outs =", "def add_model_specific_args(parent_parser): # add model specific args 
parser = argparse.ArgumentParser(", "not None else 1) data_len = len(self.train_dataloader().dataset) logging.info(f'number of workers", "parser.add_argument('--warmup_ratio', type=float, default=0.1, help='warmup ratio') parser.add_argument('--model_path', type=str, default=None, help='kobart model", "batch_size self.max_len = max_len self.train_file_path = train_file self.test_file_path = test_file", "self.tok = tok self.num_workers = num_workers @staticmethod def add_model_specific_args(parent_parser): parser", "1) data_len = len(self.train_dataloader().dataset) logging.info(f'number of workers {num_workers}, data length", "class ArgsBase(): @staticmethod def add_model_specific_args(parent_parser): parser = argparse.ArgumentParser( parents=[parent_parser], add_help=False)", "for training (default: 96)') parser.add_argument('--lr', type=float, default=3e-5, help='The initial learning", "num_workers=self.num_workers, shuffle=True) return train def val_dataloader(self): val = DataLoader(self.test, batch_size=self.batch_size,", "= KoBARTSummaryDataset(self.train_file_path, self.tok, self.max_len) self.test = KoBARTSummaryDataset(self.test_file_path, self.tok, self.max_len) def", "'interval': 'step', 'frequency': 1} return [optimizer], [lr_scheduler] class KoBARTConditionalGeneration(Base): def", "batch_idx): outs = self(batch) loss = outs.loss self.log('train_loss', loss, prog_bar=True)", "class KobartSummaryModule(pl.LightningDataModule): def __init__(self, train_file, test_file, tok, max_len=512, batch_size=8, num_workers=5):", "None else 1) * (self.hparams.num_nodes if self.hparams.num_nodes is not None", "import AdamW, get_cosine_schedule_with_warmup from kobart import get_pytorch_kobart_model, get_kobart_tokenizer parser =", "seq len') return parser class KobartSummaryModule(pl.LightningDataModule): def __init__(self, train_file, test_file,", "default='data/test.tsv', help='test file') parser.add_argument('--batch_size', type=int, default=28, help='') 
parser.add_argument('--max_len', type=int, default=512,", "Base(pl.LightningModule): def __init__(self, hparams, **kwargs) -> None: super(Base, self).__init__() self.hparams", "lr_logger = pl.callbacks.LearningRateMonitor() trainer = pl.Trainer.from_argparse_args(args, logger=tb_logger, callbacks=[checkpoint_callback, lr_logger]) trainer.fit(model,", "os import numpy as np import pandas as pd import", "-> None: super(Base, self).__init__() self.hparams = hparams @staticmethod def add_model_specific_args(parent_parser):", "as pl_loggers from torch.utils.data import DataLoader, Dataset from dataset import", "configure_optimizers(self): # Prepare optimizer param_optimizer = list(self.model.named_parameters()) no_decay = ['bias',", "no_decay)], 'weight_decay': 0.0} ] optimizer = AdamW(optimizer_grouped_parameters, lr=self.hparams.lr, correct_bias=False) #", "= self(batch) loss = outs.loss self.log('train_loss', loss, prog_bar=True) return loss", "get_cosine_schedule_with_warmup( optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_train_steps) lr_scheduler = {'scheduler': scheduler, 'monitor': 'loss',", "OPTIONAL, called for every GPU/machine (assigning state is OK) def", "is OK) def setup(self, stage): # split dataset self.train =", "# add model specific args parser = argparse.ArgumentParser( parents=[parent_parser], add_help=False)", "inputs): attention_mask = inputs['input_ids'].ne(self.pad_token_id).float() decoder_attention_mask = inputs['decoder_input_ids'].ne(self.pad_token_id).float() return self.model(input_ids=inputs['input_ids'], attention_mask=attention_mask,", "pl.callbacks.ModelCheckpoint(monitor='val_loss', dirpath=args.default_root_dir, filename='model_chp/{epoch:02d}-{val_loss:.3f}', verbose=True, save_last=True, mode='min', save_top_k=-1, prefix='kobart_translation') tb_logger =", "pl_loggers.TensorBoardLogger(os.path.join(args.default_root_dir, 'tb_logs')) lr_logger = pl.callbacks.LearningRateMonitor() trainer = 
pl.Trainer.from_argparse_args(args, logger=tb_logger, callbacks=[checkpoint_callback,", "file') parser.add_argument('--batch_size', type=int, default=28, help='') parser.add_argument('--max_len', type=int, default=512, help='max seq", "= train_file self.test_file_path = test_file if tok is None: self.tok", "train = DataLoader(self.train, batch_size=self.batch_size, num_workers=self.num_workers, shuffle=True) return train def val_dataloader(self):", "None: super(Base, self).__init__() self.hparams = hparams @staticmethod def add_model_specific_args(parent_parser): #", "pd import pytorch_lightning as pl import torch from pytorch_lightning import", "pl.Trainer.add_argparse_args(parser) args = parser.parse_args() logging.info(args) model = KoBARTConditionalGeneration(args) dm =", "dataloader') return parser # OPTIONAL, called for every GPU/machine (assigning", "param_optimizer if any( nd in n for nd in no_decay)],", "parser.parse_args() logging.info(args) model = KoBARTConditionalGeneration(args) dm = KobartSummaryModule(args.train_file, args.test_file, None,", "parser.add_argument('--max_len', type=int, default=512, help='max seq len') return parser class KobartSummaryModule(pl.LightningDataModule):", "loggers as pl_loggers from torch.utils.data import DataLoader, Dataset from dataset", "self.model = BartForConditionalGeneration.from_pretrained(get_pytorch_kobart_model()) self.model.train() self.bos_token = '<s>' self.eos_token = '</s>'", "* self.hparams.warmup_ratio) logging.info(f'num_warmup_steps : {num_warmup_steps}') scheduler = get_cosine_schedule_with_warmup( optimizer, num_warmup_steps=num_warmup_steps,", "{num_train_steps}') num_warmup_steps = int(num_train_steps * self.hparams.warmup_ratio) logging.info(f'num_warmup_steps : {num_warmup_steps}') scheduler", "val def test_dataloader(self): test = DataLoader(self.test, batch_size=self.batch_size, num_workers=self.num_workers, shuffle=False) return", "workers {num_workers}, data length {data_len}') 
num_train_steps = int(data_len / (self.hparams.batch_size", "= DataLoader(self.test, batch_size=self.batch_size, num_workers=self.num_workers, shuffle=False) return val def test_dataloader(self): test", "'loss', 'interval': 'step', 'frequency': 1} return [optimizer], [lr_scheduler] class KoBARTConditionalGeneration(Base):", "help='num of worker for dataloader') return parser # OPTIONAL, called", "= inputs['input_ids'].ne(self.pad_token_id).float() decoder_attention_mask = inputs['decoder_input_ids'].ne(self.pad_token_id).float() return self.model(input_ids=inputs['input_ids'], attention_mask=attention_mask, decoder_input_ids=inputs['decoder_input_ids'], decoder_attention_mask=decoder_attention_mask," ]
[ "state_class=sensor.STATE_CLASS_MEASUREMENT, last_reset=LAST_RESET_UPTIME, ), (\"roller\", \"rollerEnergy\"): BlockAttributeDescription( name=\"Energy\", unit=ENERGY_KILO_WATT_HOUR, value=lambda value:", "-> None: \"\"\"Initialize sensor.\"\"\" super().__init__(wrapper, block, attribute, description) self._last_value: float", "import StateType from homeassistant.util import dt from . import ShellyDeviceWrapper", "block: { \"Operational hours\": round(block.totalWorkTime / 3600, 1) }, ),", "100, 1), device_class=sensor.DEVICE_CLASS_POWER_FACTOR, state_class=sensor.STATE_CLASS_MEASUREMENT, ), (\"relay\", \"power\"): BlockAttributeDescription( name=\"Power\", unit=POWER_WATT,", "SENSORS, ShellySensor ) await async_setup_entry_rest( hass, config_entry, async_add_entities, REST_SENSORS, ShellyRestSensor", "value return self.attribute_value @property def state_class(self) -> str | None:", "), (\"relay\", \"totalWorkTime\"): BlockAttributeDescription( name=\"Lamp Life\", unit=PERCENTAGE, icon=\"mdi:progress-wrench\", value=lambda value:", "1, ), (\"device\", \"deviceTemp\"): BlockAttributeDescription( name=\"Device Temperature\", unit=temperature_unit, value=lambda value:", "SensorEntity): \"\"\"Represent a shelly sleeping sensor.\"\"\" @property def state(self) ->", "hass, config_entry, async_add_entities, REST_SENSORS, ShellyRestSensor ) class ShellySensor(ShellyBlockAttributeEntity, SensorEntity): \"\"\"Represent", "LAST_RESET_NEVER: self._attr_last_reset = dt.utc_from_timestamp(0) elif description.last_reset == LAST_RESET_UPTIME: self._attr_last_reset =", "else: await async_setup_entry_attribute_entities( hass, config_entry, async_add_entities, SENSORS, ShellySensor ) await", "state_class=sensor.STATE_CLASS_MEASUREMENT, default_enabled=False, ), \"uptime\": RestAttributeDescription( name=\"Uptime\", value=get_device_uptime, device_class=sensor.DEVICE_CLASS_TIMESTAMP, default_enabled=False, ),", "name=\"Tilt\", unit=DEGREE, icon=\"mdi:angle-acute\", 
state_class=sensor.STATE_CLASS_MEASUREMENT, ), (\"relay\", \"totalWorkTime\"): BlockAttributeDescription( name=\"Lamp Life\",", "LAST_RESET_NEVER, LAST_RESET_UPTIME, SHAIR_MAX_WORK_HOURS from .entity import ( BlockAttributeDescription, RestAttributeDescription, ShellyBlockAttributeEntity,", "(\"sensor\", \"concentration\"): BlockAttributeDescription( name=\"Gas Concentration\", unit=CONCENTRATION_PARTS_PER_MILLION, icon=\"mdi:gauge\", state_class=sensor.STATE_CLASS_MEASUREMENT, ), (\"sensor\",", "value: round(value * 100, 1), device_class=sensor.DEVICE_CLASS_POWER_FACTOR, state_class=sensor.STATE_CLASS_MEASUREMENT, ), (\"relay\", \"power\"):", "value: round(value, 1), device_class=sensor.DEVICE_CLASS_VOLTAGE, state_class=sensor.STATE_CLASS_MEASUREMENT, ), (\"emeter\", \"powerFactor\"): BlockAttributeDescription( name=\"Power", "1000, 2), device_class=sensor.DEVICE_CLASS_ENERGY, state_class=sensor.STATE_CLASS_MEASUREMENT, last_reset=LAST_RESET_UPTIME, ), (\"sensor\", \"concentration\"): BlockAttributeDescription( name=\"Gas", "value: round(value, 1), device_class=sensor.DEVICE_CLASS_TEMPERATURE, state_class=sensor.STATE_CLASS_MEASUREMENT, available=lambda block: cast(bool, block.extTemp !=", "sensor.\"\"\" if ( self.description.last_reset == LAST_RESET_UPTIME and self.attribute_value is not", "str | None: \"\"\"State class of sensor.\"\"\" return self.description.state_class @property", "for entity %s\", self.name) self._last_value = value return self.attribute_value @property", "settings, _: settings.get(\"external_power\") == 1, ), (\"device\", \"deviceTemp\"): BlockAttributeDescription( name=\"Device", "SIGNAL_STRENGTH_DECIBELS_MILLIWATT, ) from homeassistant.core import HomeAssistant from homeassistant.helpers.entity_platform import AddEntitiesCallback", "ShellySensor ) await async_setup_entry_rest( hass, config_entry, async_add_entities, REST_SENSORS, ShellyRestSensor )", "unit=CONCENTRATION_PARTS_PER_MILLION, icon=\"mdi:gauge\", 
state_class=sensor.STATE_CLASS_MEASUREMENT, ), (\"sensor\", \"extTemp\"): BlockAttributeDescription( name=\"Temperature\", unit=temperature_unit, value=lambda", "LAST_RESET_UPTIME: self._attr_last_reset = ( dt.utcnow() - timedelta(seconds=wrapper.device.status[\"uptime\"]) ).replace(second=0, microsecond=0) @property", ".entity import ( BlockAttributeDescription, RestAttributeDescription, ShellyBlockAttributeEntity, ShellyRestAttributeEntity, ShellySleepingBlockAttributeEntity, async_setup_entry_attribute_entities, async_setup_entry_rest,", "round(value, 1), device_class=sensor.DEVICE_CLASS_HUMIDITY, state_class=sensor.STATE_CLASS_MEASUREMENT, available=lambda block: cast(bool, block.extTemp != 999),", "icon=\"mdi:cog-transfer\", value=lambda value: value, extra_state_attributes=lambda block: {\"self_test\": block.selfTest}, ), }", "self._last_value: float | None = None if description.last_reset == LAST_RESET_NEVER:", "a shelly sensor.\"\"\" def __init__( self, wrapper: ShellyDeviceWrapper, block: aioshelly.Block,", "def state(self) -> StateType: \"\"\"Return value of sensor.\"\"\" return self.attribute_value", "round(value, 1), device_class=sensor.DEVICE_CLASS_VOLTAGE, state_class=sensor.STATE_CLASS_MEASUREMENT, ), (\"sensor\", \"sensorOp\"): BlockAttributeDescription( name=\"Operation\", icon=\"mdi:cog-transfer\",", "self.description.unit class ShellySleepingSensor(ShellySleepingBlockAttributeEntity, SensorEntity): \"\"\"Represent a shelly sleeping sensor.\"\"\" @property", "name=\"ADC\", unit=ELECTRIC_POTENTIAL_VOLT, value=lambda value: round(value, 1), device_class=sensor.DEVICE_CLASS_VOLTAGE, state_class=sensor.STATE_CLASS_MEASUREMENT, ), (\"sensor\",", "default_enabled=False, ), \"uptime\": RestAttributeDescription( name=\"Uptime\", value=get_device_uptime, device_class=sensor.DEVICE_CLASS_TIMESTAMP, default_enabled=False, ), }", "float | None = None if description.last_reset == LAST_RESET_NEVER: self._attr_last_reset", "%s\", self.name) self._last_value = 
value return self.attribute_value @property def state_class(self)", "value: round(value, 1), device_class=sensor.DEVICE_CLASS_POWER, state_class=sensor.STATE_CLASS_MEASUREMENT, ), (\"device\", \"energy\"): BlockAttributeDescription( name=\"Energy\",", "@property def state(self) -> StateType: \"\"\"Return value of sensor.\"\"\" if", "homeassistant.components.sensor import SensorEntity from homeassistant.config_entries import ConfigEntry from homeassistant.const import", "= { \"rssi\": RestAttributeDescription( name=\"RSSI\", unit=SIGNAL_STRENGTH_DECIBELS_MILLIWATT, value=lambda status, _: status[\"wifi_sta\"][\"rssi\"],", "device_class=sensor.DEVICE_CLASS_TIMESTAMP, default_enabled=False, ), } async def async_setup_entry( hass: HomeAssistant, config_entry:", "value=lambda status, _: status[\"wifi_sta\"][\"rssi\"], device_class=sensor.DEVICE_CLASS_SIGNAL_STRENGTH, state_class=sensor.STATE_CLASS_MEASUREMENT, default_enabled=False, ), \"uptime\": RestAttributeDescription(", "), (\"light\", \"power\"): BlockAttributeDescription( name=\"Power\", unit=POWER_WATT, value=lambda value: round(value, 1),", "round(value, 1), device_class=sensor.DEVICE_CLASS_POWER, state_class=sensor.STATE_CLASS_MEASUREMENT, default_enabled=False, ), (\"device\", \"power\"): BlockAttributeDescription( name=\"Power\",", "\"rssi\": RestAttributeDescription( name=\"RSSI\", unit=SIGNAL_STRENGTH_DECIBELS_MILLIWATT, value=lambda status, _: status[\"wifi_sta\"][\"rssi\"], device_class=sensor.DEVICE_CLASS_SIGNAL_STRENGTH, state_class=sensor.STATE_CLASS_MEASUREMENT,", "value: round(value, 1), device_class=sensor.DEVICE_CLASS_POWER, state_class=sensor.STATE_CLASS_MEASUREMENT, ), (\"emeter\", \"power\"): BlockAttributeDescription( name=\"Power\",", "sensor.\"\"\" return cast(str, self._unit) class ShellyRestSensor(ShellyRestAttributeEntity, SensorEntity): \"\"\"Represent a shelly", "self._attr_last_reset = dt.utcnow().replace(second=0, microsecond=0) _LOGGER.info(\"Energy reset detected for entity %s\",", 
"Shelly.\"\"\" from __future__ import annotations from datetime import timedelta import", "hass, config_entry, async_add_entities, SENSORS, ShellySensor ) await async_setup_entry_rest( hass, config_entry,", "2), device_class=sensor.DEVICE_CLASS_ENERGY, state_class=sensor.STATE_CLASS_MEASUREMENT, last_reset=LAST_RESET_NEVER, ), (\"emeter\", \"energyReturned\"): BlockAttributeDescription( name=\"Energy Returned\",", "value=lambda value: round(value, 1), device_class=sensor.DEVICE_CLASS_HUMIDITY, state_class=sensor.STATE_CLASS_MEASUREMENT, available=lambda block: cast(bool, block.extTemp", "(\"emeter\", \"current\"): BlockAttributeDescription( name=\"Current\", unit=ELECTRIC_CURRENT_AMPERE, value=lambda value: value, device_class=sensor.DEVICE_CLASS_CURRENT, state_class=sensor.STATE_CLASS_MEASUREMENT,", "logging.getLogger(__name__) SENSORS: Final = { (\"device\", \"battery\"): BlockAttributeDescription( name=\"Battery\", unit=PERCENTAGE,", "(\"sensor\", \"luminosity\"): BlockAttributeDescription( name=\"Luminosity\", unit=LIGHT_LUX, device_class=sensor.DEVICE_CLASS_ILLUMINANCE, state_class=sensor.STATE_CLASS_MEASUREMENT, ), (\"sensor\", \"tilt\"):", "ConfigEntry, async_add_entities: AddEntitiesCallback, ) -> None: \"\"\"Set up sensors for", "if self._last_value and self._last_value > value: self._attr_last_reset = dt.utcnow().replace(second=0, microsecond=0)", "detected for entity %s\", self.name) self._last_value = value return self.attribute_value", "PERCENTAGE, POWER_WATT, SIGNAL_STRENGTH_DECIBELS_MILLIWATT, ) from homeassistant.core import HomeAssistant from homeassistant.helpers.entity_platform", "shelly REST sensor.\"\"\" @property def state(self) -> StateType: \"\"\"Return value", "1000, 2), device_class=sensor.DEVICE_CLASS_ENERGY, state_class=sensor.STATE_CLASS_MEASUREMENT, last_reset=LAST_RESET_UPTIME, ), (\"roller\", \"rollerEnergy\"): BlockAttributeDescription( name=\"Energy\",", "config_entry.data[\"sleep_period\"]: await 
async_setup_entry_attribute_entities( hass, config_entry, async_add_entities, SENSORS, ShellySleepingSensor ) else:", "aioshelly from homeassistant.components import sensor from homeassistant.components.sensor import SensorEntity from", "unit=ENERGY_KILO_WATT_HOUR, value=lambda value: round(value / 60 / 1000, 2), device_class=sensor.DEVICE_CLASS_ENERGY,", "self.attribute_value @property def state_class(self) -> str | None: \"\"\"State class", "state(self) -> StateType: \"\"\"Return value of sensor.\"\"\" return self.attribute_value @property", "\"\"\"Represent a shelly sleeping sensor.\"\"\" @property def state(self) -> StateType:", "async_setup_entry_rest, ) from .utils import get_device_uptime, temperature_unit _LOGGER: Final =", "block.extTemp != 999), ), (\"sensor\", \"luminosity\"): BlockAttributeDescription( name=\"Luminosity\", unit=LIGHT_LUX, device_class=sensor.DEVICE_CLASS_ILLUMINANCE,", "state_class=sensor.STATE_CLASS_MEASUREMENT, ), (\"roller\", \"rollerPower\"): BlockAttributeDescription( name=\"Power\", unit=POWER_WATT, value=lambda value: round(value,", "unit=SIGNAL_STRENGTH_DECIBELS_MILLIWATT, value=lambda status, _: status[\"wifi_sta\"][\"rssi\"], device_class=sensor.DEVICE_CLASS_SIGNAL_STRENGTH, state_class=sensor.STATE_CLASS_MEASUREMENT, default_enabled=False, ), \"uptime\":", "LIGHT_LUX, PERCENTAGE, POWER_WATT, SIGNAL_STRENGTH_DECIBELS_MILLIWATT, ) from homeassistant.core import HomeAssistant from", "unit_of_measurement(self) -> str | None: \"\"\"Return unit of sensor.\"\"\" return", "device_class=sensor.DEVICE_CLASS_ENERGY, state_class=sensor.STATE_CLASS_MEASUREMENT, last_reset=LAST_RESET_NEVER, ), (\"emeter\", \"energyReturned\"): BlockAttributeDescription( name=\"Energy Returned\", unit=ENERGY_KILO_WATT_HOUR,", "BlockAttributeDescription( name=\"Humidity\", unit=PERCENTAGE, value=lambda value: round(value, 1), device_class=sensor.DEVICE_CLASS_HUMIDITY, state_class=sensor.STATE_CLASS_MEASUREMENT, available=lambda", "await 
async_setup_entry_rest( hass, config_entry, async_add_entities, REST_SENSORS, ShellyRestSensor ) class ShellySensor(ShellyBlockAttributeEntity,", "round(value, 1), device_class=sensor.DEVICE_CLASS_POWER, state_class=sensor.STATE_CLASS_MEASUREMENT, ), (\"emeter\", \"voltage\"): BlockAttributeDescription( name=\"Voltage\", unit=ELECTRIC_POTENTIAL_VOLT,", "unit=POWER_WATT, value=lambda value: round(value, 1), device_class=sensor.DEVICE_CLASS_POWER, state_class=sensor.STATE_CLASS_MEASUREMENT, ), (\"roller\", \"rollerPower\"):", "ELECTRIC_CURRENT_AMPERE, ELECTRIC_POTENTIAL_VOLT, ENERGY_KILO_WATT_HOUR, LIGHT_LUX, PERCENTAGE, POWER_WATT, SIGNAL_STRENGTH_DECIBELS_MILLIWATT, ) from homeassistant.core", "BlockAttributeDescription( name=\"Power Factor\", unit=PERCENTAGE, value=lambda value: round(value * 100, 1),", "state_class=sensor.STATE_CLASS_MEASUREMENT, ), (\"sensor\", \"tilt\"): BlockAttributeDescription( name=\"Tilt\", unit=DEGREE, icon=\"mdi:angle-acute\", state_class=sensor.STATE_CLASS_MEASUREMENT, ),", "/ 3600, 1) }, ), (\"adc\", \"adc\"): BlockAttributeDescription( name=\"ADC\", unit=ELECTRIC_POTENTIAL_VOLT,", "unit=ELECTRIC_POTENTIAL_VOLT, value=lambda value: round(value, 1), device_class=sensor.DEVICE_CLASS_VOLTAGE, state_class=sensor.STATE_CLASS_MEASUREMENT, ), (\"emeter\", \"powerFactor\"):", "block.selfTest}, ), } REST_SENSORS: Final = { \"rssi\": RestAttributeDescription( name=\"RSSI\",", "async def async_setup_entry( hass: HomeAssistant, config_entry: ConfigEntry, async_add_entities: AddEntitiesCallback, )", "!= 999), ), (\"sensor\", \"humidity\"): BlockAttributeDescription( name=\"Humidity\", unit=PERCENTAGE, value=lambda value:", "a shelly sleeping sensor.\"\"\" @property def state(self) -> StateType: \"\"\"Return", "SensorEntity from homeassistant.config_entries import ConfigEntry from homeassistant.const import ( CONCENTRATION_PARTS_PER_MILLION,", "/ 3600 / SHAIR_MAX_WORK_HOURS), 1), extra_state_attributes=lambda block: { \"Operational hours\":", 
"block: cast(bool, block.extTemp != 999), ), (\"sensor\", \"humidity\"): BlockAttributeDescription( name=\"Humidity\",", "-> StateType: \"\"\"Return value of sensor.\"\"\" return self.attribute_value @property def", "| None: \"\"\"Return unit of sensor.\"\"\" return self.description.unit class ShellySleepingSensor(ShellySleepingBlockAttributeEntity,", "async_add_entities, SENSORS, ShellySensor ) await async_setup_entry_rest( hass, config_entry, async_add_entities, REST_SENSORS,", "value=lambda value: round(value, 1), device_class=sensor.DEVICE_CLASS_POWER, state_class=sensor.STATE_CLASS_MEASUREMENT, ), (\"roller\", \"rollerPower\"): BlockAttributeDescription(", "removal_condition=lambda settings, _: settings.get(\"external_power\") == 1, ), (\"device\", \"deviceTemp\"): BlockAttributeDescription(", "sensor.\"\"\" @property def state(self) -> StateType: \"\"\"Return value of sensor.\"\"\"", "HomeAssistant, config_entry: ConfigEntry, async_add_entities: AddEntitiesCallback, ) -> None: \"\"\"Set up", "unit=PERCENTAGE, value=lambda value: round(value * 100, 1), device_class=sensor.DEVICE_CLASS_POWER_FACTOR, state_class=sensor.STATE_CLASS_MEASUREMENT, ),", "sensor.\"\"\" def __init__( self, wrapper: ShellyDeviceWrapper, block: aioshelly.Block, attribute: str,", "\"\"\"Return unit of sensor.\"\"\" return self.description.unit class ShellySleepingSensor(ShellySleepingBlockAttributeEntity, SensorEntity): \"\"\"Represent", "1000, 2), device_class=sensor.DEVICE_CLASS_ENERGY, state_class=sensor.STATE_CLASS_MEASUREMENT, last_reset=LAST_RESET_UPTIME, ), (\"emeter\", \"energy\"): BlockAttributeDescription( name=\"Energy\",", "\"extTemp\"): BlockAttributeDescription( name=\"Temperature\", unit=temperature_unit, value=lambda value: round(value, 1), device_class=sensor.DEVICE_CLASS_TEMPERATURE, state_class=sensor.STATE_CLASS_MEASUREMENT,", "self, wrapper: ShellyDeviceWrapper, block: aioshelly.Block, attribute: str, description: BlockAttributeDescription, )", "await 
async_setup_entry_attribute_entities( hass, config_entry, async_add_entities, SENSORS, ShellySensor ) await async_setup_entry_rest(", "class ShellySensor(ShellyBlockAttributeEntity, SensorEntity): \"\"\"Represent a shelly sensor.\"\"\" def __init__( self,", "entity %s\", self.name) self._last_value = value return self.attribute_value @property def", "StateType from homeassistant.util import dt from . import ShellyDeviceWrapper from", "BlockAttributeDescription( name=\"Lamp Life\", unit=PERCENTAGE, icon=\"mdi:progress-wrench\", value=lambda value: round(100 - (value", "), (\"emeter\", \"voltage\"): BlockAttributeDescription( name=\"Voltage\", unit=ELECTRIC_POTENTIAL_VOLT, value=lambda value: round(value, 1),", "\"power\"): BlockAttributeDescription( name=\"Power\", unit=POWER_WATT, value=lambda value: round(value, 1), device_class=sensor.DEVICE_CLASS_POWER, state_class=sensor.STATE_CLASS_MEASUREMENT,", ".utils import get_device_uptime, temperature_unit _LOGGER: Final = logging.getLogger(__name__) SENSORS: Final", "Final, cast import aioshelly from homeassistant.components import sensor from homeassistant.components.sensor", "SENSORS: Final = { (\"device\", \"battery\"): BlockAttributeDescription( name=\"Battery\", unit=PERCENTAGE, device_class=sensor.DEVICE_CLASS_BATTERY,", "value: round(100 - (value / 3600 / SHAIR_MAX_WORK_HOURS), 1), extra_state_attributes=lambda", "), } async def async_setup_entry( hass: HomeAssistant, config_entry: ConfigEntry, async_add_entities:", "-> str | None: \"\"\"Return unit of sensor.\"\"\" return cast(str,", "ShellySensor(ShellyBlockAttributeEntity, SensorEntity): \"\"\"Represent a shelly sensor.\"\"\" def __init__( self, wrapper:", "( dt.utcnow() - timedelta(seconds=wrapper.device.status[\"uptime\"]) ).replace(second=0, microsecond=0) @property def state(self) ->", "import dt from . 
import ShellyDeviceWrapper from .const import LAST_RESET_NEVER,", "device_class=sensor.DEVICE_CLASS_SIGNAL_STRENGTH, state_class=sensor.STATE_CLASS_MEASUREMENT, default_enabled=False, ), \"uptime\": RestAttributeDescription( name=\"Uptime\", value=get_device_uptime, device_class=sensor.DEVICE_CLASS_TIMESTAMP, default_enabled=False,", "== LAST_RESET_NEVER: self._attr_last_reset = dt.utc_from_timestamp(0) elif description.last_reset == LAST_RESET_UPTIME: self._attr_last_reset", "unit=ELECTRIC_POTENTIAL_VOLT, value=lambda value: round(value, 1), device_class=sensor.DEVICE_CLASS_VOLTAGE, state_class=sensor.STATE_CLASS_MEASUREMENT, ), (\"sensor\", \"sensorOp\"):", "name=\"Current\", unit=ELECTRIC_CURRENT_AMPERE, value=lambda value: value, device_class=sensor.DEVICE_CLASS_CURRENT, state_class=sensor.STATE_CLASS_MEASUREMENT, ), (\"light\", \"power\"):", "RestAttributeDescription( name=\"Uptime\", value=get_device_uptime, device_class=sensor.DEVICE_CLASS_TIMESTAMP, default_enabled=False, ), } async def async_setup_entry(", "value of sensor.\"\"\" return self.attribute_value @property def state_class(self) -> str", "description.last_reset == LAST_RESET_NEVER: self._attr_last_reset = dt.utc_from_timestamp(0) elif description.last_reset == LAST_RESET_UPTIME:", "homeassistant.helpers.typing import StateType from homeassistant.util import dt from . 
import", "def async_setup_entry( hass: HomeAssistant, config_entry: ConfigEntry, async_add_entities: AddEntitiesCallback, ) ->", "\"\"\"Return value of sensor.\"\"\" if ( self.description.last_reset == LAST_RESET_UPTIME and", "name=\"Luminosity\", unit=LIGHT_LUX, device_class=sensor.DEVICE_CLASS_ILLUMINANCE, state_class=sensor.STATE_CLASS_MEASUREMENT, ), (\"sensor\", \"tilt\"): BlockAttributeDescription( name=\"Tilt\", unit=DEGREE,", "unit=POWER_WATT, value=lambda value: round(value, 1), device_class=sensor.DEVICE_CLASS_POWER, state_class=sensor.STATE_CLASS_MEASUREMENT, ), (\"emeter\", \"power\"):", "/ SHAIR_MAX_WORK_HOURS), 1), extra_state_attributes=lambda block: { \"Operational hours\": round(block.totalWorkTime /", "60 / 1000, 2), device_class=sensor.DEVICE_CLASS_ENERGY, state_class=sensor.STATE_CLASS_MEASUREMENT, default_enabled=False, last_reset=LAST_RESET_UPTIME, ), (\"relay\",", "def state(self) -> StateType: \"\"\"Return value of sensor.\"\"\" if (", "import logging from typing import Final, cast import aioshelly from", "device_class=sensor.DEVICE_CLASS_BATTERY, state_class=sensor.STATE_CLASS_MEASUREMENT, removal_condition=lambda settings, _: settings.get(\"external_power\") == 1, ), (\"device\",", "state_class=sensor.STATE_CLASS_MEASUREMENT, ), (\"sensor\", \"sensorOp\"): BlockAttributeDescription( name=\"Operation\", icon=\"mdi:cog-transfer\", value=lambda value: value,", "(\"sensor\", \"tilt\"): BlockAttributeDescription( name=\"Tilt\", unit=DEGREE, icon=\"mdi:angle-acute\", state_class=sensor.STATE_CLASS_MEASUREMENT, ), (\"relay\", \"totalWorkTime\"):", "temperature_unit _LOGGER: Final = logging.getLogger(__name__) SENSORS: Final = { (\"device\",", "} async def async_setup_entry( hass: HomeAssistant, config_entry: ConfigEntry, async_add_entities: AddEntitiesCallback,", "\"\"\"Initialize sensor.\"\"\" super().__init__(wrapper, block, attribute, description) self._last_value: float | None", "1), device_class=sensor.DEVICE_CLASS_TEMPERATURE, 
state_class=sensor.STATE_CLASS_MEASUREMENT, available=lambda block: cast(bool, block.extTemp != 999), ),", "StateType: \"\"\"Return value of sensor.\"\"\" return self.attribute_value @property def state_class(self)", "2), device_class=sensor.DEVICE_CLASS_ENERGY, state_class=sensor.STATE_CLASS_MEASUREMENT, last_reset=LAST_RESET_UPTIME, ), (\"sensor\", \"concentration\"): BlockAttributeDescription( name=\"Gas Concentration\",", "60 / 1000, 2), device_class=sensor.DEVICE_CLASS_ENERGY, state_class=sensor.STATE_CLASS_MEASUREMENT, last_reset=LAST_RESET_UPTIME, ), (\"roller\", \"rollerEnergy\"):", "unit=temperature_unit, value=lambda value: round(value, 1), device_class=sensor.DEVICE_CLASS_TEMPERATURE, state_class=sensor.STATE_CLASS_MEASUREMENT, available=lambda block: cast(bool,", "round(value, 1), device_class=sensor.DEVICE_CLASS_TEMPERATURE, state_class=sensor.STATE_CLASS_MEASUREMENT, default_enabled=False, ), (\"emeter\", \"current\"): BlockAttributeDescription( name=\"Current\",", "name=\"Power\", unit=POWER_WATT, value=lambda value: round(value, 1), device_class=sensor.DEVICE_CLASS_POWER, state_class=sensor.STATE_CLASS_MEASUREMENT, ), (\"emeter\",", "round(value, 1), device_class=sensor.DEVICE_CLASS_TEMPERATURE, state_class=sensor.STATE_CLASS_MEASUREMENT, available=lambda block: cast(bool, block.extTemp != 999),", "unit=ENERGY_KILO_WATT_HOUR, value=lambda value: round(value / 1000, 2), device_class=sensor.DEVICE_CLASS_ENERGY, state_class=sensor.STATE_CLASS_MEASUREMENT, last_reset=LAST_RESET_NEVER,", "from .const import LAST_RESET_NEVER, LAST_RESET_UPTIME, SHAIR_MAX_WORK_HOURS from .entity import (", "BlockAttributeDescription( name=\"Energy Returned\", unit=ENERGY_KILO_WATT_HOUR, value=lambda value: round(value / 1000, 2),", "/ 1000, 2), device_class=sensor.DEVICE_CLASS_ENERGY, state_class=sensor.STATE_CLASS_MEASUREMENT, last_reset=LAST_RESET_NEVER, ), (\"light\", \"energy\"): BlockAttributeDescription(", "round(value, 1), device_class=sensor.DEVICE_CLASS_VOLTAGE, 
state_class=sensor.STATE_CLASS_MEASUREMENT, ), (\"emeter\", \"powerFactor\"): BlockAttributeDescription( name=\"Power Factor\",", "block: {\"self_test\": block.selfTest}, ), } REST_SENSORS: Final = { \"rssi\":", "async_setup_entry_attribute_entities( hass, config_entry, async_add_entities, SENSORS, ShellySensor ) await async_setup_entry_rest( hass,", "value: round(value, 1), device_class=sensor.DEVICE_CLASS_TEMPERATURE, state_class=sensor.STATE_CLASS_MEASUREMENT, default_enabled=False, ), (\"emeter\", \"current\"): BlockAttributeDescription(", "(\"emeter\", \"voltage\"): BlockAttributeDescription( name=\"Voltage\", unit=ELECTRIC_POTENTIAL_VOLT, value=lambda value: round(value, 1), device_class=sensor.DEVICE_CLASS_VOLTAGE,", "DEGREE, ELECTRIC_CURRENT_AMPERE, ELECTRIC_POTENTIAL_VOLT, ENERGY_KILO_WATT_HOUR, LIGHT_LUX, PERCENTAGE, POWER_WATT, SIGNAL_STRENGTH_DECIBELS_MILLIWATT, ) from", "self._attr_last_reset = dt.utc_from_timestamp(0) elif description.last_reset == LAST_RESET_UPTIME: self._attr_last_reset = (", "timedelta import logging from typing import Final, cast import aioshelly", "cast import aioshelly from homeassistant.components import sensor from homeassistant.components.sensor import", "self._unit) class ShellyRestSensor(ShellyRestAttributeEntity, SensorEntity): \"\"\"Represent a shelly REST sensor.\"\"\" @property", "(value / 3600 / SHAIR_MAX_WORK_HOURS), 1), extra_state_attributes=lambda block: { \"Operational", "state_class(self) -> str | None: \"\"\"State class of sensor.\"\"\" return", "cast(str, self._unit) class ShellyRestSensor(ShellyRestAttributeEntity, SensorEntity): \"\"\"Represent a shelly REST sensor.\"\"\"", "async_setup_entry( hass: HomeAssistant, config_entry: ConfigEntry, async_add_entities: AddEntitiesCallback, ) -> None:", "BlockAttributeDescription( name=\"ADC\", unit=ELECTRIC_POTENTIAL_VOLT, value=lambda value: round(value, 1), device_class=sensor.DEVICE_CLASS_VOLTAGE, state_class=sensor.STATE_CLASS_MEASUREMENT, ),", 
"ShellyRestSensor(ShellyRestAttributeEntity, SensorEntity): \"\"\"Represent a shelly REST sensor.\"\"\" @property def state(self)", "), (\"sensor\", \"humidity\"): BlockAttributeDescription( name=\"Humidity\", unit=PERCENTAGE, value=lambda value: round(value, 1),", "\"powerFactor\"): BlockAttributeDescription( name=\"Power Factor\", unit=PERCENTAGE, value=lambda value: round(value * 100,", "round(value / 60 / 1000, 2), device_class=sensor.DEVICE_CLASS_ENERGY, state_class=sensor.STATE_CLASS_MEASUREMENT, default_enabled=False, last_reset=LAST_RESET_UPTIME,", "default_enabled=False, last_reset=LAST_RESET_UPTIME, ), (\"relay\", \"energy\"): BlockAttributeDescription( name=\"Energy\", unit=ENERGY_KILO_WATT_HOUR, value=lambda value:", "cast(bool, block.extTemp != 999), ), (\"sensor\", \"humidity\"): BlockAttributeDescription( name=\"Humidity\", unit=PERCENTAGE,", "device_class=sensor.DEVICE_CLASS_POWER, state_class=sensor.STATE_CLASS_MEASUREMENT, ), (\"emeter\", \"power\"): BlockAttributeDescription( name=\"Power\", unit=POWER_WATT, value=lambda value:", "1), device_class=sensor.DEVICE_CLASS_POWER_FACTOR, state_class=sensor.STATE_CLASS_MEASUREMENT, ), (\"relay\", \"power\"): BlockAttributeDescription( name=\"Power\", unit=POWER_WATT, value=lambda", "str | None: \"\"\"Return unit of sensor.\"\"\" return cast(str, self._unit)", "elif description.last_reset == LAST_RESET_UPTIME: self._attr_last_reset = ( dt.utcnow() - timedelta(seconds=wrapper.device.status[\"uptime\"])", "(\"device\", \"deviceTemp\"): BlockAttributeDescription( name=\"Device Temperature\", unit=temperature_unit, value=lambda value: round(value, 1),", "| None: \"\"\"State class of sensor.\"\"\" return self.description.state_class @property def", "value: self._attr_last_reset = dt.utcnow().replace(second=0, microsecond=0) _LOGGER.info(\"Energy reset detected for entity", "ShellyDeviceWrapper from .const import LAST_RESET_NEVER, LAST_RESET_UPTIME, SHAIR_MAX_WORK_HOURS from .entity import", 
"BlockAttributeDescription( name=\"Battery\", unit=PERCENTAGE, device_class=sensor.DEVICE_CLASS_BATTERY, state_class=sensor.STATE_CLASS_MEASUREMENT, removal_condition=lambda settings, _: settings.get(\"external_power\") ==", "available=lambda block: cast(bool, block.extTemp != 999), ), (\"sensor\", \"humidity\"): BlockAttributeDescription(", "homeassistant.util import dt from . import ShellyDeviceWrapper from .const import", "(\"emeter\", \"powerFactor\"): BlockAttributeDescription( name=\"Power Factor\", unit=PERCENTAGE, value=lambda value: round(value *", "), \"uptime\": RestAttributeDescription( name=\"Uptime\", value=get_device_uptime, device_class=sensor.DEVICE_CLASS_TIMESTAMP, default_enabled=False, ), } async", "value = cast(float, self.attribute_value) if self._last_value and self._last_value > value:", "\"tilt\"): BlockAttributeDescription( name=\"Tilt\", unit=DEGREE, icon=\"mdi:angle-acute\", state_class=sensor.STATE_CLASS_MEASUREMENT, ), (\"relay\", \"totalWorkTime\"): BlockAttributeDescription(", "(\"light\", \"energy\"): BlockAttributeDescription( name=\"Energy\", unit=ENERGY_KILO_WATT_HOUR, value=lambda value: round(value / 60", "not None ): value = cast(float, self.attribute_value) if self._last_value and", "import ConfigEntry from homeassistant.const import ( CONCENTRATION_PARTS_PER_MILLION, DEGREE, ELECTRIC_CURRENT_AMPERE, ELECTRIC_POTENTIAL_VOLT,", "value, device_class=sensor.DEVICE_CLASS_CURRENT, state_class=sensor.STATE_CLASS_MEASUREMENT, ), (\"light\", \"power\"): BlockAttributeDescription( name=\"Power\", unit=POWER_WATT, value=lambda", "_: status[\"wifi_sta\"][\"rssi\"], device_class=sensor.DEVICE_CLASS_SIGNAL_STRENGTH, state_class=sensor.STATE_CLASS_MEASUREMENT, default_enabled=False, ), \"uptime\": RestAttributeDescription( name=\"Uptime\", value=get_device_uptime,", "import timedelta import logging from typing import Final, cast import", "def unit_of_measurement(self) -> str | None: \"\"\"Return unit of sensor.\"\"\"", "self.last_state 
@property def state_class(self) -> str | None: \"\"\"State class", "\"energy\"): BlockAttributeDescription( name=\"Energy\", unit=ENERGY_KILO_WATT_HOUR, value=lambda value: round(value / 60 /", "), (\"sensor\", \"sensorOp\"): BlockAttributeDescription( name=\"Operation\", icon=\"mdi:cog-transfer\", value=lambda value: value, extra_state_attributes=lambda", "BlockAttributeDescription( name=\"Device Temperature\", unit=temperature_unit, value=lambda value: round(value, 1), device_class=sensor.DEVICE_CLASS_TEMPERATURE, state_class=sensor.STATE_CLASS_MEASUREMENT,", "from datetime import timedelta import logging from typing import Final,", "from homeassistant.core import HomeAssistant from homeassistant.helpers.entity_platform import AddEntitiesCallback from homeassistant.helpers.typing", "None: \"\"\"Return unit of sensor.\"\"\" return self.description.unit class ShellySleepingSensor(ShellySleepingBlockAttributeEntity, SensorEntity):", "device_class=sensor.DEVICE_CLASS_TEMPERATURE, state_class=sensor.STATE_CLASS_MEASUREMENT, available=lambda block: cast(bool, block.extTemp != 999), ), (\"sensor\",", "of sensor.\"\"\" return self.attribute_value @property def state_class(self) -> str |", "value=get_device_uptime, device_class=sensor.DEVICE_CLASS_TIMESTAMP, default_enabled=False, ), } async def async_setup_entry( hass: HomeAssistant,", "\"luminosity\"): BlockAttributeDescription( name=\"Luminosity\", unit=LIGHT_LUX, device_class=sensor.DEVICE_CLASS_ILLUMINANCE, state_class=sensor.STATE_CLASS_MEASUREMENT, ), (\"sensor\", \"tilt\"): BlockAttributeDescription(", "(\"sensor\", \"sensorOp\"): BlockAttributeDescription( name=\"Operation\", icon=\"mdi:cog-transfer\", value=lambda value: value, extra_state_attributes=lambda block:", "state_class=sensor.STATE_CLASS_MEASUREMENT, default_enabled=False, last_reset=LAST_RESET_UPTIME, ), (\"relay\", \"energy\"): BlockAttributeDescription( name=\"Energy\", unit=ENERGY_KILO_WATT_HOUR, value=lambda", "name=\"Power\", unit=POWER_WATT, 
value=lambda value: round(value, 1), device_class=sensor.DEVICE_CLASS_POWER, state_class=sensor.STATE_CLASS_MEASUREMENT, ), (\"device\",", "state_class=sensor.STATE_CLASS_MEASUREMENT, ), (\"relay\", \"power\"): BlockAttributeDescription( name=\"Power\", unit=POWER_WATT, value=lambda value: round(value,", "hass, config_entry, async_add_entities, SENSORS, ShellySleepingSensor ) else: await async_setup_entry_attribute_entities( hass,", "name=\"Battery\", unit=PERCENTAGE, device_class=sensor.DEVICE_CLASS_BATTERY, state_class=sensor.STATE_CLASS_MEASUREMENT, removal_condition=lambda settings, _: settings.get(\"external_power\") == 1,", "REST_SENSORS: Final = { \"rssi\": RestAttributeDescription( name=\"RSSI\", unit=SIGNAL_STRENGTH_DECIBELS_MILLIWATT, value=lambda status,", "is not None: return self.attribute_value return self.last_state @property def state_class(self)", "state_class=sensor.STATE_CLASS_MEASUREMENT, last_reset=LAST_RESET_UPTIME, ), (\"emeter\", \"energy\"): BlockAttributeDescription( name=\"Energy\", unit=ENERGY_KILO_WATT_HOUR, value=lambda value:", "unit=POWER_WATT, value=lambda value: round(value, 1), device_class=sensor.DEVICE_CLASS_POWER, state_class=sensor.STATE_CLASS_MEASUREMENT, default_enabled=False, ), (\"device\",", "1), device_class=sensor.DEVICE_CLASS_POWER, state_class=sensor.STATE_CLASS_MEASUREMENT, ), (\"emeter\", \"power\"): BlockAttributeDescription( name=\"Power\", unit=POWER_WATT, value=lambda", "self.name) self._last_value = value return self.attribute_value @property def state_class(self) ->", "return self.last_state @property def state_class(self) -> str | None: \"\"\"State", "device_class=sensor.DEVICE_CLASS_POWER, state_class=sensor.STATE_CLASS_MEASUREMENT, ), (\"device\", \"energy\"): BlockAttributeDescription( name=\"Energy\", unit=ENERGY_KILO_WATT_HOUR, value=lambda value:", "1000, 2), device_class=sensor.DEVICE_CLASS_ENERGY, state_class=sensor.STATE_CLASS_MEASUREMENT, default_enabled=False, last_reset=LAST_RESET_UPTIME, ), 
(\"relay\", \"energy\"): BlockAttributeDescription(", "hass: HomeAssistant, config_entry: ConfigEntry, async_add_entities: AddEntitiesCallback, ) -> None: \"\"\"Set", "import AddEntitiesCallback from homeassistant.helpers.typing import StateType from homeassistant.util import dt", "value: round(value / 60 / 1000, 2), device_class=sensor.DEVICE_CLASS_ENERGY, state_class=sensor.STATE_CLASS_MEASUREMENT, last_reset=LAST_RESET_UPTIME,", "default_enabled=False, ), } async def async_setup_entry( hass: HomeAssistant, config_entry: ConfigEntry,", "state_class=sensor.STATE_CLASS_MEASUREMENT, last_reset=LAST_RESET_NEVER, ), (\"light\", \"energy\"): BlockAttributeDescription( name=\"Energy\", unit=ENERGY_KILO_WATT_HOUR, value=lambda value:", "from homeassistant.components import sensor from homeassistant.components.sensor import SensorEntity from homeassistant.config_entries", "homeassistant.const import ( CONCENTRATION_PARTS_PER_MILLION, DEGREE, ELECTRIC_CURRENT_AMPERE, ELECTRIC_POTENTIAL_VOLT, ENERGY_KILO_WATT_HOUR, LIGHT_LUX, PERCENTAGE,", "import annotations from datetime import timedelta import logging from typing", "attribute: str, description: BlockAttributeDescription, ) -> None: \"\"\"Initialize sensor.\"\"\" super().__init__(wrapper,", "super().__init__(wrapper, block, attribute, description) self._last_value: float | None = None", "None: \"\"\"Return unit of sensor.\"\"\" return cast(str, self._unit) class ShellyRestSensor(ShellyRestAttributeEntity,", "state_class=sensor.STATE_CLASS_MEASUREMENT, available=lambda block: cast(bool, block.extTemp != 999), ), (\"sensor\", \"humidity\"):", "-> str | None: \"\"\"Return unit of sensor.\"\"\" return self.description.unit", "state_class=sensor.STATE_CLASS_MEASUREMENT, ), (\"device\", \"energy\"): BlockAttributeDescription( name=\"Energy\", unit=ENERGY_KILO_WATT_HOUR, value=lambda value: round(value", "from homeassistant.const import ( CONCENTRATION_PARTS_PER_MILLION, DEGREE, ELECTRIC_CURRENT_AMPERE, ELECTRIC_POTENTIAL_VOLT, 
ENERGY_KILO_WATT_HOUR, LIGHT_LUX,", "self.description.state_class @property def unit_of_measurement(self) -> str | None: \"\"\"Return unit", "state_class=sensor.STATE_CLASS_MEASUREMENT, ), (\"emeter\", \"voltage\"): BlockAttributeDescription( name=\"Voltage\", unit=ELECTRIC_POTENTIAL_VOLT, value=lambda value: round(value,", "/ 60 / 1000, 2), device_class=sensor.DEVICE_CLASS_ENERGY, state_class=sensor.STATE_CLASS_MEASUREMENT, default_enabled=False, last_reset=LAST_RESET_UPTIME, ),", "cast(bool, block.extTemp != 999), ), (\"sensor\", \"luminosity\"): BlockAttributeDescription( name=\"Luminosity\", unit=LIGHT_LUX,", "/ 60 / 1000, 2), device_class=sensor.DEVICE_CLASS_ENERGY, state_class=sensor.STATE_CLASS_MEASUREMENT, last_reset=LAST_RESET_UPTIME, ), (\"sensor\",", "\"\"\"Represent a shelly sensor.\"\"\" def __init__( self, wrapper: ShellyDeviceWrapper, block:", "ShellyDeviceWrapper, block: aioshelly.Block, attribute: str, description: BlockAttributeDescription, ) -> None:", "HomeAssistant from homeassistant.helpers.entity_platform import AddEntitiesCallback from homeassistant.helpers.typing import StateType from", "value=lambda value: value, extra_state_attributes=lambda block: {\"self_test\": block.selfTest}, ), } REST_SENSORS:", "sensor.\"\"\" if self.block is not None: return self.attribute_value return self.last_state", "_LOGGER.info(\"Energy reset detected for entity %s\", self.name) self._last_value = value", "reset detected for entity %s\", self.name) self._last_value = value return", "from homeassistant.components.sensor import SensorEntity from homeassistant.config_entries import ConfigEntry from homeassistant.const", "{ (\"device\", \"battery\"): BlockAttributeDescription( name=\"Battery\", unit=PERCENTAGE, device_class=sensor.DEVICE_CLASS_BATTERY, state_class=sensor.STATE_CLASS_MEASUREMENT, removal_condition=lambda settings,", "), (\"relay\", \"energy\"): BlockAttributeDescription( name=\"Energy\", unit=ENERGY_KILO_WATT_HOUR, value=lambda value: round(value 
/", "of sensor.\"\"\" if self.block is not None: return self.attribute_value return", "999), ), (\"sensor\", \"humidity\"): BlockAttributeDescription( name=\"Humidity\", unit=PERCENTAGE, value=lambda value: round(value,", "name=\"Humidity\", unit=PERCENTAGE, value=lambda value: round(value, 1), device_class=sensor.DEVICE_CLASS_HUMIDITY, state_class=sensor.STATE_CLASS_MEASUREMENT, available=lambda block:", "LAST_RESET_UPTIME, SHAIR_MAX_WORK_HOURS from .entity import ( BlockAttributeDescription, RestAttributeDescription, ShellyBlockAttributeEntity, ShellyRestAttributeEntity,", "from homeassistant.helpers.entity_platform import AddEntitiesCallback from homeassistant.helpers.typing import StateType from homeassistant.util", "state_class=sensor.STATE_CLASS_MEASUREMENT, available=lambda block: cast(bool, block.extTemp != 999), ), (\"sensor\", \"luminosity\"):", "name=\"Voltage\", unit=ELECTRIC_POTENTIAL_VOLT, value=lambda value: round(value, 1), device_class=sensor.DEVICE_CLASS_VOLTAGE, state_class=sensor.STATE_CLASS_MEASUREMENT, ), (\"emeter\",", "value=lambda value: round(value, 1), device_class=sensor.DEVICE_CLASS_VOLTAGE, state_class=sensor.STATE_CLASS_MEASUREMENT, ), (\"sensor\", \"sensorOp\"): BlockAttributeDescription(", "name=\"Power Factor\", unit=PERCENTAGE, value=lambda value: round(value * 100, 1), device_class=sensor.DEVICE_CLASS_POWER_FACTOR,", "* 100, 1), device_class=sensor.DEVICE_CLASS_POWER_FACTOR, state_class=sensor.STATE_CLASS_MEASUREMENT, ), (\"relay\", \"power\"): BlockAttributeDescription( name=\"Power\",", "/ 1000, 2), device_class=sensor.DEVICE_CLASS_ENERGY, state_class=sensor.STATE_CLASS_MEASUREMENT, last_reset=LAST_RESET_UPTIME, ), (\"sensor\", \"concentration\"): BlockAttributeDescription(", "( CONCENTRATION_PARTS_PER_MILLION, DEGREE, ELECTRIC_CURRENT_AMPERE, ELECTRIC_POTENTIAL_VOLT, ENERGY_KILO_WATT_HOUR, LIGHT_LUX, PERCENTAGE, POWER_WATT, SIGNAL_STRENGTH_DECIBELS_MILLIWATT,", "from .entity import ( BlockAttributeDescription, 
RestAttributeDescription, ShellyBlockAttributeEntity, ShellyRestAttributeEntity, ShellySleepingBlockAttributeEntity, async_setup_entry_attribute_entities,", "\"\"\"State class of sensor.\"\"\" return self.description.state_class @property def unit_of_measurement(self) ->", "= logging.getLogger(__name__) SENSORS: Final = { (\"device\", \"battery\"): BlockAttributeDescription( name=\"Battery\",", "= dt.utcnow().replace(second=0, microsecond=0) _LOGGER.info(\"Energy reset detected for entity %s\", self.name)", ") from .utils import get_device_uptime, temperature_unit _LOGGER: Final = logging.getLogger(__name__)", "async_setup_entry_attribute_entities, async_setup_entry_rest, ) from .utils import get_device_uptime, temperature_unit _LOGGER: Final", "device_class=sensor.DEVICE_CLASS_CURRENT, state_class=sensor.STATE_CLASS_MEASUREMENT, ), (\"light\", \"power\"): BlockAttributeDescription( name=\"Power\", unit=POWER_WATT, value=lambda value:", "self._last_value and self._last_value > value: self._attr_last_reset = dt.utcnow().replace(second=0, microsecond=0) _LOGGER.info(\"Energy", "self._last_value = value return self.attribute_value @property def state_class(self) -> str", "( BlockAttributeDescription, RestAttributeDescription, ShellyBlockAttributeEntity, ShellyRestAttributeEntity, ShellySleepingBlockAttributeEntity, async_setup_entry_attribute_entities, async_setup_entry_rest, ) from", "value: value, device_class=sensor.DEVICE_CLASS_CURRENT, state_class=sensor.STATE_CLASS_MEASUREMENT, ), (\"light\", \"power\"): BlockAttributeDescription( name=\"Power\", unit=POWER_WATT,", "extra_state_attributes=lambda block: {\"self_test\": block.selfTest}, ), } REST_SENSORS: Final = {", "name=\"Gas Concentration\", unit=CONCENTRATION_PARTS_PER_MILLION, icon=\"mdi:gauge\", state_class=sensor.STATE_CLASS_MEASUREMENT, ), (\"sensor\", \"extTemp\"): BlockAttributeDescription( name=\"Temperature\",", "( self.description.last_reset == LAST_RESET_UPTIME and self.attribute_value is not None 
):", "round(value / 1000, 2), device_class=sensor.DEVICE_CLASS_ENERGY, state_class=sensor.STATE_CLASS_MEASUREMENT, last_reset=LAST_RESET_NEVER, ), (\"light\", \"energy\"):", "): value = cast(float, self.attribute_value) if self._last_value and self._last_value >", "} REST_SENSORS: Final = { \"rssi\": RestAttributeDescription( name=\"RSSI\", unit=SIGNAL_STRENGTH_DECIBELS_MILLIWATT, value=lambda", "Final = logging.getLogger(__name__) SENSORS: Final = { (\"device\", \"battery\"): BlockAttributeDescription(", "BlockAttributeDescription( name=\"Power\", unit=POWER_WATT, value=lambda value: round(value, 1), device_class=sensor.DEVICE_CLASS_POWER, state_class=sensor.STATE_CLASS_MEASUREMENT, ),", "sensor.\"\"\" return self.attribute_value @property def state_class(self) -> str | None:", "config_entry, async_add_entities, SENSORS, ShellySensor ) await async_setup_entry_rest( hass, config_entry, async_add_entities,", "1), device_class=sensor.DEVICE_CLASS_POWER, state_class=sensor.STATE_CLASS_MEASUREMENT, ), (\"device\", \"energy\"): BlockAttributeDescription( name=\"Energy\", unit=ENERGY_KILO_WATT_HOUR, value=lambda", "= value return self.attribute_value @property def state_class(self) -> str |", "), (\"emeter\", \"energyReturned\"): BlockAttributeDescription( name=\"Energy Returned\", unit=ENERGY_KILO_WATT_HOUR, value=lambda value: round(value", "ShellySleepingBlockAttributeEntity, async_setup_entry_attribute_entities, async_setup_entry_rest, ) from .utils import get_device_uptime, temperature_unit _LOGGER:", "\"\"\"Sensor for Shelly.\"\"\" from __future__ import annotations from datetime import", "default_enabled=False, ), (\"emeter\", \"current\"): BlockAttributeDescription( name=\"Current\", unit=ELECTRIC_CURRENT_AMPERE, value=lambda value: value,", "(\"sensor\", \"extTemp\"): BlockAttributeDescription( name=\"Temperature\", unit=temperature_unit, value=lambda value: round(value, 1), device_class=sensor.DEVICE_CLASS_TEMPERATURE,", "self._last_value > value: 
self._attr_last_reset = dt.utcnow().replace(second=0, microsecond=0) _LOGGER.info(\"Energy reset detected", "device_class=sensor.DEVICE_CLASS_ENERGY, state_class=sensor.STATE_CLASS_MEASUREMENT, last_reset=LAST_RESET_NEVER, ), (\"light\", \"energy\"): BlockAttributeDescription( name=\"Energy\", unit=ENERGY_KILO_WATT_HOUR, value=lambda", "return self.description.state_class @property def unit_of_measurement(self) -> str | None: \"\"\"Return", ") class ShellySensor(ShellyBlockAttributeEntity, SensorEntity): \"\"\"Represent a shelly sensor.\"\"\" def __init__(", "name=\"Device Temperature\", unit=temperature_unit, value=lambda value: round(value, 1), device_class=sensor.DEVICE_CLASS_TEMPERATURE, state_class=sensor.STATE_CLASS_MEASUREMENT, default_enabled=False,", "\"uptime\": RestAttributeDescription( name=\"Uptime\", value=get_device_uptime, device_class=sensor.DEVICE_CLASS_TIMESTAMP, default_enabled=False, ), } async def", "SensorEntity): \"\"\"Represent a shelly REST sensor.\"\"\" @property def state(self) ->", "state_class=sensor.STATE_CLASS_MEASUREMENT, removal_condition=lambda settings, _: settings.get(\"external_power\") == 1, ), (\"device\", \"deviceTemp\"):", "sensor from homeassistant.components.sensor import SensorEntity from homeassistant.config_entries import ConfigEntry from", "icon=\"mdi:gauge\", state_class=sensor.STATE_CLASS_MEASUREMENT, ), (\"sensor\", \"extTemp\"): BlockAttributeDescription( name=\"Temperature\", unit=temperature_unit, value=lambda value:", "datetime import timedelta import logging from typing import Final, cast", "(\"relay\", \"totalWorkTime\"): BlockAttributeDescription( name=\"Lamp Life\", unit=PERCENTAGE, icon=\"mdi:progress-wrench\", value=lambda value: round(100", "\"rollerEnergy\"): BlockAttributeDescription( name=\"Energy\", unit=ENERGY_KILO_WATT_HOUR, value=lambda value: round(value / 60 /", "homeassistant.core import HomeAssistant from homeassistant.helpers.entity_platform import AddEntitiesCallback from 
homeassistant.helpers.typing import", "value=lambda value: round(value, 1), device_class=sensor.DEVICE_CLASS_TEMPERATURE, state_class=sensor.STATE_CLASS_MEASUREMENT, available=lambda block: cast(bool, block.extTemp", "), (\"roller\", \"rollerPower\"): BlockAttributeDescription( name=\"Power\", unit=POWER_WATT, value=lambda value: round(value, 1),", "round(block.totalWorkTime / 3600, 1) }, ), (\"adc\", \"adc\"): BlockAttributeDescription( name=\"ADC\",", "async_setup_entry_attribute_entities( hass, config_entry, async_add_entities, SENSORS, ShellySleepingSensor ) else: await async_setup_entry_attribute_entities(", "if self.block is not None: return self.attribute_value return self.last_state @property", "- timedelta(seconds=wrapper.device.status[\"uptime\"]) ).replace(second=0, microsecond=0) @property def state(self) -> StateType: \"\"\"Return", "str | None: \"\"\"Return unit of sensor.\"\"\" return self.description.unit class", "dt.utcnow().replace(second=0, microsecond=0) _LOGGER.info(\"Energy reset detected for entity %s\", self.name) self._last_value", "), (\"emeter\", \"powerFactor\"): BlockAttributeDescription( name=\"Power Factor\", unit=PERCENTAGE, value=lambda value: round(value", "if config_entry.data[\"sleep_period\"]: await async_setup_entry_attribute_entities( hass, config_entry, async_add_entities, SENSORS, ShellySleepingSensor )", ") await async_setup_entry_rest( hass, config_entry, async_add_entities, REST_SENSORS, ShellyRestSensor ) class", "unit=DEGREE, icon=\"mdi:angle-acute\", state_class=sensor.STATE_CLASS_MEASUREMENT, ), (\"relay\", \"totalWorkTime\"): BlockAttributeDescription( name=\"Lamp Life\", unit=PERCENTAGE,", "not None: return self.attribute_value return self.last_state @property def state_class(self) ->", "> value: self._attr_last_reset = dt.utcnow().replace(second=0, microsecond=0) _LOGGER.info(\"Energy reset detected for", "device_class=sensor.DEVICE_CLASS_VOLTAGE, state_class=sensor.STATE_CLASS_MEASUREMENT, ), (\"emeter\", 
\"powerFactor\"): BlockAttributeDescription( name=\"Power Factor\", unit=PERCENTAGE, value=lambda", "1), device_class=sensor.DEVICE_CLASS_HUMIDITY, state_class=sensor.STATE_CLASS_MEASUREMENT, available=lambda block: cast(bool, block.extTemp != 999), ),", "wrapper: ShellyDeviceWrapper, block: aioshelly.Block, attribute: str, description: BlockAttributeDescription, ) ->", "logging from typing import Final, cast import aioshelly from homeassistant.components", "from homeassistant.util import dt from . import ShellyDeviceWrapper from .const", "is not None ): value = cast(float, self.attribute_value) if self._last_value", "of sensor.\"\"\" if ( self.description.last_reset == LAST_RESET_UPTIME and self.attribute_value is", "round(100 - (value / 3600 / SHAIR_MAX_WORK_HOURS), 1), extra_state_attributes=lambda block:", "value=lambda value: round(value * 100, 1), device_class=sensor.DEVICE_CLASS_POWER_FACTOR, state_class=sensor.STATE_CLASS_MEASUREMENT, ), (\"relay\",", "}, ), (\"adc\", \"adc\"): BlockAttributeDescription( name=\"ADC\", unit=ELECTRIC_POTENTIAL_VOLT, value=lambda value: round(value,", "{\"self_test\": block.selfTest}, ), } REST_SENSORS: Final = { \"rssi\": RestAttributeDescription(", "| None = None if description.last_reset == LAST_RESET_NEVER: self._attr_last_reset =", "unit=temperature_unit, value=lambda value: round(value, 1), device_class=sensor.DEVICE_CLASS_TEMPERATURE, state_class=sensor.STATE_CLASS_MEASUREMENT, default_enabled=False, ), (\"emeter\",", "def state(self) -> StateType: \"\"\"Return value of sensor.\"\"\" if self.block", "available=lambda block: cast(bool, block.extTemp != 999), ), (\"sensor\", \"luminosity\"): BlockAttributeDescription(", "for device.\"\"\" if config_entry.data[\"sleep_period\"]: await async_setup_entry_attribute_entities( hass, config_entry, async_add_entities, SENSORS,", "icon=\"mdi:progress-wrench\", value=lambda value: round(100 - (value / 3600 / SHAIR_MAX_WORK_HOURS),", "1), device_class=sensor.DEVICE_CLASS_POWER, 
state_class=sensor.STATE_CLASS_MEASUREMENT, default_enabled=False, ), (\"device\", \"power\"): BlockAttributeDescription( name=\"Power\", unit=POWER_WATT,", "from homeassistant.helpers.typing import StateType from homeassistant.util import dt from .", "@property def state_class(self) -> str | None: \"\"\"State class of", "import aioshelly from homeassistant.components import sensor from homeassistant.components.sensor import SensorEntity", "\"adc\"): BlockAttributeDescription( name=\"ADC\", unit=ELECTRIC_POTENTIAL_VOLT, value=lambda value: round(value, 1), device_class=sensor.DEVICE_CLASS_VOLTAGE, state_class=sensor.STATE_CLASS_MEASUREMENT,", "), (\"sensor\", \"concentration\"): BlockAttributeDescription( name=\"Gas Concentration\", unit=CONCENTRATION_PARTS_PER_MILLION, icon=\"mdi:gauge\", state_class=sensor.STATE_CLASS_MEASUREMENT, ),", "@property def unit_of_measurement(self) -> str | None: \"\"\"Return unit of", "RestAttributeDescription( name=\"RSSI\", unit=SIGNAL_STRENGTH_DECIBELS_MILLIWATT, value=lambda status, _: status[\"wifi_sta\"][\"rssi\"], device_class=sensor.DEVICE_CLASS_SIGNAL_STRENGTH, state_class=sensor.STATE_CLASS_MEASUREMENT, default_enabled=False,", "async_setup_entry_rest( hass, config_entry, async_add_entities, REST_SENSORS, ShellyRestSensor ) class ShellySensor(ShellyBlockAttributeEntity, SensorEntity):", "device_class=sensor.DEVICE_CLASS_VOLTAGE, state_class=sensor.STATE_CLASS_MEASUREMENT, ), (\"sensor\", \"sensorOp\"): BlockAttributeDescription( name=\"Operation\", icon=\"mdi:cog-transfer\", value=lambda value:", "Returned\", unit=ENERGY_KILO_WATT_HOUR, value=lambda value: round(value / 1000, 2), device_class=sensor.DEVICE_CLASS_ENERGY, state_class=sensor.STATE_CLASS_MEASUREMENT,", "state_class=sensor.STATE_CLASS_MEASUREMENT, last_reset=LAST_RESET_UPTIME, ), (\"sensor\", \"concentration\"): BlockAttributeDescription( name=\"Gas Concentration\", unit=CONCENTRATION_PARTS_PER_MILLION, icon=\"mdi:gauge\",", "(\"device\", \"power\"): 
BlockAttributeDescription( name=\"Power\", unit=POWER_WATT, value=lambda value: round(value, 1), device_class=sensor.DEVICE_CLASS_POWER,", "{ \"Operational hours\": round(block.totalWorkTime / 3600, 1) }, ), (\"adc\",", "return self.description.unit class ShellySleepingSensor(ShellySleepingBlockAttributeEntity, SensorEntity): \"\"\"Represent a shelly sleeping sensor.\"\"\"", "= cast(float, self.attribute_value) if self._last_value and self._last_value > value: self._attr_last_reset", "import ( BlockAttributeDescription, RestAttributeDescription, ShellyBlockAttributeEntity, ShellyRestAttributeEntity, ShellySleepingBlockAttributeEntity, async_setup_entry_attribute_entities, async_setup_entry_rest, )", "if description.last_reset == LAST_RESET_NEVER: self._attr_last_reset = dt.utc_from_timestamp(0) elif description.last_reset ==", "(\"device\", \"energy\"): BlockAttributeDescription( name=\"Energy\", unit=ENERGY_KILO_WATT_HOUR, value=lambda value: round(value / 60", "a shelly REST sensor.\"\"\" @property def state(self) -> StateType: \"\"\"Return", "import HomeAssistant from homeassistant.helpers.entity_platform import AddEntitiesCallback from homeassistant.helpers.typing import StateType", "round(value, 1), device_class=sensor.DEVICE_CLASS_POWER, state_class=sensor.STATE_CLASS_MEASUREMENT, ), (\"roller\", \"rollerPower\"): BlockAttributeDescription( name=\"Power\", unit=POWER_WATT,", "BlockAttributeDescription( name=\"Current\", unit=ELECTRIC_CURRENT_AMPERE, value=lambda value: value, device_class=sensor.DEVICE_CLASS_CURRENT, state_class=sensor.STATE_CLASS_MEASUREMENT, ), (\"light\",", "description: BlockAttributeDescription, ) -> None: \"\"\"Initialize sensor.\"\"\" super().__init__(wrapper, block, attribute,", "value: round(value / 60 / 1000, 2), device_class=sensor.DEVICE_CLASS_ENERGY, state_class=sensor.STATE_CLASS_MEASUREMENT, default_enabled=False,", "_: settings.get(\"external_power\") == 1, ), (\"device\", \"deviceTemp\"): BlockAttributeDescription( 
name=\"Device Temperature\",", "homeassistant.config_entries import ConfigEntry from homeassistant.const import ( CONCENTRATION_PARTS_PER_MILLION, DEGREE, ELECTRIC_CURRENT_AMPERE,", "), (\"device\", \"power\"): BlockAttributeDescription( name=\"Power\", unit=POWER_WATT, value=lambda value: round(value, 1),", "await async_setup_entry_attribute_entities( hass, config_entry, async_add_entities, SENSORS, ShellySleepingSensor ) else: await", "value: round(value, 1), device_class=sensor.DEVICE_CLASS_POWER, state_class=sensor.STATE_CLASS_MEASUREMENT, default_enabled=False, ), (\"device\", \"power\"): BlockAttributeDescription(", "sensors for device.\"\"\" if config_entry.data[\"sleep_period\"]: await async_setup_entry_attribute_entities( hass, config_entry, async_add_entities,", "\"deviceTemp\"): BlockAttributeDescription( name=\"Device Temperature\", unit=temperature_unit, value=lambda value: round(value, 1), device_class=sensor.DEVICE_CLASS_TEMPERATURE,", "Final = { (\"device\", \"battery\"): BlockAttributeDescription( name=\"Battery\", unit=PERCENTAGE, device_class=sensor.DEVICE_CLASS_BATTERY, state_class=sensor.STATE_CLASS_MEASUREMENT,", "microsecond=0) @property def state(self) -> StateType: \"\"\"Return value of sensor.\"\"\"", "/ 1000, 2), device_class=sensor.DEVICE_CLASS_ENERGY, state_class=sensor.STATE_CLASS_MEASUREMENT, last_reset=LAST_RESET_NEVER, ), (\"emeter\", \"energyReturned\"): BlockAttributeDescription(", "class of sensor.\"\"\" return self.description.state_class @property def unit_of_measurement(self) -> str", "dt from . 
import ShellyDeviceWrapper from .const import LAST_RESET_NEVER, LAST_RESET_UPTIME,", "\"\"\"Set up sensors for device.\"\"\" if config_entry.data[\"sleep_period\"]: await async_setup_entry_attribute_entities( hass,", "device_class=sensor.DEVICE_CLASS_ENERGY, state_class=sensor.STATE_CLASS_MEASUREMENT, last_reset=LAST_RESET_UPTIME, ), (\"emeter\", \"energy\"): BlockAttributeDescription( name=\"Energy\", unit=ENERGY_KILO_WATT_HOUR, value=lambda", "), (\"emeter\", \"energy\"): BlockAttributeDescription( name=\"Energy\", unit=ENERGY_KILO_WATT_HOUR, value=lambda value: round(value /", "(\"emeter\", \"energy\"): BlockAttributeDescription( name=\"Energy\", unit=ENERGY_KILO_WATT_HOUR, value=lambda value: round(value / 1000,", "/ 60 / 1000, 2), device_class=sensor.DEVICE_CLASS_ENERGY, state_class=sensor.STATE_CLASS_MEASUREMENT, last_reset=LAST_RESET_UPTIME, ), (\"roller\",", "last_reset=LAST_RESET_UPTIME, ), (\"relay\", \"energy\"): BlockAttributeDescription( name=\"Energy\", unit=ENERGY_KILO_WATT_HOUR, value=lambda value: round(value", "Concentration\", unit=CONCENTRATION_PARTS_PER_MILLION, icon=\"mdi:gauge\", state_class=sensor.STATE_CLASS_MEASUREMENT, ), (\"sensor\", \"extTemp\"): BlockAttributeDescription( name=\"Temperature\", unit=temperature_unit,", "shelly sensor.\"\"\" def __init__( self, wrapper: ShellyDeviceWrapper, block: aioshelly.Block, attribute:", "str, description: BlockAttributeDescription, ) -> None: \"\"\"Initialize sensor.\"\"\" super().__init__(wrapper, block,", "self.attribute_value return self.last_state @property def state_class(self) -> str | None:", "(\"relay\", \"power\"): BlockAttributeDescription( name=\"Power\", unit=POWER_WATT, value=lambda value: round(value, 1), device_class=sensor.DEVICE_CLASS_POWER,", "device_class=sensor.DEVICE_CLASS_ENERGY, state_class=sensor.STATE_CLASS_MEASUREMENT, last_reset=LAST_RESET_UPTIME, ), (\"roller\", \"rollerEnergy\"): BlockAttributeDescription( name=\"Energy\", unit=ENERGY_KILO_WATT_HOUR, value=lambda", 
"last_reset=LAST_RESET_NEVER, ), (\"emeter\", \"energyReturned\"): BlockAttributeDescription( name=\"Energy Returned\", unit=ENERGY_KILO_WATT_HOUR, value=lambda value:", "config_entry, async_add_entities, SENSORS, ShellySleepingSensor ) else: await async_setup_entry_attribute_entities( hass, config_entry,", "(\"relay\", \"energy\"): BlockAttributeDescription( name=\"Energy\", unit=ENERGY_KILO_WATT_HOUR, value=lambda value: round(value / 60", "\"current\"): BlockAttributeDescription( name=\"Current\", unit=ELECTRIC_CURRENT_AMPERE, value=lambda value: value, device_class=sensor.DEVICE_CLASS_CURRENT, state_class=sensor.STATE_CLASS_MEASUREMENT, ),", ").replace(second=0, microsecond=0) @property def state(self) -> StateType: \"\"\"Return value of", "\"\"\"Return value of sensor.\"\"\" return self.attribute_value @property def state_class(self) ->", "\"humidity\"): BlockAttributeDescription( name=\"Humidity\", unit=PERCENTAGE, value=lambda value: round(value, 1), device_class=sensor.DEVICE_CLASS_HUMIDITY, state_class=sensor.STATE_CLASS_MEASUREMENT,", "unit of sensor.\"\"\" return self.description.unit class ShellySleepingSensor(ShellySleepingBlockAttributeEntity, SensorEntity): \"\"\"Represent a", "class ShellyRestSensor(ShellyRestAttributeEntity, SensorEntity): \"\"\"Represent a shelly REST sensor.\"\"\" @property def", "SENSORS, ShellySleepingSensor ) else: await async_setup_entry_attribute_entities( hass, config_entry, async_add_entities, SENSORS,", "round(value / 1000, 2), device_class=sensor.DEVICE_CLASS_ENERGY, state_class=sensor.STATE_CLASS_MEASUREMENT, last_reset=LAST_RESET_NEVER, ), (\"emeter\", \"energyReturned\"):", "device_class=sensor.DEVICE_CLASS_POWER, state_class=sensor.STATE_CLASS_MEASUREMENT, ), (\"roller\", \"rollerPower\"): BlockAttributeDescription( name=\"Power\", unit=POWER_WATT, value=lambda value:", "device_class=sensor.DEVICE_CLASS_ILLUMINANCE, state_class=sensor.STATE_CLASS_MEASUREMENT, ), (\"sensor\", \"tilt\"): BlockAttributeDescription( 
name=\"Tilt\", unit=DEGREE, icon=\"mdi:angle-acute\", state_class=sensor.STATE_CLASS_MEASUREMENT,", "sleeping sensor.\"\"\" @property def state(self) -> StateType: \"\"\"Return value of", "), (\"sensor\", \"tilt\"): BlockAttributeDescription( name=\"Tilt\", unit=DEGREE, icon=\"mdi:angle-acute\", state_class=sensor.STATE_CLASS_MEASUREMENT, ), (\"relay\",", "Factor\", unit=PERCENTAGE, value=lambda value: round(value * 100, 1), device_class=sensor.DEVICE_CLASS_POWER_FACTOR, state_class=sensor.STATE_CLASS_MEASUREMENT,", "/ 1000, 2), device_class=sensor.DEVICE_CLASS_ENERGY, state_class=sensor.STATE_CLASS_MEASUREMENT, default_enabled=False, last_reset=LAST_RESET_UPTIME, ), (\"relay\", \"energy\"):", "<reponame>RavensburgOP/core \"\"\"Sensor for Shelly.\"\"\" from __future__ import annotations from datetime", "async_add_entities, SENSORS, ShellySleepingSensor ) else: await async_setup_entry_attribute_entities( hass, config_entry, async_add_entities,", "round(value, 1), device_class=sensor.DEVICE_CLASS_POWER, state_class=sensor.STATE_CLASS_MEASUREMENT, ), (\"emeter\", \"power\"): BlockAttributeDescription( name=\"Power\", unit=POWER_WATT,", "value: round(value / 1000, 2), device_class=sensor.DEVICE_CLASS_ENERGY, state_class=sensor.STATE_CLASS_MEASUREMENT, last_reset=LAST_RESET_NEVER, ), (\"emeter\",", "\"sensorOp\"): BlockAttributeDescription( name=\"Operation\", icon=\"mdi:cog-transfer\", value=lambda value: value, extra_state_attributes=lambda block: {\"self_test\":", "self.description.last_reset == LAST_RESET_UPTIME and self.attribute_value is not None ): value", "name=\"Power\", unit=POWER_WATT, value=lambda value: round(value, 1), device_class=sensor.DEVICE_CLASS_POWER, state_class=sensor.STATE_CLASS_MEASUREMENT, ), (\"roller\",", "homeassistant.components import sensor from homeassistant.components.sensor import SensorEntity from homeassistant.config_entries import", "POWER_WATT, SIGNAL_STRENGTH_DECIBELS_MILLIWATT, ) from homeassistant.core import HomeAssistant from 
homeassistant.helpers.entity_platform import", "import get_device_uptime, temperature_unit _LOGGER: Final = logging.getLogger(__name__) SENSORS: Final =", "\"totalWorkTime\"): BlockAttributeDescription( name=\"Lamp Life\", unit=PERCENTAGE, icon=\"mdi:progress-wrench\", value=lambda value: round(100 -", "Final = { \"rssi\": RestAttributeDescription( name=\"RSSI\", unit=SIGNAL_STRENGTH_DECIBELS_MILLIWATT, value=lambda status, _:", "from homeassistant.config_entries import ConfigEntry from homeassistant.const import ( CONCENTRATION_PARTS_PER_MILLION, DEGREE,", "_LOGGER: Final = logging.getLogger(__name__) SENSORS: Final = { (\"device\", \"battery\"):", "@property def state(self) -> StateType: \"\"\"Return value of sensor.\"\"\" return", "value: round(value, 1), device_class=sensor.DEVICE_CLASS_HUMIDITY, state_class=sensor.STATE_CLASS_MEASUREMENT, available=lambda block: cast(bool, block.extTemp !=", "== LAST_RESET_UPTIME: self._attr_last_reset = ( dt.utcnow() - timedelta(seconds=wrapper.device.status[\"uptime\"]) ).replace(second=0, microsecond=0)", ") -> None: \"\"\"Initialize sensor.\"\"\" super().__init__(wrapper, block, attribute, description) self._last_value:", "1) }, ), (\"adc\", \"adc\"): BlockAttributeDescription( name=\"ADC\", unit=ELECTRIC_POTENTIAL_VOLT, value=lambda value:", "-> str | None: \"\"\"State class of sensor.\"\"\" return self.description.state_class", "\"battery\"): BlockAttributeDescription( name=\"Battery\", unit=PERCENTAGE, device_class=sensor.DEVICE_CLASS_BATTERY, state_class=sensor.STATE_CLASS_MEASUREMENT, removal_condition=lambda settings, _: settings.get(\"external_power\")", "BlockAttributeDescription( name=\"Voltage\", unit=ELECTRIC_POTENTIAL_VOLT, value=lambda value: round(value, 1), device_class=sensor.DEVICE_CLASS_VOLTAGE, state_class=sensor.STATE_CLASS_MEASUREMENT, ),", "block, attribute, description) self._last_value: float | None = None if", "last_reset=LAST_RESET_NEVER, ), (\"light\", \"energy\"): BlockAttributeDescription( 
name=\"Energy\", unit=ENERGY_KILO_WATT_HOUR, value=lambda value: round(value", "from __future__ import annotations from datetime import timedelta import logging", "name=\"Power\", unit=POWER_WATT, value=lambda value: round(value, 1), device_class=sensor.DEVICE_CLASS_POWER, state_class=sensor.STATE_CLASS_MEASUREMENT, default_enabled=False, ),", "BlockAttributeDescription( name=\"Gas Concentration\", unit=CONCENTRATION_PARTS_PER_MILLION, icon=\"mdi:gauge\", state_class=sensor.STATE_CLASS_MEASUREMENT, ), (\"sensor\", \"extTemp\"): BlockAttributeDescription(", "= { (\"device\", \"battery\"): BlockAttributeDescription( name=\"Battery\", unit=PERCENTAGE, device_class=sensor.DEVICE_CLASS_BATTERY, state_class=sensor.STATE_CLASS_MEASUREMENT, removal_condition=lambda", "unit=ELECTRIC_CURRENT_AMPERE, value=lambda value: value, device_class=sensor.DEVICE_CLASS_CURRENT, state_class=sensor.STATE_CLASS_MEASUREMENT, ), (\"light\", \"power\"): BlockAttributeDescription(", "state_class=sensor.STATE_CLASS_MEASUREMENT, ), (\"emeter\", \"powerFactor\"): BlockAttributeDescription( name=\"Power Factor\", unit=PERCENTAGE, value=lambda value:", "(\"adc\", \"adc\"): BlockAttributeDescription( name=\"ADC\", unit=ELECTRIC_POTENTIAL_VOLT, value=lambda value: round(value, 1), device_class=sensor.DEVICE_CLASS_VOLTAGE,", "SHAIR_MAX_WORK_HOURS from .entity import ( BlockAttributeDescription, RestAttributeDescription, ShellyBlockAttributeEntity, ShellyRestAttributeEntity, ShellySleepingBlockAttributeEntity,", "None ): value = cast(float, self.attribute_value) if self._last_value and self._last_value", "3600 / SHAIR_MAX_WORK_HOURS), 1), extra_state_attributes=lambda block: { \"Operational hours\": round(block.totalWorkTime", "unit=LIGHT_LUX, device_class=sensor.DEVICE_CLASS_ILLUMINANCE, state_class=sensor.STATE_CLASS_MEASUREMENT, ), (\"sensor\", \"tilt\"): BlockAttributeDescription( name=\"Tilt\", unit=DEGREE, icon=\"mdi:angle-acute\",", "async_add_entities, REST_SENSORS, ShellyRestSensor ) class 
ShellySensor(ShellyBlockAttributeEntity, SensorEntity): \"\"\"Represent a shelly", "{ \"rssi\": RestAttributeDescription( name=\"RSSI\", unit=SIGNAL_STRENGTH_DECIBELS_MILLIWATT, value=lambda status, _: status[\"wifi_sta\"][\"rssi\"], device_class=sensor.DEVICE_CLASS_SIGNAL_STRENGTH,", "value: round(value, 1), device_class=sensor.DEVICE_CLASS_VOLTAGE, state_class=sensor.STATE_CLASS_MEASUREMENT, ), (\"sensor\", \"sensorOp\"): BlockAttributeDescription( name=\"Operation\",", "value=lambda value: round(value / 1000, 2), device_class=sensor.DEVICE_CLASS_ENERGY, state_class=sensor.STATE_CLASS_MEASUREMENT, last_reset=LAST_RESET_NEVER, ),", "BlockAttributeDescription( name=\"Energy\", unit=ENERGY_KILO_WATT_HOUR, value=lambda value: round(value / 60 / 1000,", "BlockAttributeDescription( name=\"Energy\", unit=ENERGY_KILO_WATT_HOUR, value=lambda value: round(value / 1000, 2), device_class=sensor.DEVICE_CLASS_ENERGY,", "SHAIR_MAX_WORK_HOURS), 1), extra_state_attributes=lambda block: { \"Operational hours\": round(block.totalWorkTime / 3600,", "unit=PERCENTAGE, device_class=sensor.DEVICE_CLASS_BATTERY, state_class=sensor.STATE_CLASS_MEASUREMENT, removal_condition=lambda settings, _: settings.get(\"external_power\") == 1, ),", "icon=\"mdi:angle-acute\", state_class=sensor.STATE_CLASS_MEASUREMENT, ), (\"relay\", \"totalWorkTime\"): BlockAttributeDescription( name=\"Lamp Life\", unit=PERCENTAGE, icon=\"mdi:progress-wrench\",", "typing import Final, cast import aioshelly from homeassistant.components import sensor", "| None: \"\"\"Return unit of sensor.\"\"\" return cast(str, self._unit) class", "), (\"light\", \"energy\"): BlockAttributeDescription( name=\"Energy\", unit=ENERGY_KILO_WATT_HOUR, value=lambda value: round(value /", "None = None if description.last_reset == LAST_RESET_NEVER: self._attr_last_reset = dt.utc_from_timestamp(0)", "(\"emeter\", \"power\"): BlockAttributeDescription( name=\"Power\", unit=POWER_WATT, value=lambda value: round(value, 1), 
device_class=sensor.DEVICE_CLASS_POWER,", "device_class=sensor.DEVICE_CLASS_TEMPERATURE, state_class=sensor.STATE_CLASS_MEASUREMENT, default_enabled=False, ), (\"emeter\", \"current\"): BlockAttributeDescription( name=\"Current\", unit=ELECTRIC_CURRENT_AMPERE, value=lambda", "Life\", unit=PERCENTAGE, icon=\"mdi:progress-wrench\", value=lambda value: round(100 - (value / 3600", "60 / 1000, 2), device_class=sensor.DEVICE_CLASS_ENERGY, state_class=sensor.STATE_CLASS_MEASUREMENT, last_reset=LAST_RESET_UPTIME, ), (\"sensor\", \"concentration\"):", "ShellyRestSensor ) class ShellySensor(ShellyBlockAttributeEntity, SensorEntity): \"\"\"Represent a shelly sensor.\"\"\" def", "), } REST_SENSORS: Final = { \"rssi\": RestAttributeDescription( name=\"RSSI\", unit=SIGNAL_STRENGTH_DECIBELS_MILLIWATT,", "of sensor.\"\"\" return self.description.state_class @property def unit_of_measurement(self) -> str |", "up sensors for device.\"\"\" if config_entry.data[\"sleep_period\"]: await async_setup_entry_attribute_entities( hass, config_entry,", "(\"emeter\", \"energyReturned\"): BlockAttributeDescription( name=\"Energy Returned\", unit=ENERGY_KILO_WATT_HOUR, value=lambda value: round(value /", "Temperature\", unit=temperature_unit, value=lambda value: round(value, 1), device_class=sensor.DEVICE_CLASS_TEMPERATURE, state_class=sensor.STATE_CLASS_MEASUREMENT, default_enabled=False, ),", "SensorEntity): \"\"\"Represent a shelly sensor.\"\"\" def __init__( self, wrapper: ShellyDeviceWrapper,", "BlockAttributeDescription( name=\"Tilt\", unit=DEGREE, icon=\"mdi:angle-acute\", state_class=sensor.STATE_CLASS_MEASUREMENT, ), (\"relay\", \"totalWorkTime\"): BlockAttributeDescription( name=\"Lamp", "if ( self.description.last_reset == LAST_RESET_UPTIME and self.attribute_value is not None", "(\"roller\", \"rollerEnergy\"): BlockAttributeDescription( name=\"Energy\", unit=ENERGY_KILO_WATT_HOUR, value=lambda value: round(value / 60", "from typing import Final, cast import aioshelly from 
homeassistant.components import", "unit=POWER_WATT, value=lambda value: round(value, 1), device_class=sensor.DEVICE_CLASS_POWER, state_class=sensor.STATE_CLASS_MEASUREMENT, ), (\"device\", \"energy\"):", "2), device_class=sensor.DEVICE_CLASS_ENERGY, state_class=sensor.STATE_CLASS_MEASUREMENT, last_reset=LAST_RESET_NEVER, ), (\"light\", \"energy\"): BlockAttributeDescription( name=\"Energy\", unit=ENERGY_KILO_WATT_HOUR,", "= None if description.last_reset == LAST_RESET_NEVER: self._attr_last_reset = dt.utc_from_timestamp(0) elif", "import Final, cast import aioshelly from homeassistant.components import sensor from", "1), device_class=sensor.DEVICE_CLASS_POWER, state_class=sensor.STATE_CLASS_MEASUREMENT, ), (\"roller\", \"rollerPower\"): BlockAttributeDescription( name=\"Power\", unit=POWER_WATT, value=lambda", "config_entry, async_add_entities, REST_SENSORS, ShellyRestSensor ) class ShellySensor(ShellyBlockAttributeEntity, SensorEntity): \"\"\"Represent a", "state(self) -> StateType: \"\"\"Return value of sensor.\"\"\" if ( self.description.last_reset", "and self._last_value > value: self._attr_last_reset = dt.utcnow().replace(second=0, microsecond=0) _LOGGER.info(\"Energy reset", "== LAST_RESET_UPTIME and self.attribute_value is not None ): value =", "StateType: \"\"\"Return value of sensor.\"\"\" if self.block is not None:", "self._attr_last_reset = ( dt.utcnow() - timedelta(seconds=wrapper.device.status[\"uptime\"]) ).replace(second=0, microsecond=0) @property def", "timedelta(seconds=wrapper.device.status[\"uptime\"]) ).replace(second=0, microsecond=0) @property def state(self) -> StateType: \"\"\"Return value", "for Shelly.\"\"\" from __future__ import annotations from datetime import timedelta", "value: round(value, 1), device_class=sensor.DEVICE_CLASS_POWER, state_class=sensor.STATE_CLASS_MEASUREMENT, ), (\"roller\", \"rollerPower\"): BlockAttributeDescription( name=\"Power\",", "ShellySleepingSensor ) else: await async_setup_entry_attribute_entities( hass, 
config_entry, async_add_entities, SENSORS, ShellySensor", "state_class=sensor.STATE_CLASS_MEASUREMENT, last_reset=LAST_RESET_NEVER, ), (\"emeter\", \"energyReturned\"): BlockAttributeDescription( name=\"Energy Returned\", unit=ENERGY_KILO_WATT_HOUR, value=lambda", ") else: await async_setup_entry_attribute_entities( hass, config_entry, async_add_entities, SENSORS, ShellySensor )", "-> None: \"\"\"Set up sensors for device.\"\"\" if config_entry.data[\"sleep_period\"]: await", "REST_SENSORS, ShellyRestSensor ) class ShellySensor(ShellyBlockAttributeEntity, SensorEntity): \"\"\"Represent a shelly sensor.\"\"\"", "aioshelly.Block, attribute: str, description: BlockAttributeDescription, ) -> None: \"\"\"Initialize sensor.\"\"\"", "device_class=sensor.DEVICE_CLASS_ENERGY, state_class=sensor.STATE_CLASS_MEASUREMENT, last_reset=LAST_RESET_UPTIME, ), (\"sensor\", \"concentration\"): BlockAttributeDescription( name=\"Gas Concentration\", unit=CONCENTRATION_PARTS_PER_MILLION,", "async_add_entities: AddEntitiesCallback, ) -> None: \"\"\"Set up sensors for device.\"\"\"", "None: \"\"\"Set up sensors for device.\"\"\" if config_entry.data[\"sleep_period\"]: await async_setup_entry_attribute_entities(", "/ 60 / 1000, 2), device_class=sensor.DEVICE_CLASS_ENERGY, state_class=sensor.STATE_CLASS_MEASUREMENT, last_reset=LAST_RESET_UPTIME, ), (\"emeter\",", "def __init__( self, wrapper: ShellyDeviceWrapper, block: aioshelly.Block, attribute: str, description:", "settings.get(\"external_power\") == 1, ), (\"device\", \"deviceTemp\"): BlockAttributeDescription( name=\"Device Temperature\", unit=temperature_unit,", "value=lambda value: round(value, 1), device_class=sensor.DEVICE_CLASS_TEMPERATURE, state_class=sensor.STATE_CLASS_MEASUREMENT, default_enabled=False, ), (\"emeter\", \"current\"):", "microsecond=0) _LOGGER.info(\"Energy reset detected for entity %s\", self.name) self._last_value =", "2), device_class=sensor.DEVICE_CLASS_ENERGY, state_class=sensor.STATE_CLASS_MEASUREMENT, 
default_enabled=False, last_reset=LAST_RESET_UPTIME, ), (\"relay\", \"energy\"): BlockAttributeDescription( name=\"Energy\",", "def state_class(self) -> str | None: \"\"\"State class of sensor.\"\"\"", ". import ShellyDeviceWrapper from .const import LAST_RESET_NEVER, LAST_RESET_UPTIME, SHAIR_MAX_WORK_HOURS from", "), (\"device\", \"energy\"): BlockAttributeDescription( name=\"Energy\", unit=ENERGY_KILO_WATT_HOUR, value=lambda value: round(value /", "1000, 2), device_class=sensor.DEVICE_CLASS_ENERGY, state_class=sensor.STATE_CLASS_MEASUREMENT, last_reset=LAST_RESET_NEVER, ), (\"emeter\", \"energyReturned\"): BlockAttributeDescription( name=\"Energy", "value=lambda value: round(value, 1), device_class=sensor.DEVICE_CLASS_POWER, state_class=sensor.STATE_CLASS_MEASUREMENT, ), (\"emeter\", \"power\"): BlockAttributeDescription(", "name=\"Uptime\", value=get_device_uptime, device_class=sensor.DEVICE_CLASS_TIMESTAMP, default_enabled=False, ), } async def async_setup_entry( hass:", "\"energyReturned\"): BlockAttributeDescription( name=\"Energy Returned\", unit=ENERGY_KILO_WATT_HOUR, value=lambda value: round(value / 1000,", "state_class=sensor.STATE_CLASS_MEASUREMENT, ), (\"relay\", \"totalWorkTime\"): BlockAttributeDescription( name=\"Lamp Life\", unit=PERCENTAGE, icon=\"mdi:progress-wrench\", value=lambda", "BlockAttributeDescription( name=\"Operation\", icon=\"mdi:cog-transfer\", value=lambda value: value, extra_state_attributes=lambda block: {\"self_test\": block.selfTest},", "value=lambda value: round(value, 1), device_class=sensor.DEVICE_CLASS_POWER, state_class=sensor.STATE_CLASS_MEASUREMENT, ), (\"device\", \"energy\"): BlockAttributeDescription(", "unit=PERCENTAGE, icon=\"mdi:progress-wrench\", value=lambda value: round(100 - (value / 3600 /", "BlockAttributeDescription, RestAttributeDescription, ShellyBlockAttributeEntity, ShellyRestAttributeEntity, ShellySleepingBlockAttributeEntity, async_setup_entry_attribute_entities, async_setup_entry_rest, ) from .utils", 
"import ( CONCENTRATION_PARTS_PER_MILLION, DEGREE, ELECTRIC_CURRENT_AMPERE, ELECTRIC_POTENTIAL_VOLT, ENERGY_KILO_WATT_HOUR, LIGHT_LUX, PERCENTAGE, POWER_WATT,", "value: round(value / 1000, 2), device_class=sensor.DEVICE_CLASS_ENERGY, state_class=sensor.STATE_CLASS_MEASUREMENT, last_reset=LAST_RESET_NEVER, ), (\"light\",", "import ShellyDeviceWrapper from .const import LAST_RESET_NEVER, LAST_RESET_UPTIME, SHAIR_MAX_WORK_HOURS from .entity", "import LAST_RESET_NEVER, LAST_RESET_UPTIME, SHAIR_MAX_WORK_HOURS from .entity import ( BlockAttributeDescription, RestAttributeDescription,", "value=lambda value: round(value, 1), device_class=sensor.DEVICE_CLASS_POWER, state_class=sensor.STATE_CLASS_MEASUREMENT, ), (\"emeter\", \"voltage\"): BlockAttributeDescription(", "3600, 1) }, ), (\"adc\", \"adc\"): BlockAttributeDescription( name=\"ADC\", unit=ELECTRIC_POTENTIAL_VOLT, value=lambda", "\"rollerPower\"): BlockAttributeDescription( name=\"Power\", unit=POWER_WATT, value=lambda value: round(value, 1), device_class=sensor.DEVICE_CLASS_POWER, state_class=sensor.STATE_CLASS_MEASUREMENT,", "BlockAttributeDescription( name=\"Power\", unit=POWER_WATT, value=lambda value: round(value, 1), device_class=sensor.DEVICE_CLASS_POWER, state_class=sensor.STATE_CLASS_MEASUREMENT, default_enabled=False,", "- (value / 3600 / SHAIR_MAX_WORK_HOURS), 1), extra_state_attributes=lambda block: {", "sensor.\"\"\" return self.description.state_class @property def unit_of_measurement(self) -> str | None:", "state_class=sensor.STATE_CLASS_MEASUREMENT, ), (\"emeter\", \"power\"): BlockAttributeDescription( name=\"Power\", unit=POWER_WATT, value=lambda value: round(value,", "REST sensor.\"\"\" @property def state(self) -> StateType: \"\"\"Return value of", "status[\"wifi_sta\"][\"rssi\"], device_class=sensor.DEVICE_CLASS_SIGNAL_STRENGTH, state_class=sensor.STATE_CLASS_MEASUREMENT, default_enabled=False, ), \"uptime\": RestAttributeDescription( name=\"Uptime\", value=get_device_uptime, 
device_class=sensor.DEVICE_CLASS_TIMESTAMP,", "== 1, ), (\"device\", \"deviceTemp\"): BlockAttributeDescription( name=\"Device Temperature\", unit=temperature_unit, value=lambda", "block.extTemp != 999), ), (\"sensor\", \"humidity\"): BlockAttributeDescription( name=\"Humidity\", unit=PERCENTAGE, value=lambda", "state_class=sensor.STATE_CLASS_MEASUREMENT, ), (\"light\", \"power\"): BlockAttributeDescription( name=\"Power\", unit=POWER_WATT, value=lambda value: round(value,", "ShellySleepingSensor(ShellySleepingBlockAttributeEntity, SensorEntity): \"\"\"Represent a shelly sleeping sensor.\"\"\" @property def state(self)", "state_class=sensor.STATE_CLASS_MEASUREMENT, default_enabled=False, ), (\"emeter\", \"current\"): BlockAttributeDescription( name=\"Current\", unit=ELECTRIC_CURRENT_AMPERE, value=lambda value:", "ShellyBlockAttributeEntity, ShellyRestAttributeEntity, ShellySleepingBlockAttributeEntity, async_setup_entry_attribute_entities, async_setup_entry_rest, ) from .utils import get_device_uptime,", "\"\"\"Return unit of sensor.\"\"\" return cast(str, self._unit) class ShellyRestSensor(ShellyRestAttributeEntity, SensorEntity):", ".const import LAST_RESET_NEVER, LAST_RESET_UPTIME, SHAIR_MAX_WORK_HOURS from .entity import ( BlockAttributeDescription,", "1), device_class=sensor.DEVICE_CLASS_POWER, state_class=sensor.STATE_CLASS_MEASUREMENT, ), (\"emeter\", \"voltage\"): BlockAttributeDescription( name=\"Voltage\", unit=ELECTRIC_POTENTIAL_VOLT, value=lambda", "block: cast(bool, block.extTemp != 999), ), (\"sensor\", \"luminosity\"): BlockAttributeDescription( name=\"Luminosity\",", "hours\": round(block.totalWorkTime / 3600, 1) }, ), (\"adc\", \"adc\"): BlockAttributeDescription(", "state_class=sensor.STATE_CLASS_MEASUREMENT, default_enabled=False, ), (\"device\", \"power\"): BlockAttributeDescription( name=\"Power\", unit=POWER_WATT, value=lambda value:", "), (\"sensor\", \"extTemp\"): BlockAttributeDescription( name=\"Temperature\", unit=temperature_unit, 
value=lambda value: round(value, 1),", "unit=POWER_WATT, value=lambda value: round(value, 1), device_class=sensor.DEVICE_CLASS_POWER, state_class=sensor.STATE_CLASS_MEASUREMENT, ), (\"emeter\", \"voltage\"):", "self.attribute_value) if self._last_value and self._last_value > value: self._attr_last_reset = dt.utcnow().replace(second=0,", "value of sensor.\"\"\" if ( self.description.last_reset == LAST_RESET_UPTIME and self.attribute_value", "attribute, description) self._last_value: float | None = None if description.last_reset", "get_device_uptime, temperature_unit _LOGGER: Final = logging.getLogger(__name__) SENSORS: Final = {", "ConfigEntry from homeassistant.const import ( CONCENTRATION_PARTS_PER_MILLION, DEGREE, ELECTRIC_CURRENT_AMPERE, ELECTRIC_POTENTIAL_VOLT, ENERGY_KILO_WATT_HOUR,", "StateType: \"\"\"Return value of sensor.\"\"\" if ( self.description.last_reset == LAST_RESET_UPTIME", "annotations from datetime import timedelta import logging from typing import", "cast(float, self.attribute_value) if self._last_value and self._last_value > value: self._attr_last_reset =", "of sensor.\"\"\" return self.description.unit class ShellySleepingSensor(ShellySleepingBlockAttributeEntity, SensorEntity): \"\"\"Represent a shelly", "), (\"emeter\", \"current\"): BlockAttributeDescription( name=\"Current\", unit=ELECTRIC_CURRENT_AMPERE, value=lambda value: value, device_class=sensor.DEVICE_CLASS_CURRENT,", "homeassistant.helpers.entity_platform import AddEntitiesCallback from homeassistant.helpers.typing import StateType from homeassistant.util import", "sensor.\"\"\" super().__init__(wrapper, block, attribute, description) self._last_value: float | None =", "name=\"Energy\", unit=ENERGY_KILO_WATT_HOUR, value=lambda value: round(value / 1000, 2), device_class=sensor.DEVICE_CLASS_ENERGY, state_class=sensor.STATE_CLASS_MEASUREMENT,", "-> StateType: \"\"\"Return value of sensor.\"\"\" if self.block is not", "__init__( self, wrapper: ShellyDeviceWrapper, block: 
aioshelly.Block, attribute: str, description: BlockAttributeDescription,", "1000, 2), device_class=sensor.DEVICE_CLASS_ENERGY, state_class=sensor.STATE_CLASS_MEASUREMENT, last_reset=LAST_RESET_NEVER, ), (\"light\", \"energy\"): BlockAttributeDescription( name=\"Energy\",", "/ 1000, 2), device_class=sensor.DEVICE_CLASS_ENERGY, state_class=sensor.STATE_CLASS_MEASUREMENT, last_reset=LAST_RESET_UPTIME, ), (\"roller\", \"rollerEnergy\"): BlockAttributeDescription(", "sensor.\"\"\" return self.description.unit class ShellySleepingSensor(ShellySleepingBlockAttributeEntity, SensorEntity): \"\"\"Represent a shelly sleeping", "1), device_class=sensor.DEVICE_CLASS_TEMPERATURE, state_class=sensor.STATE_CLASS_MEASUREMENT, default_enabled=False, ), (\"emeter\", \"current\"): BlockAttributeDescription( name=\"Current\", unit=ELECTRIC_CURRENT_AMPERE,", "), (\"emeter\", \"power\"): BlockAttributeDescription( name=\"Power\", unit=POWER_WATT, value=lambda value: round(value, 1),", "\"energy\"): BlockAttributeDescription( name=\"Energy\", unit=ENERGY_KILO_WATT_HOUR, value=lambda value: round(value / 1000, 2),", "config_entry: ConfigEntry, async_add_entities: AddEntitiesCallback, ) -> None: \"\"\"Set up sensors", "description) self._last_value: float | None = None if description.last_reset ==", "unit of sensor.\"\"\" return cast(str, self._unit) class ShellyRestSensor(ShellyRestAttributeEntity, SensorEntity): \"\"\"Represent", "None if description.last_reset == LAST_RESET_NEVER: self._attr_last_reset = dt.utc_from_timestamp(0) elif description.last_reset", "), (\"device\", \"deviceTemp\"): BlockAttributeDescription( name=\"Device Temperature\", unit=temperature_unit, value=lambda value: round(value,", "AddEntitiesCallback, ) -> None: \"\"\"Set up sensors for device.\"\"\" if", "value: round(value, 1), device_class=sensor.DEVICE_CLASS_POWER, state_class=sensor.STATE_CLASS_MEASUREMENT, ), (\"emeter\", \"voltage\"): BlockAttributeDescription( name=\"Voltage\",", "default_enabled=False, ), 
(\"device\", \"power\"): BlockAttributeDescription( name=\"Power\", unit=POWER_WATT, value=lambda value: round(value,", "description.last_reset == LAST_RESET_UPTIME: self._attr_last_reset = ( dt.utcnow() - timedelta(seconds=wrapper.device.status[\"uptime\"]) ).replace(second=0,", "RestAttributeDescription, ShellyBlockAttributeEntity, ShellyRestAttributeEntity, ShellySleepingBlockAttributeEntity, async_setup_entry_attribute_entities, async_setup_entry_rest, ) from .utils import", "device_class=sensor.DEVICE_CLASS_ENERGY, state_class=sensor.STATE_CLASS_MEASUREMENT, default_enabled=False, last_reset=LAST_RESET_UPTIME, ), (\"relay\", \"energy\"): BlockAttributeDescription( name=\"Energy\", unit=ENERGY_KILO_WATT_HOUR,", "name=\"Lamp Life\", unit=PERCENTAGE, icon=\"mdi:progress-wrench\", value=lambda value: round(100 - (value /", "from . import ShellyDeviceWrapper from .const import LAST_RESET_NEVER, LAST_RESET_UPTIME, SHAIR_MAX_WORK_HOURS", "BlockAttributeDescription( name=\"Luminosity\", unit=LIGHT_LUX, device_class=sensor.DEVICE_CLASS_ILLUMINANCE, state_class=sensor.STATE_CLASS_MEASUREMENT, ), (\"sensor\", \"tilt\"): BlockAttributeDescription( name=\"Tilt\",", "(\"roller\", \"rollerPower\"): BlockAttributeDescription( name=\"Power\", unit=POWER_WATT, value=lambda value: round(value, 1), device_class=sensor.DEVICE_CLASS_POWER,", "device_class=sensor.DEVICE_CLASS_POWER, state_class=sensor.STATE_CLASS_MEASUREMENT, ), (\"emeter\", \"voltage\"): BlockAttributeDescription( name=\"Voltage\", unit=ELECTRIC_POTENTIAL_VOLT, value=lambda value:", "LAST_RESET_UPTIME and self.attribute_value is not None ): value = cast(float,", "return self.attribute_value return self.last_state @property def state_class(self) -> str |", "(\"sensor\", \"humidity\"): BlockAttributeDescription( name=\"Humidity\", unit=PERCENTAGE, value=lambda value: round(value, 1), device_class=sensor.DEVICE_CLASS_HUMIDITY,", "last_reset=LAST_RESET_UPTIME, ), (\"sensor\", \"concentration\"): 
BlockAttributeDescription( name=\"Gas Concentration\", unit=CONCENTRATION_PARTS_PER_MILLION, icon=\"mdi:gauge\", state_class=sensor.STATE_CLASS_MEASUREMENT,", "extra_state_attributes=lambda block: { \"Operational hours\": round(block.totalWorkTime / 3600, 1) },", "1), device_class=sensor.DEVICE_CLASS_VOLTAGE, state_class=sensor.STATE_CLASS_MEASUREMENT, ), (\"sensor\", \"sensorOp\"): BlockAttributeDescription( name=\"Operation\", icon=\"mdi:cog-transfer\", value=lambda", "999), ), (\"sensor\", \"luminosity\"): BlockAttributeDescription( name=\"Luminosity\", unit=LIGHT_LUX, device_class=sensor.DEVICE_CLASS_ILLUMINANCE, state_class=sensor.STATE_CLASS_MEASUREMENT, ),", "self.attribute_value is not None ): value = cast(float, self.attribute_value) if", "name=\"Energy\", unit=ENERGY_KILO_WATT_HOUR, value=lambda value: round(value / 60 / 1000, 2),", "state(self) -> StateType: \"\"\"Return value of sensor.\"\"\" if self.block is", "value=lambda value: round(value / 60 / 1000, 2), device_class=sensor.DEVICE_CLASS_ENERGY, state_class=sensor.STATE_CLASS_MEASUREMENT,", "status, _: status[\"wifi_sta\"][\"rssi\"], device_class=sensor.DEVICE_CLASS_SIGNAL_STRENGTH, state_class=sensor.STATE_CLASS_MEASUREMENT, default_enabled=False, ), \"uptime\": RestAttributeDescription( name=\"Uptime\",", "BlockAttributeDescription( name=\"Temperature\", unit=temperature_unit, value=lambda value: round(value, 1), device_class=sensor.DEVICE_CLASS_TEMPERATURE, state_class=sensor.STATE_CLASS_MEASUREMENT, available=lambda", "= dt.utc_from_timestamp(0) elif description.last_reset == LAST_RESET_UPTIME: self._attr_last_reset = ( dt.utcnow()", "\"concentration\"): BlockAttributeDescription( name=\"Gas Concentration\", unit=CONCENTRATION_PARTS_PER_MILLION, icon=\"mdi:gauge\", state_class=sensor.STATE_CLASS_MEASUREMENT, ), (\"sensor\", \"extTemp\"):", "self.block is not None: return self.attribute_value return self.last_state @property def", "1), extra_state_attributes=lambda block: { \"Operational 
hours\": round(block.totalWorkTime / 3600, 1)", ") -> None: \"\"\"Set up sensors for device.\"\"\" if config_entry.data[\"sleep_period\"]:", "BlockAttributeDescription, ) -> None: \"\"\"Initialize sensor.\"\"\" super().__init__(wrapper, block, attribute, description)", "ShellyRestAttributeEntity, ShellySleepingBlockAttributeEntity, async_setup_entry_attribute_entities, async_setup_entry_rest, ) from .utils import get_device_uptime, temperature_unit", "!= 999), ), (\"sensor\", \"luminosity\"): BlockAttributeDescription( name=\"Luminosity\", unit=LIGHT_LUX, device_class=sensor.DEVICE_CLASS_ILLUMINANCE, state_class=sensor.STATE_CLASS_MEASUREMENT,", "shelly sleeping sensor.\"\"\" @property def state(self) -> StateType: \"\"\"Return value", "\"\"\"Return value of sensor.\"\"\" if self.block is not None: return", "device.\"\"\" if config_entry.data[\"sleep_period\"]: await async_setup_entry_attribute_entities( hass, config_entry, async_add_entities, SENSORS, ShellySleepingSensor", "device_class=sensor.DEVICE_CLASS_POWER_FACTOR, state_class=sensor.STATE_CLASS_MEASUREMENT, ), (\"relay\", \"power\"): BlockAttributeDescription( name=\"Power\", unit=POWER_WATT, value=lambda value:", "value=lambda value: value, device_class=sensor.DEVICE_CLASS_CURRENT, state_class=sensor.STATE_CLASS_MEASUREMENT, ), (\"light\", \"power\"): BlockAttributeDescription( name=\"Power\",", "import SensorEntity from homeassistant.config_entries import ConfigEntry from homeassistant.const import (", "CONCENTRATION_PARTS_PER_MILLION, DEGREE, ELECTRIC_CURRENT_AMPERE, ELECTRIC_POTENTIAL_VOLT, ENERGY_KILO_WATT_HOUR, LIGHT_LUX, PERCENTAGE, POWER_WATT, SIGNAL_STRENGTH_DECIBELS_MILLIWATT, )", "-> StateType: \"\"\"Return value of sensor.\"\"\" if ( self.description.last_reset ==", "__future__ import annotations from datetime import timedelta import logging from", "round(value / 60 / 1000, 2), device_class=sensor.DEVICE_CLASS_ENERGY, state_class=sensor.STATE_CLASS_MEASUREMENT, last_reset=LAST_RESET_UPTIME, ),", 
"None: return self.attribute_value return self.last_state @property def state_class(self) -> str", "device_class=sensor.DEVICE_CLASS_HUMIDITY, state_class=sensor.STATE_CLASS_MEASUREMENT, available=lambda block: cast(bool, block.extTemp != 999), ), (\"sensor\",", "dt.utc_from_timestamp(0) elif description.last_reset == LAST_RESET_UPTIME: self._attr_last_reset = ( dt.utcnow() -", "ELECTRIC_POTENTIAL_VOLT, ENERGY_KILO_WATT_HOUR, LIGHT_LUX, PERCENTAGE, POWER_WATT, SIGNAL_STRENGTH_DECIBELS_MILLIWATT, ) from homeassistant.core import", "None: \"\"\"State class of sensor.\"\"\" return self.description.state_class @property def unit_of_measurement(self)", "block: aioshelly.Block, attribute: str, description: BlockAttributeDescription, ) -> None: \"\"\"Initialize", "last_reset=LAST_RESET_UPTIME, ), (\"emeter\", \"energy\"): BlockAttributeDescription( name=\"Energy\", unit=ENERGY_KILO_WATT_HOUR, value=lambda value: round(value", "device_class=sensor.DEVICE_CLASS_POWER, state_class=sensor.STATE_CLASS_MEASUREMENT, default_enabled=False, ), (\"device\", \"power\"): BlockAttributeDescription( name=\"Power\", unit=POWER_WATT, value=lambda", "name=\"RSSI\", unit=SIGNAL_STRENGTH_DECIBELS_MILLIWATT, value=lambda status, _: status[\"wifi_sta\"][\"rssi\"], device_class=sensor.DEVICE_CLASS_SIGNAL_STRENGTH, state_class=sensor.STATE_CLASS_MEASUREMENT, default_enabled=False, ),", "return cast(str, self._unit) class ShellyRestSensor(ShellyRestAttributeEntity, SensorEntity): \"\"\"Represent a shelly REST", "value=lambda value: round(value, 1), device_class=sensor.DEVICE_CLASS_VOLTAGE, state_class=sensor.STATE_CLASS_MEASUREMENT, ), (\"emeter\", \"powerFactor\"): BlockAttributeDescription(", "2), device_class=sensor.DEVICE_CLASS_ENERGY, state_class=sensor.STATE_CLASS_MEASUREMENT, last_reset=LAST_RESET_UPTIME, ), (\"roller\", \"rollerEnergy\"): BlockAttributeDescription( name=\"Energy\", unit=ENERGY_KILO_WATT_HOUR,", "class ShellySleepingSensor(ShellySleepingBlockAttributeEntity, 
SensorEntity): \"\"\"Represent a shelly sleeping sensor.\"\"\" @property def", "(\"device\", \"battery\"): BlockAttributeDescription( name=\"Battery\", unit=PERCENTAGE, device_class=sensor.DEVICE_CLASS_BATTERY, state_class=sensor.STATE_CLASS_MEASUREMENT, removal_condition=lambda settings, _:", "), (\"roller\", \"rollerEnergy\"): BlockAttributeDescription( name=\"Energy\", unit=ENERGY_KILO_WATT_HOUR, value=lambda value: round(value /", "ENERGY_KILO_WATT_HOUR, LIGHT_LUX, PERCENTAGE, POWER_WATT, SIGNAL_STRENGTH_DECIBELS_MILLIWATT, ) from homeassistant.core import HomeAssistant", "value, extra_state_attributes=lambda block: {\"self_test\": block.selfTest}, ), } REST_SENSORS: Final =", "state_class=sensor.STATE_CLASS_MEASUREMENT, ), (\"sensor\", \"extTemp\"): BlockAttributeDescription( name=\"Temperature\", unit=temperature_unit, value=lambda value: round(value,", "round(value * 100, 1), device_class=sensor.DEVICE_CLASS_POWER_FACTOR, state_class=sensor.STATE_CLASS_MEASUREMENT, ), (\"relay\", \"power\"): BlockAttributeDescription(", "), (\"adc\", \"adc\"): BlockAttributeDescription( name=\"ADC\", unit=ELECTRIC_POTENTIAL_VOLT, value=lambda value: round(value, 1),", "from .utils import get_device_uptime, temperature_unit _LOGGER: Final = logging.getLogger(__name__) SENSORS:", "name=\"Operation\", icon=\"mdi:cog-transfer\", value=lambda value: value, extra_state_attributes=lambda block: {\"self_test\": block.selfTest}, ),", "1), device_class=sensor.DEVICE_CLASS_VOLTAGE, state_class=sensor.STATE_CLASS_MEASUREMENT, ), (\"emeter\", \"powerFactor\"): BlockAttributeDescription( name=\"Power Factor\", unit=PERCENTAGE,", "None: \"\"\"Initialize sensor.\"\"\" super().__init__(wrapper, block, attribute, description) self._last_value: float |", "60 / 1000, 2), device_class=sensor.DEVICE_CLASS_ENERGY, state_class=sensor.STATE_CLASS_MEASUREMENT, last_reset=LAST_RESET_UPTIME, ), (\"emeter\", \"energy\"):", "last_reset=LAST_RESET_UPTIME, ), (\"roller\", \"rollerEnergy\"): 
BlockAttributeDescription( name=\"Energy\", unit=ENERGY_KILO_WATT_HOUR, value=lambda value: round(value", "unit=PERCENTAGE, value=lambda value: round(value, 1), device_class=sensor.DEVICE_CLASS_HUMIDITY, state_class=sensor.STATE_CLASS_MEASUREMENT, available=lambda block: cast(bool,", "), (\"relay\", \"power\"): BlockAttributeDescription( name=\"Power\", unit=POWER_WATT, value=lambda value: round(value, 1),", "/ 1000, 2), device_class=sensor.DEVICE_CLASS_ENERGY, state_class=sensor.STATE_CLASS_MEASUREMENT, last_reset=LAST_RESET_UPTIME, ), (\"emeter\", \"energy\"): BlockAttributeDescription(", "and self.attribute_value is not None ): value = cast(float, self.attribute_value)", "value=lambda value: round(value, 1), device_class=sensor.DEVICE_CLASS_POWER, state_class=sensor.STATE_CLASS_MEASUREMENT, default_enabled=False, ), (\"device\", \"power\"):", "= ( dt.utcnow() - timedelta(seconds=wrapper.device.status[\"uptime\"]) ).replace(second=0, microsecond=0) @property def state(self)", "import sensor from homeassistant.components.sensor import SensorEntity from homeassistant.config_entries import ConfigEntry", "dt.utcnow() - timedelta(seconds=wrapper.device.status[\"uptime\"]) ).replace(second=0, microsecond=0) @property def state(self) -> StateType:", "name=\"Temperature\", unit=temperature_unit, value=lambda value: round(value, 1), device_class=sensor.DEVICE_CLASS_TEMPERATURE, state_class=sensor.STATE_CLASS_MEASUREMENT, available=lambda block:", "of sensor.\"\"\" return cast(str, self._unit) class ShellyRestSensor(ShellyRestAttributeEntity, SensorEntity): \"\"\"Represent a", "\"\"\"Represent a shelly REST sensor.\"\"\" @property def state(self) -> StateType:", "return self.attribute_value @property def state_class(self) -> str | None: \"\"\"State", "round(value, 1), device_class=sensor.DEVICE_CLASS_POWER, state_class=sensor.STATE_CLASS_MEASUREMENT, ), (\"device\", \"energy\"): BlockAttributeDescription( name=\"Energy\", unit=ENERGY_KILO_WATT_HOUR,", "value of 
sensor.\"\"\" if self.block is not None: return self.attribute_value", "value: value, extra_state_attributes=lambda block: {\"self_test\": block.selfTest}, ), } REST_SENSORS: Final", "(\"light\", \"power\"): BlockAttributeDescription( name=\"Power\", unit=POWER_WATT, value=lambda value: round(value, 1), device_class=sensor.DEVICE_CLASS_POWER,", "\"Operational hours\": round(block.totalWorkTime / 3600, 1) }, ), (\"adc\", \"adc\"):", "value=lambda value: round(100 - (value / 3600 / SHAIR_MAX_WORK_HOURS), 1),", "name=\"Energy Returned\", unit=ENERGY_KILO_WATT_HOUR, value=lambda value: round(value / 1000, 2), device_class=sensor.DEVICE_CLASS_ENERGY,", ") from homeassistant.core import HomeAssistant from homeassistant.helpers.entity_platform import AddEntitiesCallback from", "2), device_class=sensor.DEVICE_CLASS_ENERGY, state_class=sensor.STATE_CLASS_MEASUREMENT, last_reset=LAST_RESET_UPTIME, ), (\"emeter\", \"energy\"): BlockAttributeDescription( name=\"Energy\", unit=ENERGY_KILO_WATT_HOUR,", "\"voltage\"): BlockAttributeDescription( name=\"Voltage\", unit=ELECTRIC_POTENTIAL_VOLT, value=lambda value: round(value, 1), device_class=sensor.DEVICE_CLASS_VOLTAGE, state_class=sensor.STATE_CLASS_MEASUREMENT,", "AddEntitiesCallback from homeassistant.helpers.typing import StateType from homeassistant.util import dt from", "), (\"sensor\", \"luminosity\"): BlockAttributeDescription( name=\"Luminosity\", unit=LIGHT_LUX, device_class=sensor.DEVICE_CLASS_ILLUMINANCE, state_class=sensor.STATE_CLASS_MEASUREMENT, ), (\"sensor\"," ]
[ "QUERY_KWARGS['GET']['facet_size']['default'] = 3 QUERY_KWARGS['GET']['facet_size']['max'] = 5 QUERY_KWARGS['POST']['q']['jsoninput'] = True #", "'bts_test' ES_DOC_TYPE = 'gene' ES_SCROLL_SIZE = 60 # ***************************************************************************** #", "\"\"\" import os from biothings.web.settings.default import QUERY_KWARGS # ***************************************************************************** #", "= True # ***************************************************************************** # Elasticsearch Query Builder # *****************************************************************************", "Input Control # ***************************************************************************** # use a smaller size for", "Web settings to override for testing. \"\"\" import os from", "# Endpoints Specifics # ***************************************************************************** STATUS_CHECK = { 'id': '1017',", "# use a smaller size for testing QUERY_KWARGS['GET']['facet_size']['default'] = 3", "use a smaller size for testing QUERY_KWARGS['GET']['facet_size']['default'] = 3 QUERY_KWARGS['GET']['facet_size']['max']", "= 'gene' ES_SCROLL_SIZE = 60 # ***************************************************************************** # User Input", "USERQUERY_DIR = os.path.join(os.path.dirname(__file__), 'userquery') # ***************************************************************************** # Endpoints Specifics #", "= 3 QUERY_KWARGS['GET']['facet_size']['max'] = 5 QUERY_KWARGS['POST']['q']['jsoninput'] = True # *****************************************************************************", "# ***************************************************************************** ALLOW_RANDOM_QUERY = True ALLOW_NESTED_AGGS = True USERQUERY_DIR =", "***************************************************************************** # use a smaller size for testing QUERY_KWARGS['GET']['facet_size']['default'] =", "to override for testing. 
\"\"\" import os from biothings.web.settings.default import", "import os from biothings.web.settings.default import QUERY_KWARGS # ***************************************************************************** # Elasticsearch", "60 # ***************************************************************************** # User Input Control # ***************************************************************************** #", "# ***************************************************************************** # use a smaller size for testing QUERY_KWARGS['GET']['facet_size']['default']", "testing QUERY_KWARGS['GET']['facet_size']['default'] = 3 QUERY_KWARGS['GET']['facet_size']['max'] = 5 QUERY_KWARGS['POST']['q']['jsoninput'] = True", "'gene' ES_SCROLL_SIZE = 60 # ***************************************************************************** # User Input Control", "***************************************************************************** # Endpoints Specifics # ***************************************************************************** STATUS_CHECK = { 'id':", "from biothings.web.settings.default import QUERY_KWARGS # ***************************************************************************** # Elasticsearch Variables #", "import QUERY_KWARGS # ***************************************************************************** # Elasticsearch Variables # ***************************************************************************** ES_INDEX", "***************************************************************************** STATUS_CHECK = { 'id': '1017', 'index': 'bts_test', 'doc_type': '_all'", "for testing QUERY_KWARGS['GET']['facet_size']['default'] = 3 QUERY_KWARGS['GET']['facet_size']['max'] = 5 QUERY_KWARGS['POST']['q']['jsoninput'] =", "Control # ***************************************************************************** # use a smaller size for testing", "= 'bts_test' ES_DOC_TYPE = 'gene' ES_SCROLL_SIZE = 60 # 
*****************************************************************************", "True ALLOW_NESTED_AGGS = True USERQUERY_DIR = os.path.join(os.path.dirname(__file__), 'userquery') # *****************************************************************************", "Specifics # ***************************************************************************** STATUS_CHECK = { 'id': '1017', 'index': 'bts_test',", "testing. \"\"\" import os from biothings.web.settings.default import QUERY_KWARGS # *****************************************************************************", "QUERY_KWARGS # ***************************************************************************** # Elasticsearch Variables # ***************************************************************************** ES_INDEX =", "ALLOW_NESTED_AGGS = True USERQUERY_DIR = os.path.join(os.path.dirname(__file__), 'userquery') # ***************************************************************************** #", "# Elasticsearch Query Builder # ***************************************************************************** ALLOW_RANDOM_QUERY = True ALLOW_NESTED_AGGS", "# Elasticsearch Variables # ***************************************************************************** ES_INDEX = 'bts_test' ES_DOC_TYPE =", "True USERQUERY_DIR = os.path.join(os.path.dirname(__file__), 'userquery') # ***************************************************************************** # Endpoints Specifics", "\"\"\" Web settings to override for testing. 
\"\"\" import os", "# ***************************************************************************** # Elasticsearch Variables # ***************************************************************************** ES_INDEX = 'bts_test'", "ES_DOC_TYPE = 'gene' ES_SCROLL_SIZE = 60 # ***************************************************************************** # User", "os from biothings.web.settings.default import QUERY_KWARGS # ***************************************************************************** # Elasticsearch Variables", "3 QUERY_KWARGS['GET']['facet_size']['max'] = 5 QUERY_KWARGS['POST']['q']['jsoninput'] = True # ***************************************************************************** #", "Query Builder # ***************************************************************************** ALLOW_RANDOM_QUERY = True ALLOW_NESTED_AGGS = True", "STATUS_CHECK = { 'id': '1017', 'index': 'bts_test', 'doc_type': '_all' }", "ES_INDEX = 'bts_test' ES_DOC_TYPE = 'gene' ES_SCROLL_SIZE = 60 #", "biothings.web.settings.default import QUERY_KWARGS # ***************************************************************************** # Elasticsearch Variables # *****************************************************************************", "'userquery') # ***************************************************************************** # Endpoints Specifics # ***************************************************************************** STATUS_CHECK =", "for testing. 
\"\"\" import os from biothings.web.settings.default import QUERY_KWARGS #", "***************************************************************************** ES_INDEX = 'bts_test' ES_DOC_TYPE = 'gene' ES_SCROLL_SIZE = 60", "= True USERQUERY_DIR = os.path.join(os.path.dirname(__file__), 'userquery') # ***************************************************************************** # Endpoints", "User Input Control # ***************************************************************************** # use a smaller size", "***************************************************************************** # Elasticsearch Query Builder # ***************************************************************************** ALLOW_RANDOM_QUERY = True", "# User Input Control # ***************************************************************************** # use a smaller", "= 60 # ***************************************************************************** # User Input Control # *****************************************************************************", "os.path.join(os.path.dirname(__file__), 'userquery') # ***************************************************************************** # Endpoints Specifics # ***************************************************************************** STATUS_CHECK", "QUERY_KWARGS['POST']['q']['jsoninput'] = True # ***************************************************************************** # Elasticsearch Query Builder #", "# ***************************************************************************** # Elasticsearch Query Builder # ***************************************************************************** ALLOW_RANDOM_QUERY =", "***************************************************************************** # User Input Control # ***************************************************************************** # use a", "# ***************************************************************************** ES_INDEX = 'bts_test' ES_DOC_TYPE = 'gene' ES_SCROLL_SIZE =", 
"= True ALLOW_NESTED_AGGS = True USERQUERY_DIR = os.path.join(os.path.dirname(__file__), 'userquery') #", "settings to override for testing. \"\"\" import os from biothings.web.settings.default", "smaller size for testing QUERY_KWARGS['GET']['facet_size']['default'] = 3 QUERY_KWARGS['GET']['facet_size']['max'] = 5", "a smaller size for testing QUERY_KWARGS['GET']['facet_size']['default'] = 3 QUERY_KWARGS['GET']['facet_size']['max'] =", "Endpoints Specifics # ***************************************************************************** STATUS_CHECK = { 'id': '1017', 'index':", "True # ***************************************************************************** # Elasticsearch Query Builder # ***************************************************************************** ALLOW_RANDOM_QUERY", "Builder # ***************************************************************************** ALLOW_RANDOM_QUERY = True ALLOW_NESTED_AGGS = True USERQUERY_DIR", "ES_SCROLL_SIZE = 60 # ***************************************************************************** # User Input Control #", "override for testing. \"\"\" import os from biothings.web.settings.default import QUERY_KWARGS", "<filename>tests/web/config.py \"\"\" Web settings to override for testing. 
\"\"\" import", "5 QUERY_KWARGS['POST']['q']['jsoninput'] = True # ***************************************************************************** # Elasticsearch Query Builder", "***************************************************************************** # Elasticsearch Variables # ***************************************************************************** ES_INDEX = 'bts_test' ES_DOC_TYPE", "Elasticsearch Variables # ***************************************************************************** ES_INDEX = 'bts_test' ES_DOC_TYPE = 'gene'", "***************************************************************************** ALLOW_RANDOM_QUERY = True ALLOW_NESTED_AGGS = True USERQUERY_DIR = os.path.join(os.path.dirname(__file__),", "size for testing QUERY_KWARGS['GET']['facet_size']['default'] = 3 QUERY_KWARGS['GET']['facet_size']['max'] = 5 QUERY_KWARGS['POST']['q']['jsoninput']", "# ***************************************************************************** STATUS_CHECK = { 'id': '1017', 'index': 'bts_test', 'doc_type':", "= 5 QUERY_KWARGS['POST']['q']['jsoninput'] = True # ***************************************************************************** # Elasticsearch Query", "ALLOW_RANDOM_QUERY = True ALLOW_NESTED_AGGS = True USERQUERY_DIR = os.path.join(os.path.dirname(__file__), 'userquery')", "= os.path.join(os.path.dirname(__file__), 'userquery') # ***************************************************************************** # Endpoints Specifics # *****************************************************************************", "# ***************************************************************************** # Endpoints Specifics # ***************************************************************************** STATUS_CHECK = {", "# ***************************************************************************** # User Input Control # ***************************************************************************** # use", "Elasticsearch Query Builder # 
***************************************************************************** ALLOW_RANDOM_QUERY = True ALLOW_NESTED_AGGS =", "QUERY_KWARGS['GET']['facet_size']['max'] = 5 QUERY_KWARGS['POST']['q']['jsoninput'] = True # ***************************************************************************** # Elasticsearch", "Variables # ***************************************************************************** ES_INDEX = 'bts_test' ES_DOC_TYPE = 'gene' ES_SCROLL_SIZE" ]
[ "try: model.image.render_variations(replace=False) except FileNotFoundError: logger.error(f\"ERROR: Image file '{img}' is missing\")", "from django.db.utils import OperationalError, ProgrammingError from company.models import Company from", "importing a new dataset, for example \"\"\" import os import", "missing\") except UnidentifiedImageError: logger.error(f\"ERROR: Image file '{img}' is not a", "img.thumbnail.name loc = os.path.join(settings.MEDIA_ROOT, url) if not os.path.exists(loc): logger.info(f\"Generating thumbnail", "import logging from PIL import UnidentifiedImageError from django.core.management.base import BaseCommand", "file '{img}' is not a valid image\") def handle(self, *args,", "thumbnail specified by the \"image\" field of the provided model", "example \"\"\" import os import logging from PIL import UnidentifiedImageError", "import Part logger = logging.getLogger(\"inventree-thumbnails\") class Command(BaseCommand): \"\"\" Rebuild all", "logger.error(f\"ERROR: Image file '{img}' is not a valid image\") def", "\"\"\" Rebuild the thumbnail specified by the \"image\" field of", "rebuild thumbnail images - May be required after importing a", "not os.path.exists(loc): logger.info(f\"Generating thumbnail image for '{img}'\") try: model.image.render_variations(replace=False) except", "dataset, for example \"\"\" import os import logging from PIL", "file '{img}' is missing\") except UnidentifiedImageError: logger.error(f\"ERROR: Image file '{img}'", "= os.path.join(settings.MEDIA_ROOT, url) if not os.path.exists(loc): logger.info(f\"Generating thumbnail image for", "try: self.rebuild_thumbnail(part) except (OperationalError, ProgrammingError): logger.error(\"ERROR: Database read error.\") break", "from PIL import UnidentifiedImageError from django.core.management.base import BaseCommand from django.conf", "Custom management command to rebuild thumbnail images - May be", "model): \"\"\" Rebuild the thumbnail specified by the \"image\" field", 
"management command to rebuild thumbnail images - May be required", "Company thumbnails\") for company in Company.objects.exclude(image=None): try: self.rebuild_thumbnail(company) except (OperationalError,", "os.path.join(settings.MEDIA_ROOT, url) if not os.path.exists(loc): logger.info(f\"Generating thumbnail image for '{img}'\")", "image\") def handle(self, *args, **kwargs): logger.setLevel(logging.INFO) logger.info(\"Rebuilding Part thumbnails\") for", "UnidentifiedImageError: logger.error(f\"ERROR: Image file '{img}' is not a valid image\")", "is missing\") except UnidentifiedImageError: logger.error(f\"ERROR: Image file '{img}' is not", "- May be required after importing a new dataset, for", "loc = os.path.join(settings.MEDIA_ROOT, url) if not os.path.exists(loc): logger.info(f\"Generating thumbnail image", "part in Part.objects.exclude(image=None): try: self.rebuild_thumbnail(part) except (OperationalError, ProgrammingError): logger.error(\"ERROR: Database", "Database read error.\") break logger.info(\"Rebuilding Company thumbnails\") for company in", "from django.core.management.base import BaseCommand from django.conf import settings from django.db.utils", "class Command(BaseCommand): \"\"\" Rebuild all thumbnail images \"\"\" def rebuild_thumbnail(self,", "PIL import UnidentifiedImageError from django.core.management.base import BaseCommand from django.conf import", "Company.objects.exclude(image=None): try: self.rebuild_thumbnail(company) except (OperationalError, ProgrammingError): logger.error(\"ERROR: abase read error.\")", "logger.setLevel(logging.INFO) logger.info(\"Rebuilding Part thumbnails\") for part in Part.objects.exclude(image=None): try: self.rebuild_thumbnail(part)", "except UnidentifiedImageError: logger.error(f\"ERROR: Image file '{img}' is not a valid", "thumbnails\") for company in Company.objects.exclude(image=None): try: self.rebuild_thumbnail(company) except (OperationalError, ProgrammingError):", "\"\"\" def rebuild_thumbnail(self, 
model): \"\"\" Rebuild the thumbnail specified by", "the thumbnail specified by the \"image\" field of the provided", "handle(self, *args, **kwargs): logger.setLevel(logging.INFO) logger.info(\"Rebuilding Part thumbnails\") for part in", "in Part.objects.exclude(image=None): try: self.rebuild_thumbnail(part) except (OperationalError, ProgrammingError): logger.error(\"ERROR: Database read", "logger.error(f\"ERROR: Image file '{img}' is missing\") except UnidentifiedImageError: logger.error(f\"ERROR: Image", "not a valid image\") def handle(self, *args, **kwargs): logger.setLevel(logging.INFO) logger.info(\"Rebuilding", "break logger.info(\"Rebuilding Company thumbnails\") for company in Company.objects.exclude(image=None): try: self.rebuild_thumbnail(company)", "Part.objects.exclude(image=None): try: self.rebuild_thumbnail(part) except (OperationalError, ProgrammingError): logger.error(\"ERROR: Database read error.\")", "return img = model.image url = img.thumbnail.name loc = os.path.join(settings.MEDIA_ROOT,", "new dataset, for example \"\"\" import os import logging from", "if not os.path.exists(loc): logger.info(f\"Generating thumbnail image for '{img}'\") try: model.image.render_variations(replace=False)", "thumbnail image for '{img}'\") try: model.image.render_variations(replace=False) except FileNotFoundError: logger.error(f\"ERROR: Image", "FileNotFoundError: logger.error(f\"ERROR: Image file '{img}' is missing\") except UnidentifiedImageError: logger.error(f\"ERROR:", "import Company from part.models import Part logger = logging.getLogger(\"inventree-thumbnails\") class", "images - May be required after importing a new dataset,", "by the \"image\" field of the provided model \"\"\" if", "Part logger = logging.getLogger(\"inventree-thumbnails\") class Command(BaseCommand): \"\"\" Rebuild all thumbnail", "company in Company.objects.exclude(image=None): try: self.rebuild_thumbnail(company) except (OperationalError, ProgrammingError): logger.error(\"ERROR: 
abase", "model.image: return img = model.image url = img.thumbnail.name loc =", "BaseCommand from django.conf import settings from django.db.utils import OperationalError, ProgrammingError", "import settings from django.db.utils import OperationalError, ProgrammingError from company.models import", "Command(BaseCommand): \"\"\" Rebuild all thumbnail images \"\"\" def rebuild_thumbnail(self, model):", "specified by the \"image\" field of the provided model \"\"\"", "= img.thumbnail.name loc = os.path.join(settings.MEDIA_ROOT, url) if not os.path.exists(loc): logger.info(f\"Generating", "import BaseCommand from django.conf import settings from django.db.utils import OperationalError,", "model.image.render_variations(replace=False) except FileNotFoundError: logger.error(f\"ERROR: Image file '{img}' is missing\") except", "a valid image\") def handle(self, *args, **kwargs): logger.setLevel(logging.INFO) logger.info(\"Rebuilding Part", "field of the provided model \"\"\" if not model.image: return", "self.rebuild_thumbnail(part) except (OperationalError, ProgrammingError): logger.error(\"ERROR: Database read error.\") break logger.info(\"Rebuilding", "url = img.thumbnail.name loc = os.path.join(settings.MEDIA_ROOT, url) if not os.path.exists(loc):", "except (OperationalError, ProgrammingError): logger.error(\"ERROR: Database read error.\") break logger.info(\"Rebuilding Company", "logger.error(\"ERROR: Database read error.\") break logger.info(\"Rebuilding Company thumbnails\") for company", "command to rebuild thumbnail images - May be required after", "the provided model \"\"\" if not model.image: return img =", "except FileNotFoundError: logger.error(f\"ERROR: Image file '{img}' is missing\") except UnidentifiedImageError:", "settings from django.db.utils import OperationalError, ProgrammingError from company.models import Company", "part.models import Part logger = logging.getLogger(\"inventree-thumbnails\") class Command(BaseCommand): \"\"\" Rebuild", "Part 
thumbnails\") for part in Part.objects.exclude(image=None): try: self.rebuild_thumbnail(part) except (OperationalError,", "import os import logging from PIL import UnidentifiedImageError from django.core.management.base", "not model.image: return img = model.image url = img.thumbnail.name loc", "try: self.rebuild_thumbnail(company) except (OperationalError, ProgrammingError): logger.error(\"ERROR: abase read error.\") break", "be required after importing a new dataset, for example \"\"\"", "valid image\") def handle(self, *args, **kwargs): logger.setLevel(logging.INFO) logger.info(\"Rebuilding Part thumbnails\")", "from company.models import Company from part.models import Part logger =", "\"\"\" Rebuild all thumbnail images \"\"\" def rebuild_thumbnail(self, model): \"\"\"", "url) if not os.path.exists(loc): logger.info(f\"Generating thumbnail image for '{img}'\") try:", "django.db.utils import OperationalError, ProgrammingError from company.models import Company from part.models", "for '{img}'\") try: model.image.render_variations(replace=False) except FileNotFoundError: logger.error(f\"ERROR: Image file '{img}'", "from part.models import Part logger = logging.getLogger(\"inventree-thumbnails\") class Command(BaseCommand): \"\"\"", "model \"\"\" if not model.image: return img = model.image url", "from django.conf import settings from django.db.utils import OperationalError, ProgrammingError from", "May be required after importing a new dataset, for example", "django.core.management.base import BaseCommand from django.conf import settings from django.db.utils import", "required after importing a new dataset, for example \"\"\" import", "logging.getLogger(\"inventree-thumbnails\") class Command(BaseCommand): \"\"\" Rebuild all thumbnail images \"\"\" def", "logging from PIL import UnidentifiedImageError from django.core.management.base import BaseCommand from", "os import logging from PIL import UnidentifiedImageError from django.core.management.base import", 
"rebuild_thumbnail(self, model): \"\"\" Rebuild the thumbnail specified by the \"image\"", "img = model.image url = img.thumbnail.name loc = os.path.join(settings.MEDIA_ROOT, url)", "thumbnail images - May be required after importing a new", "Rebuild the thumbnail specified by the \"image\" field of the", "'{img}' is not a valid image\") def handle(self, *args, **kwargs):", "UnidentifiedImageError from django.core.management.base import BaseCommand from django.conf import settings from", "provided model \"\"\" if not model.image: return img = model.image", "Rebuild all thumbnail images \"\"\" def rebuild_thumbnail(self, model): \"\"\" Rebuild", "'{img}'\") try: model.image.render_variations(replace=False) except FileNotFoundError: logger.error(f\"ERROR: Image file '{img}' is", "a new dataset, for example \"\"\" import os import logging", "'{img}' is missing\") except UnidentifiedImageError: logger.error(f\"ERROR: Image file '{img}' is", "(OperationalError, ProgrammingError): logger.error(\"ERROR: Database read error.\") break logger.info(\"Rebuilding Company thumbnails\")", "import UnidentifiedImageError from django.core.management.base import BaseCommand from django.conf import settings", "model.image url = img.thumbnail.name loc = os.path.join(settings.MEDIA_ROOT, url) if not", "django.conf import settings from django.db.utils import OperationalError, ProgrammingError from company.models", "\"image\" field of the provided model \"\"\" if not model.image:", "= logging.getLogger(\"inventree-thumbnails\") class Command(BaseCommand): \"\"\" Rebuild all thumbnail images \"\"\"", "\"\"\" Custom management command to rebuild thumbnail images - May", "Image file '{img}' is not a valid image\") def handle(self,", "images \"\"\" def rebuild_thumbnail(self, model): \"\"\" Rebuild the thumbnail specified", "of the provided model \"\"\" if not model.image: return img", "read error.\") break logger.info(\"Rebuilding Company thumbnails\") for company in 
Company.objects.exclude(image=None):", "if not model.image: return img = model.image url = img.thumbnail.name", "for example \"\"\" import os import logging from PIL import", "ProgrammingError from company.models import Company from part.models import Part logger", "is not a valid image\") def handle(self, *args, **kwargs): logger.setLevel(logging.INFO)", "os.path.exists(loc): logger.info(f\"Generating thumbnail image for '{img}'\") try: model.image.render_variations(replace=False) except FileNotFoundError:", "thumbnails\") for part in Part.objects.exclude(image=None): try: self.rebuild_thumbnail(part) except (OperationalError, ProgrammingError):", "def handle(self, *args, **kwargs): logger.setLevel(logging.INFO) logger.info(\"Rebuilding Part thumbnails\") for part", "**kwargs): logger.setLevel(logging.INFO) logger.info(\"Rebuilding Part thumbnails\") for part in Part.objects.exclude(image=None): try:", "for company in Company.objects.exclude(image=None): try: self.rebuild_thumbnail(company) except (OperationalError, ProgrammingError): logger.error(\"ERROR:", "the \"image\" field of the provided model \"\"\" if not", "\"\"\" if not model.image: return img = model.image url =", "logger = logging.getLogger(\"inventree-thumbnails\") class Command(BaseCommand): \"\"\" Rebuild all thumbnail images", "thumbnail images \"\"\" def rebuild_thumbnail(self, model): \"\"\" Rebuild the thumbnail", "image for '{img}'\") try: model.image.render_variations(replace=False) except FileNotFoundError: logger.error(f\"ERROR: Image file", "to rebuild thumbnail images - May be required after importing", "def rebuild_thumbnail(self, model): \"\"\" Rebuild the thumbnail specified by the", "*args, **kwargs): logger.setLevel(logging.INFO) logger.info(\"Rebuilding Part thumbnails\") for part in Part.objects.exclude(image=None):", "= model.image url = img.thumbnail.name loc = os.path.join(settings.MEDIA_ROOT, url) if", "logger.info(\"Rebuilding Part thumbnails\") for part in 
Part.objects.exclude(image=None): try: self.rebuild_thumbnail(part) except", "\"\"\" import os import logging from PIL import UnidentifiedImageError from", "logger.info(\"Rebuilding Company thumbnails\") for company in Company.objects.exclude(image=None): try: self.rebuild_thumbnail(company) except", "after importing a new dataset, for example \"\"\" import os", "OperationalError, ProgrammingError from company.models import Company from part.models import Part", "logger.info(f\"Generating thumbnail image for '{img}'\") try: model.image.render_variations(replace=False) except FileNotFoundError: logger.error(f\"ERROR:", "ProgrammingError): logger.error(\"ERROR: Database read error.\") break logger.info(\"Rebuilding Company thumbnails\") for", "in Company.objects.exclude(image=None): try: self.rebuild_thumbnail(company) except (OperationalError, ProgrammingError): logger.error(\"ERROR: abase read", "company.models import Company from part.models import Part logger = logging.getLogger(\"inventree-thumbnails\")", "import OperationalError, ProgrammingError from company.models import Company from part.models import", "Company from part.models import Part logger = logging.getLogger(\"inventree-thumbnails\") class Command(BaseCommand):", "Image file '{img}' is missing\") except UnidentifiedImageError: logger.error(f\"ERROR: Image file", "all thumbnail images \"\"\" def rebuild_thumbnail(self, model): \"\"\" Rebuild the", "for part in Part.objects.exclude(image=None): try: self.rebuild_thumbnail(part) except (OperationalError, ProgrammingError): logger.error(\"ERROR:", "error.\") break logger.info(\"Rebuilding Company thumbnails\") for company in Company.objects.exclude(image=None): try:" ]
[ "184, 195, 1)\" elif background[0] == \"#\" or \"(\" not", "text[:last_percent] _carbon_url = \"https://carbonnowsh.herokuapp.com/\" def code_to_url(code: str) -> str: return", "hex (str): \"\"\" return tuple(int(hex.lstrip(\"#\")[i : i + 2], 16)", "text[:2000].rindex(\"%25\") return text[:last_percent] _carbon_url = \"https://carbonnowsh.herokuapp.com/\" def code_to_url(code: str) ->", "launch from io import * import requests def encode_url(text: str)", "self.bot = bot @commands.command() async def carbonate(self, ctx, *, code):", "background[0] == \"#\" or \"(\" not in background: return f\"rgba{hex_to_rgb(background)", "urllib.parse.quote(first_encoding, safe=\"*\") # Carbonsh encodes text twice def hex_to_rgb(hex: str)", "if \"%25\" not in text: return text[:2000] if text[:2003][:-3] ==", "return background def int_to_px(number) -> str: return f\"{number}px\" def int_to_percent(number)", ": i + 2], 16) for i in (0, 2,", "str: if background == \"\": return \"rgba(171, 184, 195, 1)\"", "elif background[0] == \"#\" or \"(\" not in background: return", "first_encoding = urllib.parse.quote(text, safe=\"*()\") return urllib.parse.quote(first_encoding, safe=\"*\") # Carbonsh encodes", "return f\"{number}px\" def int_to_percent(number) -> str: return f\"{number}%\" def trim_url(text:", "b = BytesIO(r.content) await ctx.send(file=discord.File(fp=b, filename=\"code.png\")) async def setup(bot): await", "\"%25\" not in text: return text[:2000] if text[:2003][:-3] == \"%25\":", "last_percent = text[:2000].rindex(\"%25\") return text[:last_percent] _carbon_url = \"https://carbonnowsh.herokuapp.com/\" def code_to_url(code:", "return f\"{_carbon_url}?&code={trim_url(encode_url(code))}\" class Carbon(commands.Cog): def __init__(self, bot): self.bot = bot", "from .constants import themes, controls, languages, fonts, escales import os", "str: return f\"{_carbon_url}?&code={trim_url(encode_url(code))}\" class Carbon(commands.Cog): def __init__(self, bot): self.bot =", 
"\"https://carbonnowsh.herokuapp.com/\" def code_to_url(code: str) -> str: return f\"{_carbon_url}?&code={trim_url(encode_url(code))}\" class Carbon(commands.Cog):", "__init__(self, bot): self.bot = bot @commands.command() async def carbonate(self, ctx,", "requests def encode_url(text: str) -> str: first_encoding = urllib.parse.quote(text, safe=\"*()\")", "*, code): carbon_url = code_to_url(code) r = requests.get(carbon_url) b =", "return text[:2000] if text[:2003][:-3] == \"%25\": return text[:2000] last_percent =", "ctx, *, code): carbon_url = code_to_url(code) r = requests.get(carbon_url) b", "def __init__(self, bot): self.bot = bot @commands.command() async def carbonate(self,", "not in text: return text[:2000] if text[:2003][:-3] == \"%25\": return", "import urllib.parse from .constants import themes, controls, languages, fonts, escales", "\"%25\": return text[:2000] last_percent = text[:2000].rindex(\"%25\") return text[:last_percent] _carbon_url =", "from discord.ext import commands import urllib.parse from .constants import themes,", "r = requests.get(carbon_url) b = BytesIO(r.content) await ctx.send(file=discord.File(fp=b, filename=\"code.png\")) async", "background == \"\": return \"rgba(171, 184, 195, 1)\" elif background[0]", "def carbonate(self, ctx, *, code): carbon_url = code_to_url(code) r =", "commands import urllib.parse from .constants import themes, controls, languages, fonts,", "background: return f\"rgba{hex_to_rgb(background) + (1,)}\" return background def int_to_px(number) ->", "_carbon_url = \"https://carbonnowsh.herokuapp.com/\" def code_to_url(code: str) -> str: return f\"{_carbon_url}?&code={trim_url(encode_url(code))}\"", "195, 1)\" elif background[0] == \"#\" or \"(\" not in", "str: if len(text) < 2000: return text if \"%25\" not", "== \"\": return \"rgba(171, 184, 195, 1)\" elif background[0] ==", "return tuple(int(hex.lstrip(\"#\")[i : i + 2], 16) for i in", "languages, fonts, escales import os from pathlib import Path from", 
"\"\"\" Args: hex (str): \"\"\" return tuple(int(hex.lstrip(\"#\")[i : i +", "import * import requests def encode_url(text: str) -> str: first_encoding", "pathlib import Path from typing import Any # from pyppeteer", "-> str: return f\"{_carbon_url}?&code={trim_url(encode_url(code))}\" class Carbon(commands.Cog): def __init__(self, bot): self.bot", "text[:2003][:-3] == \"%25\": return text[:2000] last_percent = text[:2000].rindex(\"%25\") return text[:last_percent]", "code_to_url(code: str) -> str: return f\"{_carbon_url}?&code={trim_url(encode_url(code))}\" class Carbon(commands.Cog): def __init__(self,", "\"(\" not in background: return f\"rgba{hex_to_rgb(background) + (1,)}\" return background", "str) -> str: if len(text) < 2000: return text if", "text: return text[:2000] if text[:2003][:-3] == \"%25\": return text[:2000] last_percent", "f\"{_carbon_url}?&code={trim_url(encode_url(code))}\" class Carbon(commands.Cog): def __init__(self, bot): self.bot = bot @commands.command()", "if len(text) < 2000: return text if \"%25\" not in", "typing import Any # from pyppeteer import launch from io", "Carbonsh encodes text twice def hex_to_rgb(hex: str) -> tuple: \"\"\"", "from io import * import requests def encode_url(text: str) ->", "16) for i in (0, 2, 4)) def parse_bg(background) ->", "return f\"{number}%\" def trim_url(text: str) -> str: if len(text) <", "bot @commands.command() async def carbonate(self, ctx, *, code): carbon_url =", "# Carbonsh encodes text twice def hex_to_rgb(hex: str) -> tuple:", "import discord from discord.ext import commands import urllib.parse from .constants", "def parse_bg(background) -> str: if background == \"\": return \"rgba(171,", "parse_bg(background) -> str: if background == \"\": return \"rgba(171, 184,", "== \"#\" or \"(\" not in background: return f\"rgba{hex_to_rgb(background) +", "encodes text twice def hex_to_rgb(hex: str) -> tuple: \"\"\" Args:", "code_to_url(code) r = requests.get(carbon_url) b = BytesIO(r.content) await 
ctx.send(file=discord.File(fp=b, filename=\"code.png\"))", "Path from typing import Any # from pyppeteer import launch", "twice def hex_to_rgb(hex: str) -> tuple: \"\"\" Args: hex (str):", "= bot @commands.command() async def carbonate(self, ctx, *, code): carbon_url", "trim_url(text: str) -> str: if len(text) < 2000: return text", "= requests.get(carbon_url) b = BytesIO(r.content) await ctx.send(file=discord.File(fp=b, filename=\"code.png\")) async def", "1)\" elif background[0] == \"#\" or \"(\" not in background:", "= BytesIO(r.content) await ctx.send(file=discord.File(fp=b, filename=\"code.png\")) async def setup(bot): await bot.add_cog(Carbon(bot))", "for i in (0, 2, 4)) def parse_bg(background) -> str:", "text[:2000] if text[:2003][:-3] == \"%25\": return text[:2000] last_percent = text[:2000].rindex(\"%25\")", "return f\"rgba{hex_to_rgb(background) + (1,)}\" return background def int_to_px(number) -> str:", "controls, languages, fonts, escales import os from pathlib import Path", "background def int_to_px(number) -> str: return f\"{number}px\" def int_to_percent(number) ->", "pyppeteer import launch from io import * import requests def", "def code_to_url(code: str) -> str: return f\"{_carbon_url}?&code={trim_url(encode_url(code))}\" class Carbon(commands.Cog): def", "\"rgba(171, 184, 195, 1)\" elif background[0] == \"#\" or \"(\"", "int_to_px(number) -> str: return f\"{number}px\" def int_to_percent(number) -> str: return", "def int_to_percent(number) -> str: return f\"{number}%\" def trim_url(text: str) ->", "\"#\" or \"(\" not in background: return f\"rgba{hex_to_rgb(background) + (1,)}\"", "Args: hex (str): \"\"\" return tuple(int(hex.lstrip(\"#\")[i : i + 2],", "from pathlib import Path from typing import Any # from", "(0, 2, 4)) def parse_bg(background) -> str: if background ==", "(1,)}\" return background def int_to_px(number) -> str: return f\"{number}px\" def", "if text[:2003][:-3] == \"%25\": return text[:2000] last_percent = 
text[:2000].rindex(\"%25\") return", "= urllib.parse.quote(text, safe=\"*()\") return urllib.parse.quote(first_encoding, safe=\"*\") # Carbonsh encodes text", "str: return f\"{number}px\" def int_to_percent(number) -> str: return f\"{number}%\" def", "@commands.command() async def carbonate(self, ctx, *, code): carbon_url = code_to_url(code)", "i in (0, 2, 4)) def parse_bg(background) -> str: if", "= text[:2000].rindex(\"%25\") return text[:last_percent] _carbon_url = \"https://carbonnowsh.herokuapp.com/\" def code_to_url(code: str)", "def hex_to_rgb(hex: str) -> tuple: \"\"\" Args: hex (str): \"\"\"", "\"\": return \"rgba(171, 184, 195, 1)\" elif background[0] == \"#\"", "import os from pathlib import Path from typing import Any", "class Carbon(commands.Cog): def __init__(self, bot): self.bot = bot @commands.command() async", "in background: return f\"rgba{hex_to_rgb(background) + (1,)}\" return background def int_to_px(number)", "import themes, controls, languages, fonts, escales import os from pathlib", "requests.get(carbon_url) b = BytesIO(r.content) await ctx.send(file=discord.File(fp=b, filename=\"code.png\")) async def setup(bot):", "# from pyppeteer import launch from io import * import", "<gh_stars>1-10 import discord from discord.ext import commands import urllib.parse from", "def encode_url(text: str) -> str: first_encoding = urllib.parse.quote(text, safe=\"*()\") return", "= code_to_url(code) r = requests.get(carbon_url) b = BytesIO(r.content) await ctx.send(file=discord.File(fp=b,", "or \"(\" not in background: return f\"rgba{hex_to_rgb(background) + (1,)}\" return", "(str): \"\"\" return tuple(int(hex.lstrip(\"#\")[i : i + 2], 16) for", "\"\"\" return tuple(int(hex.lstrip(\"#\")[i : i + 2], 16) for i", "return text[:2000] last_percent = text[:2000].rindex(\"%25\") return text[:last_percent] _carbon_url = \"https://carbonnowsh.herokuapp.com/\"", "len(text) < 2000: return text if \"%25\" not in text:", "async def carbonate(self, ctx, *, code): 
carbon_url = code_to_url(code) r", "discord from discord.ext import commands import urllib.parse from .constants import", "from pyppeteer import launch from io import * import requests", "in (0, 2, 4)) def parse_bg(background) -> str: if background", "return text if \"%25\" not in text: return text[:2000] if", "import commands import urllib.parse from .constants import themes, controls, languages,", "-> str: if background == \"\": return \"rgba(171, 184, 195,", "tuple: \"\"\" Args: hex (str): \"\"\" return tuple(int(hex.lstrip(\"#\")[i : i", "+ 2], 16) for i in (0, 2, 4)) def", "hex_to_rgb(hex: str) -> tuple: \"\"\" Args: hex (str): \"\"\" return", "Carbon(commands.Cog): def __init__(self, bot): self.bot = bot @commands.command() async def", "safe=\"*()\") return urllib.parse.quote(first_encoding, safe=\"*\") # Carbonsh encodes text twice def", "bot): self.bot = bot @commands.command() async def carbonate(self, ctx, *,", "-> str: return f\"{number}%\" def trim_url(text: str) -> str: if", "def trim_url(text: str) -> str: if len(text) < 2000: return", "themes, controls, languages, fonts, escales import os from pathlib import", "text if \"%25\" not in text: return text[:2000] if text[:2003][:-3]", "safe=\"*\") # Carbonsh encodes text twice def hex_to_rgb(hex: str) ->", "def int_to_px(number) -> str: return f\"{number}px\" def int_to_percent(number) -> str:", "encode_url(text: str) -> str: first_encoding = urllib.parse.quote(text, safe=\"*()\") return urllib.parse.quote(first_encoding,", "from typing import Any # from pyppeteer import launch from", "4)) def parse_bg(background) -> str: if background == \"\": return", "int_to_percent(number) -> str: return f\"{number}%\" def trim_url(text: str) -> str:", "urllib.parse from .constants import themes, controls, languages, fonts, escales import", "in text: return text[:2000] if text[:2003][:-3] == \"%25\": return text[:2000]", "str) -> tuple: \"\"\" Args: hex (str): \"\"\" return tuple(int(hex.lstrip(\"#\")[i", "Any # from 
pyppeteer import launch from io import *", "text twice def hex_to_rgb(hex: str) -> tuple: \"\"\" Args: hex", "== \"%25\": return text[:2000] last_percent = text[:2000].rindex(\"%25\") return text[:last_percent] _carbon_url", "code): carbon_url = code_to_url(code) r = requests.get(carbon_url) b = BytesIO(r.content)", "f\"rgba{hex_to_rgb(background) + (1,)}\" return background def int_to_px(number) -> str: return", "text[:2000] last_percent = text[:2000].rindex(\"%25\") return text[:last_percent] _carbon_url = \"https://carbonnowsh.herokuapp.com/\" def", "+ (1,)}\" return background def int_to_px(number) -> str: return f\"{number}px\"", "* import requests def encode_url(text: str) -> str: first_encoding =", "str: first_encoding = urllib.parse.quote(text, safe=\"*()\") return urllib.parse.quote(first_encoding, safe=\"*\") # Carbonsh", "if background == \"\": return \"rgba(171, 184, 195, 1)\" elif", "-> str: return f\"{number}px\" def int_to_percent(number) -> str: return f\"{number}%\"", "not in background: return f\"rgba{hex_to_rgb(background) + (1,)}\" return background def", "str: return f\"{number}%\" def trim_url(text: str) -> str: if len(text)", "i + 2], 16) for i in (0, 2, 4))", "import requests def encode_url(text: str) -> str: first_encoding = urllib.parse.quote(text,", "= \"https://carbonnowsh.herokuapp.com/\" def code_to_url(code: str) -> str: return f\"{_carbon_url}?&code={trim_url(encode_url(code))}\" class", "tuple(int(hex.lstrip(\"#\")[i : i + 2], 16) for i in (0,", "escales import os from pathlib import Path from typing import", "import launch from io import * import requests def encode_url(text:", "return \"rgba(171, 184, 195, 1)\" elif background[0] == \"#\" or", "return text[:last_percent] _carbon_url = \"https://carbonnowsh.herokuapp.com/\" def code_to_url(code: str) -> str:", "carbon_url = code_to_url(code) r = requests.get(carbon_url) b = BytesIO(r.content) await", "-> tuple: \"\"\" Args: hex (str): \"\"\" return tuple(int(hex.lstrip(\"#\")[i :", 
"f\"{number}px\" def int_to_percent(number) -> str: return f\"{number}%\" def trim_url(text: str)", "-> str: if len(text) < 2000: return text if \"%25\"", "urllib.parse.quote(text, safe=\"*()\") return urllib.parse.quote(first_encoding, safe=\"*\") # Carbonsh encodes text twice", "str) -> str: first_encoding = urllib.parse.quote(text, safe=\"*()\") return urllib.parse.quote(first_encoding, safe=\"*\")", "f\"{number}%\" def trim_url(text: str) -> str: if len(text) < 2000:", "2], 16) for i in (0, 2, 4)) def parse_bg(background)", ".constants import themes, controls, languages, fonts, escales import os from", "2, 4)) def parse_bg(background) -> str: if background == \"\":", "str) -> str: return f\"{_carbon_url}?&code={trim_url(encode_url(code))}\" class Carbon(commands.Cog): def __init__(self, bot):", "return urllib.parse.quote(first_encoding, safe=\"*\") # Carbonsh encodes text twice def hex_to_rgb(hex:", "-> str: first_encoding = urllib.parse.quote(text, safe=\"*()\") return urllib.parse.quote(first_encoding, safe=\"*\") #", "carbonate(self, ctx, *, code): carbon_url = code_to_url(code) r = requests.get(carbon_url)", "io import * import requests def encode_url(text: str) -> str:", "discord.ext import commands import urllib.parse from .constants import themes, controls,", "fonts, escales import os from pathlib import Path from typing", "import Any # from pyppeteer import launch from io import", "< 2000: return text if \"%25\" not in text: return", "2000: return text if \"%25\" not in text: return text[:2000]", "os from pathlib import Path from typing import Any #", "import Path from typing import Any # from pyppeteer import" ]
[ "URN or URL import spotipy_twisted import sys import pprint if", "if len(sys.argv) > 1: urn = sys.argv[1] else: urn =", "import spotipy_twisted import sys import pprint if len(sys.argv) > 1:", "a URN or URL import spotipy_twisted import sys import pprint", "info for a URN or URL import spotipy_twisted import sys", "1: urn = sys.argv[1] else: urn = 'spotify:artist:3jOstUTkEu2JkjvRdBA5Gu' sp =", "else: urn = 'spotify:artist:3jOstUTkEu2JkjvRdBA5Gu' sp = spotipy_twisted.Spotify() artist = sp.artist(urn)", "shows artist info for a URN or URL import spotipy_twisted", "= sys.argv[1] else: urn = 'spotify:artist:3jOstUTkEu2JkjvRdBA5Gu' sp = spotipy_twisted.Spotify() artist", "> 1: urn = sys.argv[1] else: urn = 'spotify:artist:3jOstUTkEu2JkjvRdBA5Gu' sp", "sys.argv[1] else: urn = 'spotify:artist:3jOstUTkEu2JkjvRdBA5Gu' sp = spotipy_twisted.Spotify() artist =", "pprint if len(sys.argv) > 1: urn = sys.argv[1] else: urn", "for a URN or URL import spotipy_twisted import sys import", "spotipy_twisted import sys import pprint if len(sys.argv) > 1: urn", "len(sys.argv) > 1: urn = sys.argv[1] else: urn = 'spotify:artist:3jOstUTkEu2JkjvRdBA5Gu'", "import pprint if len(sys.argv) > 1: urn = sys.argv[1] else:", "sys import pprint if len(sys.argv) > 1: urn = sys.argv[1]", "urn = 'spotify:artist:3jOstUTkEu2JkjvRdBA5Gu' sp = spotipy_twisted.Spotify() artist = sp.artist(urn) pprint.pprint(artist)", "or URL import spotipy_twisted import sys import pprint if len(sys.argv)", "<gh_stars>0 # shows artist info for a URN or URL", "artist info for a URN or URL import spotipy_twisted import", "URL import spotipy_twisted import sys import pprint if len(sys.argv) >", "# shows artist info for a URN or URL import", "import sys import pprint if len(sys.argv) > 1: urn =", "urn = sys.argv[1] else: urn = 'spotify:artist:3jOstUTkEu2JkjvRdBA5Gu' sp = spotipy_twisted.Spotify()" ]
[ "MCP from adafruit_mcp3xxx.analog_in import AnalogIn # create the spi bus", "AnalogIn(mcp, MCP.P0) print(\"Raw ADC Value: \", chan.value) print(\"ADC Voltage: \"", "# create the spi bus spi = busio.SPI(clock=board.SCK, MISO=board.MISO, MOSI=board.MOSI)", "Value: \", chan.value) print(\"ADC Voltage: \" + str(chan.voltage) + \"V\")", "import board import adafruit_mcp3xxx.mcp3002 as MCP from adafruit_mcp3xxx.analog_in import AnalogIn", "cs = digitalio.DigitalInOut(board.D5) # create the mcp object mcp =", "on pin 0 chan = AnalogIn(mcp, MCP.P0) print(\"Raw ADC Value:", "MCP.P0) print(\"Raw ADC Value: \", chan.value) print(\"ADC Voltage: \" +", "0 chan = AnalogIn(mcp, MCP.P0) print(\"Raw ADC Value: \", chan.value)", "# create the cs (chip select) cs = digitalio.DigitalInOut(board.D5) #", "from adafruit_mcp3xxx.analog_in import AnalogIn # create the spi bus spi", "create the spi bus spi = busio.SPI(clock=board.SCK, MISO=board.MISO, MOSI=board.MOSI) #", "adafruit_mcp3xxx.analog_in import AnalogIn # create the spi bus spi =", "# create the mcp object mcp = MCP.MCP3002(spi, cs) #", "mcp object mcp = MCP.MCP3002(spi, cs) # create an analog", "the spi bus spi = busio.SPI(clock=board.SCK, MISO=board.MISO, MOSI=board.MOSI) # create", "pin 0 chan = AnalogIn(mcp, MCP.P0) print(\"Raw ADC Value: \",", "bus spi = busio.SPI(clock=board.SCK, MISO=board.MISO, MOSI=board.MOSI) # create the cs", "= MCP.MCP3002(spi, cs) # create an analog input channel on", "import busio import digitalio import board import adafruit_mcp3xxx.mcp3002 as MCP", "mcp = MCP.MCP3002(spi, cs) # create an analog input channel", "input channel on pin 0 chan = AnalogIn(mcp, MCP.P0) print(\"Raw", "cs (chip select) cs = digitalio.DigitalInOut(board.D5) # create the mcp", "import AnalogIn # create the spi bus spi = busio.SPI(clock=board.SCK,", "= digitalio.DigitalInOut(board.D5) # create the mcp object mcp = MCP.MCP3002(spi,", "analog input channel on pin 0 chan = AnalogIn(mcp, MCP.P0)", "channel on pin 0 chan = 
AnalogIn(mcp, MCP.P0) print(\"Raw ADC", "chan = AnalogIn(mcp, MCP.P0) print(\"Raw ADC Value: \", chan.value) print(\"ADC", "MCP.MCP3002(spi, cs) # create an analog input channel on pin", "digitalio import board import adafruit_mcp3xxx.mcp3002 as MCP from adafruit_mcp3xxx.analog_in import", "digitalio.DigitalInOut(board.D5) # create the mcp object mcp = MCP.MCP3002(spi, cs)", "as MCP from adafruit_mcp3xxx.analog_in import AnalogIn # create the spi", "spi = busio.SPI(clock=board.SCK, MISO=board.MISO, MOSI=board.MOSI) # create the cs (chip", "= busio.SPI(clock=board.SCK, MISO=board.MISO, MOSI=board.MOSI) # create the cs (chip select)", "spi bus spi = busio.SPI(clock=board.SCK, MISO=board.MISO, MOSI=board.MOSI) # create the", "adafruit_mcp3xxx.mcp3002 as MCP from adafruit_mcp3xxx.analog_in import AnalogIn # create the", "the mcp object mcp = MCP.MCP3002(spi, cs) # create an", "the cs (chip select) cs = digitalio.DigitalInOut(board.D5) # create the", "busio.SPI(clock=board.SCK, MISO=board.MISO, MOSI=board.MOSI) # create the cs (chip select) cs", "object mcp = MCP.MCP3002(spi, cs) # create an analog input", "create the mcp object mcp = MCP.MCP3002(spi, cs) # create", "board import adafruit_mcp3xxx.mcp3002 as MCP from adafruit_mcp3xxx.analog_in import AnalogIn #", "select) cs = digitalio.DigitalInOut(board.D5) # create the mcp object mcp", "ADC Value: \", chan.value) print(\"ADC Voltage: \" + str(chan.voltage) +", "= AnalogIn(mcp, MCP.P0) print(\"Raw ADC Value: \", chan.value) print(\"ADC Voltage:", "busio import digitalio import board import adafruit_mcp3xxx.mcp3002 as MCP from", "an analog input channel on pin 0 chan = AnalogIn(mcp,", "import adafruit_mcp3xxx.mcp3002 as MCP from adafruit_mcp3xxx.analog_in import AnalogIn # create", "MISO=board.MISO, MOSI=board.MOSI) # create the cs (chip select) cs =", "MOSI=board.MOSI) # create the cs (chip select) cs = digitalio.DigitalInOut(board.D5)", "(chip select) cs = digitalio.DigitalInOut(board.D5) # create the mcp object", 
"<filename>examples/mcp3xxx_mcp3002_single_ended_simpletest.py import busio import digitalio import board import adafruit_mcp3xxx.mcp3002 as", "import digitalio import board import adafruit_mcp3xxx.mcp3002 as MCP from adafruit_mcp3xxx.analog_in", "AnalogIn # create the spi bus spi = busio.SPI(clock=board.SCK, MISO=board.MISO,", "# create an analog input channel on pin 0 chan", "create the cs (chip select) cs = digitalio.DigitalInOut(board.D5) # create", "print(\"Raw ADC Value: \", chan.value) print(\"ADC Voltage: \" + str(chan.voltage)", "cs) # create an analog input channel on pin 0", "create an analog input channel on pin 0 chan =" ]
[ "['tabular_data'] @data_factory(label=\"ASCII Table\", identifier=has_extension('csv txt tsv tbl dat ' 'csv.gz", "in [astropy_tabular_data, pandas_read_table]: try: return fac(path, **kwargs) except Exception: pass", "has_extension from glue.config import data_factory __all__ = ['tabular_data'] @data_factory(label=\"ASCII Table\",", "' 'dat.gz'), priority=1) def tabular_data(path, **kwargs): from glue.core.data_factories.astropy_table import astropy_tabular_data", "tbl dat ' 'csv.gz txt.gz tbl.bz ' 'dat.gz'), priority=1) def", "glue.core.data_factories.helpers import has_extension from glue.config import data_factory __all__ = ['tabular_data']", "try: return fac(path, **kwargs) except Exception: pass else: raise IOError(\"Could", "'csv.gz txt.gz tbl.bz ' 'dat.gz'), priority=1) def tabular_data(path, **kwargs): from", "Exception: pass else: raise IOError(\"Could not parse file: %s\" %", "fac(path, **kwargs) except Exception: pass else: raise IOError(\"Could not parse", "astropy_tabular_data from glue.core.data_factories.pandas import pandas_read_table for fac in [astropy_tabular_data, pandas_read_table]:", "Table\", identifier=has_extension('csv txt tsv tbl dat ' 'csv.gz txt.gz tbl.bz", "def tabular_data(path, **kwargs): from glue.core.data_factories.astropy_table import astropy_tabular_data from glue.core.data_factories.pandas import", "glue.config import data_factory __all__ = ['tabular_data'] @data_factory(label=\"ASCII Table\", identifier=has_extension('csv txt", "**kwargs): from glue.core.data_factories.astropy_table import astropy_tabular_data from glue.core.data_factories.pandas import pandas_read_table for", "data_factory __all__ = ['tabular_data'] @data_factory(label=\"ASCII Table\", identifier=has_extension('csv txt tsv tbl", "= ['tabular_data'] @data_factory(label=\"ASCII Table\", identifier=has_extension('csv txt tsv tbl dat '", "**kwargs) except Exception: pass else: raise IOError(\"Could not parse file:", "txt tsv tbl dat ' 'csv.gz txt.gz tbl.bz ' 
'dat.gz'),", "glue.core.data_factories.pandas import pandas_read_table for fac in [astropy_tabular_data, pandas_read_table]: try: return", "dat ' 'csv.gz txt.gz tbl.bz ' 'dat.gz'), priority=1) def tabular_data(path,", "fac in [astropy_tabular_data, pandas_read_table]: try: return fac(path, **kwargs) except Exception:", "tbl.bz ' 'dat.gz'), priority=1) def tabular_data(path, **kwargs): from glue.core.data_factories.astropy_table import", "@data_factory(label=\"ASCII Table\", identifier=has_extension('csv txt tsv tbl dat ' 'csv.gz txt.gz", "return fac(path, **kwargs) except Exception: pass else: raise IOError(\"Could not", "import pandas_read_table for fac in [astropy_tabular_data, pandas_read_table]: try: return fac(path,", "glue.core.data_factories.astropy_table import astropy_tabular_data from glue.core.data_factories.pandas import pandas_read_table for fac in", "except Exception: pass else: raise IOError(\"Could not parse file: %s\"", "from glue.core.data_factories.astropy_table import astropy_tabular_data from glue.core.data_factories.pandas import pandas_read_table for fac", "for fac in [astropy_tabular_data, pandas_read_table]: try: return fac(path, **kwargs) except", "from glue.core.data_factories.pandas import pandas_read_table for fac in [astropy_tabular_data, pandas_read_table]: try:", "from glue.config import data_factory __all__ = ['tabular_data'] @data_factory(label=\"ASCII Table\", identifier=has_extension('csv", "identifier=has_extension('csv txt tsv tbl dat ' 'csv.gz txt.gz tbl.bz '", "import astropy_tabular_data from glue.core.data_factories.pandas import pandas_read_table for fac in [astropy_tabular_data,", "import data_factory __all__ = ['tabular_data'] @data_factory(label=\"ASCII Table\", identifier=has_extension('csv txt tsv", "priority=1) def tabular_data(path, **kwargs): from glue.core.data_factories.astropy_table import astropy_tabular_data from glue.core.data_factories.pandas", "pandas_read_table]: try: return fac(path, **kwargs) except 
Exception: pass else: raise", "pandas_read_table for fac in [astropy_tabular_data, pandas_read_table]: try: return fac(path, **kwargs)", "tabular_data(path, **kwargs): from glue.core.data_factories.astropy_table import astropy_tabular_data from glue.core.data_factories.pandas import pandas_read_table", "from glue.core.data_factories.helpers import has_extension from glue.config import data_factory __all__ =", "[astropy_tabular_data, pandas_read_table]: try: return fac(path, **kwargs) except Exception: pass else:", "__all__ = ['tabular_data'] @data_factory(label=\"ASCII Table\", identifier=has_extension('csv txt tsv tbl dat", "' 'csv.gz txt.gz tbl.bz ' 'dat.gz'), priority=1) def tabular_data(path, **kwargs):", "txt.gz tbl.bz ' 'dat.gz'), priority=1) def tabular_data(path, **kwargs): from glue.core.data_factories.astropy_table", "'dat.gz'), priority=1) def tabular_data(path, **kwargs): from glue.core.data_factories.astropy_table import astropy_tabular_data from", "tsv tbl dat ' 'csv.gz txt.gz tbl.bz ' 'dat.gz'), priority=1)", "import has_extension from glue.config import data_factory __all__ = ['tabular_data'] @data_factory(label=\"ASCII", "pass else: raise IOError(\"Could not parse file: %s\" % path)" ]
[ "( Author.objects.filter(project__in=project_list).distinct().exclude(pk=author_id) ) return render( request, \"code_doc/authors/author_details.html\", { \"project_list\": project_list,", "projects = Project.objects.filter(administrators=maintainer) return render( request, \"code_doc/maintainer_details.html\", {\"projects\": projects, \"maintainer\":", "permissions_object_getter = \"get_author_from_request\" template_name = \"code_doc/authors/author_edit.html\" pk_url_kwarg = \"author_id\" def", "permissions_on_object = (\"code_doc.author_edit\",) permissions_object_getter = \"get_author_from_request\" template_name = \"code_doc/authors/author_edit.html\" pk_url_kwarg", "..models.projects import Project from ..models.authors import Author from ..forms import", "for editing information about an Author .. note:: in order", "AuthorListView(ListView): \"\"\"A generic view of the authors in a list\"\"\"", "= \"author_id\" def get_author_from_request(self, request, *args, **kwargs): # TODO check", "able to edit an Author, the user should have the", "detail_author(request, author_id): try: author = Author.objects.get(pk=author_id) except Author.DoesNotExist: raise Http404", "return Author.objects.get(pk=kwargs[\"author_id\"]) except Author.DoesNotExist: logger.warning( \"[AuthorUpdateView] non existent Author with", "import render from django.http import Http404 from django.views.generic.edit import UpdateView", "class MaintainerProfileView(View): \"\"\"Manages the views associated to the maintainers\"\"\" @method_decorator(login_required)", "return None class MaintainerProfileView(View): \"\"\"Manages the views associated to the", "= logging.getLogger(__name__) class AuthorListView(ListView): \"\"\"A generic view of the authors", "an Author, the user should have the 'code_doc.author_edit' permission on", "TODO check if needed try: return Author.objects.get(pk=kwargs[\"author_id\"]) except Author.DoesNotExist: logger.warning(", "Http404 project_list = 
Project.objects.filter(authors=author) coauthor_list = ( Author.objects.filter(project__in=project_list).distinct().exclude(pk=author_id) ) return", "= \"code_doc/authors/author_list.html\" context_object_name = \"authors\" model = Author def detail_author(request,", "Project.DoesNotExist: raise Http404 projects = Project.objects.filter(administrators=maintainer) return render( request, \"code_doc/maintainer_details.html\",", "in order to be able to edit an Author, the", "the views associated to the maintainers\"\"\" @method_decorator(login_required) def get(self, request,", "list\"\"\" paginate_by = 10 template_name = \"code_doc/authors/author_list.html\" context_object_name = \"authors\"", "def detail_author(request, author_id): try: author = Author.objects.get(pk=author_id) except Author.DoesNotExist: raise", "to the maintainers\"\"\" @method_decorator(login_required) def get(self, request, maintainer_id): try: maintainer", "# TODO check if needed try: return Author.objects.get(pk=kwargs[\"author_id\"]) except Author.DoesNotExist:", "the maintainers\"\"\" @method_decorator(login_required) def get(self, request, maintainer_id): try: maintainer =", "view of the authors in a list\"\"\" paginate_by = 10", "\"author_id\" def get_author_from_request(self, request, *args, **kwargs): # TODO check if", "form_class = AuthorForm model = Author permissions_on_object = (\"code_doc.author_edit\",) permissions_object_getter", "raise Http404 project_list = Project.objects.filter(authors=author) coauthor_list = ( Author.objects.filter(project__in=project_list).distinct().exclude(pk=author_id) )", "request, \"code_doc/authors/author_details.html\", { \"project_list\": project_list, \"author\": author, \"user\": request.user, \"coauthor_list\":", "# logger for this file logger = logging.getLogger(__name__) class AuthorListView(ListView):", ") return render( request, \"code_doc/authors/author_details.html\", { \"project_list\": project_list, \"author\": author,", "\"\"\"View for 
editing information about an Author .. note:: in", "order to be able to edit an Author, the user", "= \"code_doc/authors/author_edit.html\" pk_url_kwarg = \"author_id\" def get_author_from_request(self, request, *args, **kwargs):", "None class MaintainerProfileView(View): \"\"\"Manages the views associated to the maintainers\"\"\"", "= Author def detail_author(request, author_id): try: author = Author.objects.get(pk=author_id) except", "associated to the maintainers\"\"\" @method_decorator(login_required) def get(self, request, maintainer_id): try:", "\"code_doc/maintainer_details.html\", {\"projects\": projects, \"maintainer\": maintainer}, ) @method_decorator(login_required) def post(self, request):", "kwargs[\"author_id\"] ) return None class MaintainerProfileView(View): \"\"\"Manages the views associated", "Author from ..forms import AuthorForm from .permission_helpers import PermissionOnObjectViewMixin #", "non existent Author with id %s\", kwargs[\"author_id\"] ) return None", "pk_url_kwarg = \"author_id\" def get_author_from_request(self, request, *args, **kwargs): # TODO", "\"code_doc/authors/author_details.html\", { \"project_list\": project_list, \"author\": author, \"user\": request.user, \"coauthor_list\": coauthor_list,", "render( request, \"code_doc/maintainer_details.html\", {\"projects\": projects, \"maintainer\": maintainer}, ) @method_decorator(login_required) def", "django.shortcuts import render from django.http import Http404 from django.views.generic.edit import", "= Project.objects.filter(administrators=maintainer) return render( request, \"code_doc/maintainer_details.html\", {\"projects\": projects, \"maintainer\": maintainer},", "author = Author.objects.get(pk=author_id) except Author.DoesNotExist: raise Http404 project_list = Project.objects.filter(authors=author)", "import UpdateView from django.views.generic import ListView, View from django.contrib.auth.decorators import", "in a list\"\"\" paginate_by = 10 template_name = 
\"code_doc/authors/author_list.html\" context_object_name", "from django.http import Http404 from django.views.generic.edit import UpdateView from django.views.generic", "= Author permissions_on_object = (\"code_doc.author_edit\",) permissions_object_getter = \"get_author_from_request\" template_name =", "try: return Author.objects.get(pk=kwargs[\"author_id\"]) except Author.DoesNotExist: logger.warning( \"[AuthorUpdateView] non existent Author", "import Project from ..models.authors import Author from ..forms import AuthorForm", "authors in a list\"\"\" paginate_by = 10 template_name = \"code_doc/authors/author_list.html\"", ".. note:: in order to be able to edit an", "..models.authors import Author from ..forms import AuthorForm from .permission_helpers import", "to edit an Author, the user should have the 'code_doc.author_edit'", "logger for this file logger = logging.getLogger(__name__) class AuthorListView(ListView): \"\"\"A", "\"code_doc/authors/author_list.html\" context_object_name = \"authors\" model = Author def detail_author(request, author_id):", "import ListView, View from django.contrib.auth.decorators import login_required from django.contrib.auth.models import", "get_author_from_request(self, request, *args, **kwargs): # TODO check if needed try:", "**kwargs): # TODO check if needed try: return Author.objects.get(pk=kwargs[\"author_id\"]) except", "class AuthorListView(ListView): \"\"\"A generic view of the authors in a", "from django.views.generic import ListView, View from django.contrib.auth.decorators import login_required from", "import PermissionOnObjectViewMixin # logger for this file logger = logging.getLogger(__name__)", "import login_required from django.contrib.auth.models import User from django.utils.decorators import method_decorator", ".permission_helpers import PermissionOnObjectViewMixin # logger for this file logger =", "UpdateView from django.views.generic import ListView, View from django.contrib.auth.decorators import 
login_required", "User from django.utils.decorators import method_decorator import logging from ..models.projects import", "Http404 from django.views.generic.edit import UpdateView from django.views.generic import ListView, View", "from django.utils.decorators import method_decorator import logging from ..models.projects import Project", "Http404 projects = Project.objects.filter(administrators=maintainer) return render( request, \"code_doc/maintainer_details.html\", {\"projects\": projects,", "UpdateView): \"\"\"View for editing information about an Author .. note::", "{\"projects\": projects, \"maintainer\": maintainer}, ) @method_decorator(login_required) def post(self, request): pass", "from .permission_helpers import PermissionOnObjectViewMixin # logger for this file logger", ") return None class MaintainerProfileView(View): \"\"\"Manages the views associated to", "import User from django.utils.decorators import method_decorator import logging from ..models.projects", "should have the 'code_doc.author_edit' permission on the Author object. 
\"\"\"", "\"authors\" model = Author def detail_author(request, author_id): try: author =", "django.views.generic.edit import UpdateView from django.views.generic import ListView, View from django.contrib.auth.decorators", "project_list, \"author\": author, \"user\": request.user, \"coauthor_list\": coauthor_list, }, ) class", "render from django.http import Http404 from django.views.generic.edit import UpdateView from", "\"project_list\": project_list, \"author\": author, \"user\": request.user, \"coauthor_list\": coauthor_list, }, )", "\"\"\" form_class = AuthorForm model = Author permissions_on_object = (\"code_doc.author_edit\",)", "template_name = \"code_doc/authors/author_edit.html\" pk_url_kwarg = \"author_id\" def get_author_from_request(self, request, *args,", "= (\"code_doc.author_edit\",) permissions_object_getter = \"get_author_from_request\" template_name = \"code_doc/authors/author_edit.html\" pk_url_kwarg =", "with id %s\", kwargs[\"author_id\"] ) return None class MaintainerProfileView(View): \"\"\"Manages", "needed try: return Author.objects.get(pk=kwargs[\"author_id\"]) except Author.DoesNotExist: logger.warning( \"[AuthorUpdateView] non existent", "Author def detail_author(request, author_id): try: author = Author.objects.get(pk=author_id) except Author.DoesNotExist:", "Author.objects.get(pk=kwargs[\"author_id\"]) except Author.DoesNotExist: logger.warning( \"[AuthorUpdateView] non existent Author with id", "this file logger = logging.getLogger(__name__) class AuthorListView(ListView): \"\"\"A generic view", "return render( request, \"code_doc/authors/author_details.html\", { \"project_list\": project_list, \"author\": author, \"user\":", "@method_decorator(login_required) def get(self, request, maintainer_id): try: maintainer = User.objects.get(pk=maintainer_id) except", "import Http404 from django.views.generic.edit import UpdateView from django.views.generic import ListView,", "Author with id %s\", kwargs[\"author_id\"] ) return None class 
MaintainerProfileView(View):", "the authors in a list\"\"\" paginate_by = 10 template_name =", "id %s\", kwargs[\"author_id\"] ) return None class MaintainerProfileView(View): \"\"\"Manages the", "Author object. \"\"\" form_class = AuthorForm model = Author permissions_on_object", "\"[AuthorUpdateView] non existent Author with id %s\", kwargs[\"author_id\"] ) return", "\"\"\"Manages the views associated to the maintainers\"\"\" @method_decorator(login_required) def get(self,", "model = Author permissions_on_object = (\"code_doc.author_edit\",) permissions_object_getter = \"get_author_from_request\" template_name", "context_object_name = \"authors\" model = Author def detail_author(request, author_id): try:", "PermissionOnObjectViewMixin # logger for this file logger = logging.getLogger(__name__) class", "Author .. note:: in order to be able to edit", ") class AuthorUpdateView(PermissionOnObjectViewMixin, UpdateView): \"\"\"View for editing information about an", "author_id): try: author = Author.objects.get(pk=author_id) except Author.DoesNotExist: raise Http404 project_list", "user should have the 'code_doc.author_edit' permission on the Author object.", "have the 'code_doc.author_edit' permission on the Author object. \"\"\" form_class", "except Project.DoesNotExist: raise Http404 projects = Project.objects.filter(administrators=maintainer) return render( request,", "from ..models.projects import Project from ..models.authors import Author from ..forms", "login_required from django.contrib.auth.models import User from django.utils.decorators import method_decorator import", "the user should have the 'code_doc.author_edit' permission on the Author", "coauthor_list = ( Author.objects.filter(project__in=project_list).distinct().exclude(pk=author_id) ) return render( request, \"code_doc/authors/author_details.html\", {", "editing information about an Author .. 
note:: in order to", "{ \"project_list\": project_list, \"author\": author, \"user\": request.user, \"coauthor_list\": coauthor_list, },", "on the Author object. \"\"\" form_class = AuthorForm model =", "from django.contrib.auth.decorators import login_required from django.contrib.auth.models import User from django.utils.decorators", "from ..models.authors import Author from ..forms import AuthorForm from .permission_helpers", "views associated to the maintainers\"\"\" @method_decorator(login_required) def get(self, request, maintainer_id):", "maintainer = User.objects.get(pk=maintainer_id) except Project.DoesNotExist: raise Http404 projects = Project.objects.filter(administrators=maintainer)", "an Author .. note:: in order to be able to", "'code_doc.author_edit' permission on the Author object. \"\"\" form_class = AuthorForm", "model = Author def detail_author(request, author_id): try: author = Author.objects.get(pk=author_id)", "information about an Author .. note:: in order to be", "except Author.DoesNotExist: raise Http404 project_list = Project.objects.filter(authors=author) coauthor_list = (", "request, maintainer_id): try: maintainer = User.objects.get(pk=maintainer_id) except Project.DoesNotExist: raise Http404", "Project from ..models.authors import Author from ..forms import AuthorForm from", "existent Author with id %s\", kwargs[\"author_id\"] ) return None class", "<filename>code_doc/views/author_views.py from django.shortcuts import render from django.http import Http404 from", "AuthorForm model = Author permissions_on_object = (\"code_doc.author_edit\",) permissions_object_getter = \"get_author_from_request\"", "check if needed try: return Author.objects.get(pk=kwargs[\"author_id\"]) except Author.DoesNotExist: logger.warning( \"[AuthorUpdateView]", "(\"code_doc.author_edit\",) permissions_object_getter = \"get_author_from_request\" template_name = \"code_doc/authors/author_edit.html\" pk_url_kwarg = \"author_id\"", "\"\"\"A generic view of the 
authors in a list\"\"\" paginate_by", "\"user\": request.user, \"coauthor_list\": coauthor_list, }, ) class AuthorUpdateView(PermissionOnObjectViewMixin, UpdateView): \"\"\"View", "import AuthorForm from .permission_helpers import PermissionOnObjectViewMixin # logger for this", "to be able to edit an Author, the user should", "= Project.objects.filter(authors=author) coauthor_list = ( Author.objects.filter(project__in=project_list).distinct().exclude(pk=author_id) ) return render( request,", "return render( request, \"code_doc/maintainer_details.html\", {\"projects\": projects, \"maintainer\": maintainer}, ) @method_decorator(login_required)", "try: maintainer = User.objects.get(pk=maintainer_id) except Project.DoesNotExist: raise Http404 projects =", "= 10 template_name = \"code_doc/authors/author_list.html\" context_object_name = \"authors\" model =", "template_name = \"code_doc/authors/author_list.html\" context_object_name = \"authors\" model = Author def", "Author.DoesNotExist: raise Http404 project_list = Project.objects.filter(authors=author) coauthor_list = ( Author.objects.filter(project__in=project_list).distinct().exclude(pk=author_id)", "Author, the user should have the 'code_doc.author_edit' permission on the", "request, \"code_doc/maintainer_details.html\", {\"projects\": projects, \"maintainer\": maintainer}, ) @method_decorator(login_required) def post(self,", "maintainer_id): try: maintainer = User.objects.get(pk=maintainer_id) except Project.DoesNotExist: raise Http404 projects", "from ..forms import AuthorForm from .permission_helpers import PermissionOnObjectViewMixin # logger", "author, \"user\": request.user, \"coauthor_list\": coauthor_list, }, ) class AuthorUpdateView(PermissionOnObjectViewMixin, UpdateView):", "= Author.objects.get(pk=author_id) except Author.DoesNotExist: raise Http404 project_list = Project.objects.filter(authors=author) coauthor_list", "method_decorator import logging from ..models.projects import Project from 
..models.authors import", "project_list = Project.objects.filter(authors=author) coauthor_list = ( Author.objects.filter(project__in=project_list).distinct().exclude(pk=author_id) ) return render(", "coauthor_list, }, ) class AuthorUpdateView(PermissionOnObjectViewMixin, UpdateView): \"\"\"View for editing information", "except Author.DoesNotExist: logger.warning( \"[AuthorUpdateView] non existent Author with id %s\",", "logger = logging.getLogger(__name__) class AuthorListView(ListView): \"\"\"A generic view of the", "get(self, request, maintainer_id): try: maintainer = User.objects.get(pk=maintainer_id) except Project.DoesNotExist: raise", "paginate_by = 10 template_name = \"code_doc/authors/author_list.html\" context_object_name = \"authors\" model", "django.contrib.auth.decorators import login_required from django.contrib.auth.models import User from django.utils.decorators import", "for this file logger = logging.getLogger(__name__) class AuthorListView(ListView): \"\"\"A generic", "from django.shortcuts import render from django.http import Http404 from django.views.generic.edit", "\"author\": author, \"user\": request.user, \"coauthor_list\": coauthor_list, }, ) class AuthorUpdateView(PermissionOnObjectViewMixin,", "= \"get_author_from_request\" template_name = \"code_doc/authors/author_edit.html\" pk_url_kwarg = \"author_id\" def get_author_from_request(self,", "be able to edit an Author, the user should have", "%s\", kwargs[\"author_id\"] ) return None class MaintainerProfileView(View): \"\"\"Manages the views", "*args, **kwargs): # TODO check if needed try: return Author.objects.get(pk=kwargs[\"author_id\"])", "= User.objects.get(pk=maintainer_id) except Project.DoesNotExist: raise Http404 projects = Project.objects.filter(administrators=maintainer) return", "import Author from ..forms import AuthorForm from .permission_helpers import PermissionOnObjectViewMixin", "of the authors in a list\"\"\" paginate_by = 10 template_name", 
"Project.objects.filter(authors=author) coauthor_list = ( Author.objects.filter(project__in=project_list).distinct().exclude(pk=author_id) ) return render( request, \"code_doc/authors/author_details.html\",", "edit an Author, the user should have the 'code_doc.author_edit' permission", "object. \"\"\" form_class = AuthorForm model = Author permissions_on_object =", "Author.DoesNotExist: logger.warning( \"[AuthorUpdateView] non existent Author with id %s\", kwargs[\"author_id\"]", "the 'code_doc.author_edit' permission on the Author object. \"\"\" form_class =", "raise Http404 projects = Project.objects.filter(administrators=maintainer) return render( request, \"code_doc/maintainer_details.html\", {\"projects\":", "logging from ..models.projects import Project from ..models.authors import Author from", "permission on the Author object. \"\"\" form_class = AuthorForm model", "if needed try: return Author.objects.get(pk=kwargs[\"author_id\"]) except Author.DoesNotExist: logger.warning( \"[AuthorUpdateView] non", "the Author object. 
\"\"\" form_class = AuthorForm model = Author", "Author.objects.get(pk=author_id) except Author.DoesNotExist: raise Http404 project_list = Project.objects.filter(authors=author) coauthor_list =", "10 template_name = \"code_doc/authors/author_list.html\" context_object_name = \"authors\" model = Author", "= ( Author.objects.filter(project__in=project_list).distinct().exclude(pk=author_id) ) return render( request, \"code_doc/authors/author_details.html\", { \"project_list\":", "django.utils.decorators import method_decorator import logging from ..models.projects import Project from", "\"coauthor_list\": coauthor_list, }, ) class AuthorUpdateView(PermissionOnObjectViewMixin, UpdateView): \"\"\"View for editing", "ListView, View from django.contrib.auth.decorators import login_required from django.contrib.auth.models import User", "AuthorUpdateView(PermissionOnObjectViewMixin, UpdateView): \"\"\"View for editing information about an Author ..", "MaintainerProfileView(View): \"\"\"Manages the views associated to the maintainers\"\"\" @method_decorator(login_required) def", "django.views.generic import ListView, View from django.contrib.auth.decorators import login_required from django.contrib.auth.models", "Author.objects.filter(project__in=project_list).distinct().exclude(pk=author_id) ) return render( request, \"code_doc/authors/author_details.html\", { \"project_list\": project_list, \"author\":", "Project.objects.filter(administrators=maintainer) return render( request, \"code_doc/maintainer_details.html\", {\"projects\": projects, \"maintainer\": maintainer}, )", "generic view of the authors in a list\"\"\" paginate_by =", "= AuthorForm model = Author permissions_on_object = (\"code_doc.author_edit\",) permissions_object_getter =", "a list\"\"\" paginate_by = 10 template_name = \"code_doc/authors/author_list.html\" context_object_name =", "about an Author .. 
note:: in order to be able", "note:: in order to be able to edit an Author,", "logger.warning( \"[AuthorUpdateView] non existent Author with id %s\", kwargs[\"author_id\"] )", "django.contrib.auth.models import User from django.utils.decorators import method_decorator import logging from", "from django.contrib.auth.models import User from django.utils.decorators import method_decorator import logging", "= \"authors\" model = Author def detail_author(request, author_id): try: author", "\"code_doc/authors/author_edit.html\" pk_url_kwarg = \"author_id\" def get_author_from_request(self, request, *args, **kwargs): #", "Author permissions_on_object = (\"code_doc.author_edit\",) permissions_object_getter = \"get_author_from_request\" template_name = \"code_doc/authors/author_edit.html\"", "import logging from ..models.projects import Project from ..models.authors import Author", "User.objects.get(pk=maintainer_id) except Project.DoesNotExist: raise Http404 projects = Project.objects.filter(administrators=maintainer) return render(", "..forms import AuthorForm from .permission_helpers import PermissionOnObjectViewMixin # logger for", "\"get_author_from_request\" template_name = \"code_doc/authors/author_edit.html\" pk_url_kwarg = \"author_id\" def get_author_from_request(self, request,", "file logger = logging.getLogger(__name__) class AuthorListView(ListView): \"\"\"A generic view of", "import method_decorator import logging from ..models.projects import Project from ..models.authors", "request, *args, **kwargs): # TODO check if needed try: return", "logging.getLogger(__name__) class AuthorListView(ListView): \"\"\"A generic view of the authors in", "request.user, \"coauthor_list\": coauthor_list, }, ) class AuthorUpdateView(PermissionOnObjectViewMixin, UpdateView): \"\"\"View for", "render( request, \"code_doc/authors/author_details.html\", { \"project_list\": project_list, \"author\": author, \"user\": request.user,", "maintainers\"\"\" 
@method_decorator(login_required) def get(self, request, maintainer_id): try: maintainer = User.objects.get(pk=maintainer_id)", "View from django.contrib.auth.decorators import login_required from django.contrib.auth.models import User from", "class AuthorUpdateView(PermissionOnObjectViewMixin, UpdateView): \"\"\"View for editing information about an Author", "AuthorForm from .permission_helpers import PermissionOnObjectViewMixin # logger for this file", "try: author = Author.objects.get(pk=author_id) except Author.DoesNotExist: raise Http404 project_list =", "def get(self, request, maintainer_id): try: maintainer = User.objects.get(pk=maintainer_id) except Project.DoesNotExist:", "}, ) class AuthorUpdateView(PermissionOnObjectViewMixin, UpdateView): \"\"\"View for editing information about", "def get_author_from_request(self, request, *args, **kwargs): # TODO check if needed", "django.http import Http404 from django.views.generic.edit import UpdateView from django.views.generic import", "from django.views.generic.edit import UpdateView from django.views.generic import ListView, View from" ]
[ "\"generated_files\" # TODO: ugly parent.parent pathing if os.path.exists(GEN_FILE_DIR): sys.path.append(GEN_FILE_DIR) if", "{}\".format(dtb_json.split(os.sep)[-1], arch, soc_sloc)) # Final stats ac.print_mean_median_std_dev_for_dict_of_lists(soc_sloc_by_arch, \"\\nSloc Per Soc,", "import analyses_common as ac # Generated files directory GEN_FILE_DIR =", "this SoC for cmp_str in cmp_strs: driver_sloc = dfc.cmp_str_to_sloc(cmp_str) if", "stats ac.print_mean_median_std_dev_for_dict_of_lists(soc_sloc_by_arch, \"\\nSloc Per Soc, format: [arch : (mean, median,", "with \\'--linux-src-dir\\'\") sys.exit(1) if __name__ == \"__main__\": json_files = ac.argparse_and_get_files(\"Graph", "+= driver_sloc #print(\"{}: {}\".format(cmp_str, driver_sloc)) if arch not in soc_sloc_by_arch:", "dfc.JSON_CMP_STR) avg_sloc_by_arch, sloc_list_by_arch = get_sloc_avg_and_list_by_arch(cmp_by_arch, verbose = False) # Collection", "cmp_by_arch = ac.build_dict_two_lvl_cnt(json_files, dfc.JSON_ARC, dfc.JSON_CMP_STR) avg_sloc_by_arch, sloc_list_by_arch = get_sloc_avg_and_list_by_arch(cmp_by_arch, verbose", "sys.path.append(GEN_FILE_DIR) if os.path.exists(os.path.join(GEN_FILE_DIR, \"sloc_cnt.py\")): from sloc_cnt import DRIVER_NAME_TO_SLOC else: print(\"Error:", "__name__ == \"__main__\": json_files = ac.argparse_and_get_files(\"Graph SLOC/SoC data\") soc_sloc_by_arch: Dict[str,", "as json_file: data = json.load(json_file) soc_sloc = 0 arch =", "import os, sys, json from pathlib import Path from typing", "# Generated files directory GEN_FILE_DIR = str(Path(__file__).resolve().parent.parent) + os.sep +", "driver_sloc = avg_sloc_by_arch[arch] soc_sloc += driver_sloc #print(\"{}: {}\".format(cmp_str, driver_sloc)) if", "str(Path(__file__).resolve().parent.parent) + os.sep + \"generated_files\" # TODO: ugly parent.parent pathing", "by arch...\") from graph_dd_sloc_by_arch import get_sloc_avg_and_list_by_arch cmp_by_arch = ac.build_dict_two_lvl_cnt(json_files, dfc.JSON_ARC,", "sys.exit(1) if 
__name__ == \"__main__\": json_files = ac.argparse_and_get_files(\"Graph SLOC/SoC data\")", "= data[dfc.JSON_ARC] cmp_strs = data[dfc.JSON_CMP_STR] # Total SLOC for this", "data[dfc.JSON_ARC] cmp_strs = data[dfc.JSON_CMP_STR] # Total SLOC for this SoC", "if os.path.exists(GEN_FILE_DIR): sys.path.append(GEN_FILE_DIR) if os.path.exists(os.path.join(GEN_FILE_DIR, \"sloc_cnt.py\")): from sloc_cnt import DRIVER_NAME_TO_SLOC", "from graph_dd_sloc_by_arch import get_sloc_avg_and_list_by_arch cmp_by_arch = ac.build_dict_two_lvl_cnt(json_files, dfc.JSON_ARC, dfc.JSON_CMP_STR) avg_sloc_by_arch,", "print(\"Gathering SLOC average by arch...\") from graph_dd_sloc_by_arch import get_sloc_avg_and_list_by_arch cmp_by_arch", "SLOC file! Run \\'df_analyze.py\\' with \\'--linux-src-dir\\'\") sys.exit(1) if __name__ ==", "file! Run \\'df_analyze.py\\' with \\'--linux-src-dir\\'\") sys.exit(1) if __name__ == \"__main__\":", "[] else: soc_sloc_by_arch[arch].append(soc_sloc) print(\"{} ({}): {}\".format(dtb_json.split(os.sep)[-1], arch, soc_sloc)) # Final", "Generated files directory GEN_FILE_DIR = str(Path(__file__).resolve().parent.parent) + os.sep + \"generated_files\"", "dfc.JSON_ARC, dfc.JSON_CMP_STR) avg_sloc_by_arch, sloc_list_by_arch = get_sloc_avg_and_list_by_arch(cmp_by_arch, verbose = False) #", "Collection print(\"Iterating DTBs/SoCs...\") for dtb_json in json_files: with open(dtb_json) as", "# Final stats ac.print_mean_median_std_dev_for_dict_of_lists(soc_sloc_by_arch, \"\\nSloc Per Soc, format: [arch :", "import get_sloc_avg_and_list_by_arch cmp_by_arch = ac.build_dict_two_lvl_cnt(json_files, dfc.JSON_ARC, dfc.JSON_CMP_STR) avg_sloc_by_arch, sloc_list_by_arch =", "dfc import analyses_common as ac # Generated files directory GEN_FILE_DIR", "import Path from typing import Dict, List # Internal deps", "json.load(json_file) soc_sloc = 0 arch = data[dfc.JSON_ARC] cmp_strs = data[dfc.JSON_CMP_STR]", "data[dfc.JSON_CMP_STR] # Total SLOC for this SoC for cmp_str in", "from pathlib 
import Path from typing import Dict, List #", "print(\"Error: no SLOC file! Run \\'df_analyze.py\\' with \\'--linux-src-dir\\'\") sys.exit(1) if", "{} print(\"Gathering SLOC average by arch...\") from graph_dd_sloc_by_arch import get_sloc_avg_and_list_by_arch", "# Total SLOC for this SoC for cmp_str in cmp_strs:", "pathlib import Path from typing import Dict, List # Internal", "= {} print(\"Gathering SLOC average by arch...\") from graph_dd_sloc_by_arch import", "sys.path.append(\"..\") import df_common as dfc import analyses_common as ac #", "ac.build_dict_two_lvl_cnt(json_files, dfc.JSON_ARC, dfc.JSON_CMP_STR) avg_sloc_by_arch, sloc_list_by_arch = get_sloc_avg_and_list_by_arch(cmp_by_arch, verbose = False)", "parent.parent pathing if os.path.exists(GEN_FILE_DIR): sys.path.append(GEN_FILE_DIR) if os.path.exists(os.path.join(GEN_FILE_DIR, \"sloc_cnt.py\")): from sloc_cnt", "ac.argparse_and_get_files(\"Graph SLOC/SoC data\") soc_sloc_by_arch: Dict[str, List[int]] = {} print(\"Gathering SLOC", "= 0 arch = data[dfc.JSON_ARC] cmp_strs = data[dfc.JSON_CMP_STR] # Total", "deps os.chdir(sys.path[0]) sys.path.append(\"..\") import df_common as dfc import analyses_common as", "# Collection print(\"Iterating DTBs/SoCs...\") for dtb_json in json_files: with open(dtb_json)", "Dict, List # Internal deps os.chdir(sys.path[0]) sys.path.append(\"..\") import df_common as", "TODO: ugly parent.parent pathing if os.path.exists(GEN_FILE_DIR): sys.path.append(GEN_FILE_DIR) if os.path.exists(os.path.join(GEN_FILE_DIR, \"sloc_cnt.py\")):", "analyses_common as ac # Generated files directory GEN_FILE_DIR = str(Path(__file__).resolve().parent.parent)", "arch...\") from graph_dd_sloc_by_arch import get_sloc_avg_and_list_by_arch cmp_by_arch = ac.build_dict_two_lvl_cnt(json_files, dfc.JSON_ARC, dfc.JSON_CMP_STR)", "External deps import os, sys, json from pathlib import Path", "json from pathlib import Path from typing import Dict, List", "in cmp_strs: driver_sloc = dfc.cmp_str_to_sloc(cmp_str) 
if not driver_sloc: # Closed-source", "ac.print_mean_median_std_dev_for_dict_of_lists(soc_sloc_by_arch, \"\\nSloc Per Soc, format: [arch : (mean, median, std_dev)]\\n\")", "sys, json from pathlib import Path from typing import Dict,", "+ os.sep + \"generated_files\" # TODO: ugly parent.parent pathing if", "df_common as dfc import analyses_common as ac # Generated files", "import DRIVER_NAME_TO_SLOC else: print(\"Error: no SLOC file! Run \\'df_analyze.py\\' with", "data = json.load(json_file) soc_sloc = 0 arch = data[dfc.JSON_ARC] cmp_strs", "= json.load(json_file) soc_sloc = 0 arch = data[dfc.JSON_ARC] cmp_strs =", "directory GEN_FILE_DIR = str(Path(__file__).resolve().parent.parent) + os.sep + \"generated_files\" # TODO:", "json_file: data = json.load(json_file) soc_sloc = 0 arch = data[dfc.JSON_ARC]", "SoC for cmp_str in cmp_strs: driver_sloc = dfc.cmp_str_to_sloc(cmp_str) if not", "if not driver_sloc: # Closed-source driver driver_sloc = avg_sloc_by_arch[arch] soc_sloc", "arch not in soc_sloc_by_arch: soc_sloc_by_arch[arch] = [] else: soc_sloc_by_arch[arch].append(soc_sloc) print(\"{}", "soc_sloc_by_arch: Dict[str, List[int]] = {} print(\"Gathering SLOC average by arch...\")", "cmp_strs = data[dfc.JSON_CMP_STR] # Total SLOC for this SoC for", "driver_sloc)) if arch not in soc_sloc_by_arch: soc_sloc_by_arch[arch] = [] else:", "False) # Collection print(\"Iterating DTBs/SoCs...\") for dtb_json in json_files: with", "with open(dtb_json) as json_file: data = json.load(json_file) soc_sloc = 0", "print(\"Iterating DTBs/SoCs...\") for dtb_json in json_files: with open(dtb_json) as json_file:", "driver_sloc: # Closed-source driver driver_sloc = avg_sloc_by_arch[arch] soc_sloc += driver_sloc", "os.path.exists(GEN_FILE_DIR): sys.path.append(GEN_FILE_DIR) if os.path.exists(os.path.join(GEN_FILE_DIR, \"sloc_cnt.py\")): from sloc_cnt import DRIVER_NAME_TO_SLOC else:", "average by arch...\") from graph_dd_sloc_by_arch import get_sloc_avg_and_list_by_arch cmp_by_arch = 
ac.build_dict_two_lvl_cnt(json_files,", "files directory GEN_FILE_DIR = str(Path(__file__).resolve().parent.parent) + os.sep + \"generated_files\" #", "json_files: with open(dtb_json) as json_file: data = json.load(json_file) soc_sloc =", "0 arch = data[dfc.JSON_ARC] cmp_strs = data[dfc.JSON_CMP_STR] # Total SLOC", "from typing import Dict, List # Internal deps os.chdir(sys.path[0]) sys.path.append(\"..\")", "import df_common as dfc import analyses_common as ac # Generated", "soc_sloc)) # Final stats ac.print_mean_median_std_dev_for_dict_of_lists(soc_sloc_by_arch, \"\\nSloc Per Soc, format: [arch", "cmp_strs: driver_sloc = dfc.cmp_str_to_sloc(cmp_str) if not driver_sloc: # Closed-source driver", "not driver_sloc: # Closed-source driver driver_sloc = avg_sloc_by_arch[arch] soc_sloc +=", "= [] else: soc_sloc_by_arch[arch].append(soc_sloc) print(\"{} ({}): {}\".format(dtb_json.split(os.sep)[-1], arch, soc_sloc)) #", "open(dtb_json) as json_file: data = json.load(json_file) soc_sloc = 0 arch", "os.chdir(sys.path[0]) sys.path.append(\"..\") import df_common as dfc import analyses_common as ac", "soc_sloc_by_arch[arch].append(soc_sloc) print(\"{} ({}): {}\".format(dtb_json.split(os.sep)[-1], arch, soc_sloc)) # Final stats ac.print_mean_median_std_dev_for_dict_of_lists(soc_sloc_by_arch,", "\"__main__\": json_files = ac.argparse_and_get_files(\"Graph SLOC/SoC data\") soc_sloc_by_arch: Dict[str, List[int]] =", "# TODO: ugly parent.parent pathing if os.path.exists(GEN_FILE_DIR): sys.path.append(GEN_FILE_DIR) if os.path.exists(os.path.join(GEN_FILE_DIR,", "avg_sloc_by_arch[arch] soc_sloc += driver_sloc #print(\"{}: {}\".format(cmp_str, driver_sloc)) if arch not", "= False) # Collection print(\"Iterating DTBs/SoCs...\") for dtb_json in json_files:", "get_sloc_avg_and_list_by_arch(cmp_by_arch, verbose = False) # Collection print(\"Iterating DTBs/SoCs...\") for dtb_json", "dtb_json in json_files: with open(dtb_json) as json_file: data = json.load(json_file)", "as ac # Generated 
files directory GEN_FILE_DIR = str(Path(__file__).resolve().parent.parent) +", "{}\".format(cmp_str, driver_sloc)) if arch not in soc_sloc_by_arch: soc_sloc_by_arch[arch] = []", "for cmp_str in cmp_strs: driver_sloc = dfc.cmp_str_to_sloc(cmp_str) if not driver_sloc:", "driver driver_sloc = avg_sloc_by_arch[arch] soc_sloc += driver_sloc #print(\"{}: {}\".format(cmp_str, driver_sloc))", "Final stats ac.print_mean_median_std_dev_for_dict_of_lists(soc_sloc_by_arch, \"\\nSloc Per Soc, format: [arch : (mean,", "soc_sloc_by_arch[arch] = [] else: soc_sloc_by_arch[arch].append(soc_sloc) print(\"{} ({}): {}\".format(dtb_json.split(os.sep)[-1], arch, soc_sloc))", "Internal deps os.chdir(sys.path[0]) sys.path.append(\"..\") import df_common as dfc import analyses_common", "sloc_list_by_arch = get_sloc_avg_and_list_by_arch(cmp_by_arch, verbose = False) # Collection print(\"Iterating DTBs/SoCs...\")", "typing import Dict, List # Internal deps os.chdir(sys.path[0]) sys.path.append(\"..\") import", "ugly parent.parent pathing if os.path.exists(GEN_FILE_DIR): sys.path.append(GEN_FILE_DIR) if os.path.exists(os.path.join(GEN_FILE_DIR, \"sloc_cnt.py\")): from", "if os.path.exists(os.path.join(GEN_FILE_DIR, \"sloc_cnt.py\")): from sloc_cnt import DRIVER_NAME_TO_SLOC else: print(\"Error: no", "arch = data[dfc.JSON_ARC] cmp_strs = data[dfc.JSON_CMP_STR] # Total SLOC for", "Total SLOC for this SoC for cmp_str in cmp_strs: driver_sloc", "SLOC/SoC data\") soc_sloc_by_arch: Dict[str, List[int]] = {} print(\"Gathering SLOC average", "= get_sloc_avg_and_list_by_arch(cmp_by_arch, verbose = False) # Collection print(\"Iterating DTBs/SoCs...\") for", "({}): {}\".format(dtb_json.split(os.sep)[-1], arch, soc_sloc)) # Final stats ac.print_mean_median_std_dev_for_dict_of_lists(soc_sloc_by_arch, \"\\nSloc Per", "driver_sloc = dfc.cmp_str_to_sloc(cmp_str) if not driver_sloc: # Closed-source driver driver_sloc", "= avg_sloc_by_arch[arch] soc_sloc += driver_sloc #print(\"{}: {}\".format(cmp_str, 
driver_sloc)) if arch", "DTBs/SoCs...\") for dtb_json in json_files: with open(dtb_json) as json_file: data", "os, sys, json from pathlib import Path from typing import", "# Internal deps os.chdir(sys.path[0]) sys.path.append(\"..\") import df_common as dfc import", "pathing if os.path.exists(GEN_FILE_DIR): sys.path.append(GEN_FILE_DIR) if os.path.exists(os.path.join(GEN_FILE_DIR, \"sloc_cnt.py\")): from sloc_cnt import", "no SLOC file! Run \\'df_analyze.py\\' with \\'--linux-src-dir\\'\") sys.exit(1) if __name__", "as dfc import analyses_common as ac # Generated files directory", "#print(\"{}: {}\".format(cmp_str, driver_sloc)) if arch not in soc_sloc_by_arch: soc_sloc_by_arch[arch] =", "os.sep + \"generated_files\" # TODO: ugly parent.parent pathing if os.path.exists(GEN_FILE_DIR):", "deps import os, sys, json from pathlib import Path from", "cmp_str in cmp_strs: driver_sloc = dfc.cmp_str_to_sloc(cmp_str) if not driver_sloc: #", "List # Internal deps os.chdir(sys.path[0]) sys.path.append(\"..\") import df_common as dfc", "if arch not in soc_sloc_by_arch: soc_sloc_by_arch[arch] = [] else: soc_sloc_by_arch[arch].append(soc_sloc)", "print(\"{} ({}): {}\".format(dtb_json.split(os.sep)[-1], arch, soc_sloc)) # Final stats ac.print_mean_median_std_dev_for_dict_of_lists(soc_sloc_by_arch, \"\\nSloc", "arch, soc_sloc)) # Final stats ac.print_mean_median_std_dev_for_dict_of_lists(soc_sloc_by_arch, \"\\nSloc Per Soc, format:", "sloc_cnt import DRIVER_NAME_TO_SLOC else: print(\"Error: no SLOC file! 
Run \\'df_analyze.py\\'", "soc_sloc += driver_sloc #print(\"{}: {}\".format(cmp_str, driver_sloc)) if arch not in", "# External deps import os, sys, json from pathlib import", "Dict[str, List[int]] = {} print(\"Gathering SLOC average by arch...\") from", "get_sloc_avg_and_list_by_arch cmp_by_arch = ac.build_dict_two_lvl_cnt(json_files, dfc.JSON_ARC, dfc.JSON_CMP_STR) avg_sloc_by_arch, sloc_list_by_arch = get_sloc_avg_and_list_by_arch(cmp_by_arch,", "soc_sloc_by_arch: soc_sloc_by_arch[arch] = [] else: soc_sloc_by_arch[arch].append(soc_sloc) print(\"{} ({}): {}\".format(dtb_json.split(os.sep)[-1], arch,", "in soc_sloc_by_arch: soc_sloc_by_arch[arch] = [] else: soc_sloc_by_arch[arch].append(soc_sloc) print(\"{} ({}): {}\".format(dtb_json.split(os.sep)[-1],", "Run \\'df_analyze.py\\' with \\'--linux-src-dir\\'\") sys.exit(1) if __name__ == \"__main__\": json_files", "driver_sloc #print(\"{}: {}\".format(cmp_str, driver_sloc)) if arch not in soc_sloc_by_arch: soc_sloc_by_arch[arch]", "\\'--linux-src-dir\\'\") sys.exit(1) if __name__ == \"__main__\": json_files = ac.argparse_and_get_files(\"Graph SLOC/SoC", "os.path.exists(os.path.join(GEN_FILE_DIR, \"sloc_cnt.py\")): from sloc_cnt import DRIVER_NAME_TO_SLOC else: print(\"Error: no SLOC", "SLOC average by arch...\") from graph_dd_sloc_by_arch import get_sloc_avg_and_list_by_arch cmp_by_arch =", "verbose = False) # Collection print(\"Iterating DTBs/SoCs...\") for dtb_json in", "avg_sloc_by_arch, sloc_list_by_arch = get_sloc_avg_and_list_by_arch(cmp_by_arch, verbose = False) # Collection print(\"Iterating", "else: soc_sloc_by_arch[arch].append(soc_sloc) print(\"{} ({}): {}\".format(dtb_json.split(os.sep)[-1], arch, soc_sloc)) # Final stats", "else: print(\"Error: no SLOC file! 
Run \\'df_analyze.py\\' with \\'--linux-src-dir\\'\") sys.exit(1)", "= str(Path(__file__).resolve().parent.parent) + os.sep + \"generated_files\" # TODO: ugly parent.parent", "not in soc_sloc_by_arch: soc_sloc_by_arch[arch] = [] else: soc_sloc_by_arch[arch].append(soc_sloc) print(\"{} ({}):", "ac # Generated files directory GEN_FILE_DIR = str(Path(__file__).resolve().parent.parent) + os.sep", "soc_sloc = 0 arch = data[dfc.JSON_ARC] cmp_strs = data[dfc.JSON_CMP_STR] #", "= dfc.cmp_str_to_sloc(cmp_str) if not driver_sloc: # Closed-source driver driver_sloc =", "dfc.cmp_str_to_sloc(cmp_str) if not driver_sloc: # Closed-source driver driver_sloc = avg_sloc_by_arch[arch]", "== \"__main__\": json_files = ac.argparse_and_get_files(\"Graph SLOC/SoC data\") soc_sloc_by_arch: Dict[str, List[int]]", "+ \"generated_files\" # TODO: ugly parent.parent pathing if os.path.exists(GEN_FILE_DIR): sys.path.append(GEN_FILE_DIR)", "SLOC for this SoC for cmp_str in cmp_strs: driver_sloc =", "\\'df_analyze.py\\' with \\'--linux-src-dir\\'\") sys.exit(1) if __name__ == \"__main__\": json_files =", "= ac.build_dict_two_lvl_cnt(json_files, dfc.JSON_ARC, dfc.JSON_CMP_STR) avg_sloc_by_arch, sloc_list_by_arch = get_sloc_avg_and_list_by_arch(cmp_by_arch, verbose =", "= ac.argparse_and_get_files(\"Graph SLOC/SoC data\") soc_sloc_by_arch: Dict[str, List[int]] = {} print(\"Gathering", "if __name__ == \"__main__\": json_files = ac.argparse_and_get_files(\"Graph SLOC/SoC data\") soc_sloc_by_arch:", "Closed-source driver driver_sloc = avg_sloc_by_arch[arch] soc_sloc += driver_sloc #print(\"{}: {}\".format(cmp_str,", "data\") soc_sloc_by_arch: Dict[str, List[int]] = {} print(\"Gathering SLOC average by", "from sloc_cnt import DRIVER_NAME_TO_SLOC else: print(\"Error: no SLOC file! 
Run", "Path from typing import Dict, List # Internal deps os.chdir(sys.path[0])", "\"sloc_cnt.py\")): from sloc_cnt import DRIVER_NAME_TO_SLOC else: print(\"Error: no SLOC file!", "graph_dd_sloc_by_arch import get_sloc_avg_and_list_by_arch cmp_by_arch = ac.build_dict_two_lvl_cnt(json_files, dfc.JSON_ARC, dfc.JSON_CMP_STR) avg_sloc_by_arch, sloc_list_by_arch", "GEN_FILE_DIR = str(Path(__file__).resolve().parent.parent) + os.sep + \"generated_files\" # TODO: ugly", "DRIVER_NAME_TO_SLOC else: print(\"Error: no SLOC file! Run \\'df_analyze.py\\' with \\'--linux-src-dir\\'\")", "for dtb_json in json_files: with open(dtb_json) as json_file: data =", "import Dict, List # Internal deps os.chdir(sys.path[0]) sys.path.append(\"..\") import df_common", "json_files = ac.argparse_and_get_files(\"Graph SLOC/SoC data\") soc_sloc_by_arch: Dict[str, List[int]] = {}", "in json_files: with open(dtb_json) as json_file: data = json.load(json_file) soc_sloc", "for this SoC for cmp_str in cmp_strs: driver_sloc = dfc.cmp_str_to_sloc(cmp_str)", "= data[dfc.JSON_CMP_STR] # Total SLOC for this SoC for cmp_str", "# Closed-source driver driver_sloc = avg_sloc_by_arch[arch] soc_sloc += driver_sloc #print(\"{}:", "List[int]] = {} print(\"Gathering SLOC average by arch...\") from graph_dd_sloc_by_arch" ]
[ "decay progress = float(self.tokens - self.warmup_tokens) / float( max(1, self.final_tokens", "progress))) lr = self.learning_rate * lr_mult for param_group in optimizer.param_groups:", "(i.e. label is not -100) if self.tokens < self.warmup_tokens: #", "linear warmup lr_mult = float(self.tokens) / float(max(1, self.warmup_tokens)) else: #", "= float(self.tokens) / float(max(1, self.warmup_tokens)) else: # cosine learning rate", "warmup_tokens def on_train_batch_end(self, trainer, pl_module, batch, batch_idx, dataloader_idx): optimizer =", "0 self.final_tokens = final_tokens self.lr_decay = lr_decay self.warmup_tokens = warmup_tokens", "tokens processed this step (i.e. label is not -100) if", "if self.tokens < self.warmup_tokens: # linear warmup lr_mult = float(self.tokens)", "optimizer = trainer.optimizers[0] _, y = batch if self.lr_decay: self.tokens", "batch if self.lr_decay: self.tokens += (y >= 0).sum() # number", "(y >= 0).sum() # number of tokens processed this step", "self.final_tokens = final_tokens self.lr_decay = lr_decay self.warmup_tokens = warmup_tokens def", "self.final_tokens - self.warmup_tokens)) lr_mult = max(0.1, 0.5 * (1.0 +", "self.warmup_tokens: # linear warmup lr_mult = float(self.tokens) / float(max(1, self.warmup_tokens))", "self.warmup_tokens)) lr_mult = max(0.1, 0.5 * (1.0 + math.cos(math.pi *", "of tokens processed this step (i.e. 
label is not -100)", "warmup lr_mult = float(self.tokens) / float(max(1, self.warmup_tokens)) else: # cosine", "math.cos(math.pi * progress))) lr = self.learning_rate * lr_mult for param_group", "y = batch if self.lr_decay: self.tokens += (y >= 0).sum()", "is not -100) if self.tokens < self.warmup_tokens: # linear warmup", "/ float( max(1, self.final_tokens - self.warmup_tokens)) lr_mult = max(0.1, 0.5", "pl_module, batch, batch_idx, dataloader_idx): optimizer = trainer.optimizers[0] _, y =", "# cosine learning rate decay progress = float(self.tokens - self.warmup_tokens)", "class LearningRateDecayCallback(pl.Callback): def __init__(self, learning_rate, warmup_tokens=<PASSWORD>, final_tokens=<PASSWORD>, lr_decay=True): super().__init__() self.learning_rate", "self.tokens < self.warmup_tokens: # linear warmup lr_mult = float(self.tokens) /", "= self.learning_rate * lr_mult for param_group in optimizer.param_groups: param_group['lr'] =", "label is not -100) if self.tokens < self.warmup_tokens: # linear", "self.learning_rate * lr_mult for param_group in optimizer.param_groups: param_group['lr'] = lr", "= lr_decay self.warmup_tokens = warmup_tokens def on_train_batch_end(self, trainer, pl_module, batch,", "= batch if self.lr_decay: self.tokens += (y >= 0).sum() #", "float(self.tokens) / float(max(1, self.warmup_tokens)) else: # cosine learning rate decay", "= learning_rate self.tokens = 0 self.final_tokens = final_tokens self.lr_decay =", "self.warmup_tokens = warmup_tokens def on_train_batch_end(self, trainer, pl_module, batch, batch_idx, dataloader_idx):", "float(self.tokens - self.warmup_tokens) / float( max(1, self.final_tokens - self.warmup_tokens)) lr_mult", "self.lr_decay: self.tokens += (y >= 0).sum() # number of tokens", "trainer.optimizers[0] _, y = batch if self.lr_decay: self.tokens += (y", "lr_mult = float(self.tokens) / float(max(1, self.warmup_tokens)) else: # cosine learning", "learning_rate self.tokens = 0 self.final_tokens = final_tokens self.lr_decay 
= lr_decay", "(1.0 + math.cos(math.pi * progress))) lr = self.learning_rate * lr_mult", "on_train_batch_end(self, trainer, pl_module, batch, batch_idx, dataloader_idx): optimizer = trainer.optimizers[0] _,", ">= 0).sum() # number of tokens processed this step (i.e.", "_, y = batch if self.lr_decay: self.tokens += (y >=", "self.lr_decay = lr_decay self.warmup_tokens = warmup_tokens def on_train_batch_end(self, trainer, pl_module,", "def __init__(self, learning_rate, warmup_tokens=<PASSWORD>, final_tokens=<PASSWORD>, lr_decay=True): super().__init__() self.learning_rate = learning_rate", "trainer, pl_module, batch, batch_idx, dataloader_idx): optimizer = trainer.optimizers[0] _, y", "# linear warmup lr_mult = float(self.tokens) / float(max(1, self.warmup_tokens)) else:", "lr_mult = max(0.1, 0.5 * (1.0 + math.cos(math.pi * progress)))", "+ math.cos(math.pi * progress))) lr = self.learning_rate * lr_mult for", "< self.warmup_tokens: # linear warmup lr_mult = float(self.tokens) / float(max(1,", "number of tokens processed this step (i.e. label is not", "LearningRateDecayCallback(pl.Callback): def __init__(self, learning_rate, warmup_tokens=<PASSWORD>, final_tokens=<PASSWORD>, lr_decay=True): super().__init__() self.learning_rate =", "this step (i.e. 
label is not -100) if self.tokens <", "* progress))) lr = self.learning_rate * lr_mult for param_group in", "pl class LearningRateDecayCallback(pl.Callback): def __init__(self, learning_rate, warmup_tokens=<PASSWORD>, final_tokens=<PASSWORD>, lr_decay=True): super().__init__()", "+= (y >= 0).sum() # number of tokens processed this", "final_tokens=<PASSWORD>, lr_decay=True): super().__init__() self.learning_rate = learning_rate self.tokens = 0 self.final_tokens", "import pytorch_lightning as pl class LearningRateDecayCallback(pl.Callback): def __init__(self, learning_rate, warmup_tokens=<PASSWORD>,", "def on_train_batch_end(self, trainer, pl_module, batch, batch_idx, dataloader_idx): optimizer = trainer.optimizers[0]", "math import pytorch_lightning as pl class LearningRateDecayCallback(pl.Callback): def __init__(self, learning_rate,", "self.tokens = 0 self.final_tokens = final_tokens self.lr_decay = lr_decay self.warmup_tokens", "self.learning_rate = learning_rate self.tokens = 0 self.final_tokens = final_tokens self.lr_decay", "= float(self.tokens - self.warmup_tokens) / float( max(1, self.final_tokens - self.warmup_tokens))", "0.5 * (1.0 + math.cos(math.pi * progress))) lr = self.learning_rate", "= trainer.optimizers[0] _, y = batch if self.lr_decay: self.tokens +=", "- self.warmup_tokens) / float( max(1, self.final_tokens - self.warmup_tokens)) lr_mult =", "batch_idx, dataloader_idx): optimizer = trainer.optimizers[0] _, y = batch if", "0).sum() # number of tokens processed this step (i.e. 
label", "final_tokens self.lr_decay = lr_decay self.warmup_tokens = warmup_tokens def on_train_batch_end(self, trainer,", "progress = float(self.tokens - self.warmup_tokens) / float( max(1, self.final_tokens -", "= warmup_tokens def on_train_batch_end(self, trainer, pl_module, batch, batch_idx, dataloader_idx): optimizer", "rate decay progress = float(self.tokens - self.warmup_tokens) / float( max(1,", "self.tokens += (y >= 0).sum() # number of tokens processed", "else: # cosine learning rate decay progress = float(self.tokens -", "float( max(1, self.final_tokens - self.warmup_tokens)) lr_mult = max(0.1, 0.5 *", "learning rate decay progress = float(self.tokens - self.warmup_tokens) / float(", "step (i.e. label is not -100) if self.tokens < self.warmup_tokens:", "float(max(1, self.warmup_tokens)) else: # cosine learning rate decay progress =", "self.warmup_tokens) / float( max(1, self.final_tokens - self.warmup_tokens)) lr_mult = max(0.1,", "lr_decay=True): super().__init__() self.learning_rate = learning_rate self.tokens = 0 self.final_tokens =", "pytorch_lightning as pl class LearningRateDecayCallback(pl.Callback): def __init__(self, learning_rate, warmup_tokens=<PASSWORD>, final_tokens=<PASSWORD>,", "lr = self.learning_rate * lr_mult for param_group in optimizer.param_groups: param_group['lr']", "max(1, self.final_tokens - self.warmup_tokens)) lr_mult = max(0.1, 0.5 * (1.0", "processed this step (i.e. label is not -100) if self.tokens", "/ float(max(1, self.warmup_tokens)) else: # cosine learning rate decay progress", "= max(0.1, 0.5 * (1.0 + math.cos(math.pi * progress))) lr", "cosine learning rate decay progress = float(self.tokens - self.warmup_tokens) /", "self.warmup_tokens)) else: # cosine learning rate decay progress = float(self.tokens", "not -100) if self.tokens < self.warmup_tokens: # linear warmup lr_mult", "# number of tokens processed this step (i.e. 
label is", "lr_decay self.warmup_tokens = warmup_tokens def on_train_batch_end(self, trainer, pl_module, batch, batch_idx,", "warmup_tokens=<PASSWORD>, final_tokens=<PASSWORD>, lr_decay=True): super().__init__() self.learning_rate = learning_rate self.tokens = 0", "import math import pytorch_lightning as pl class LearningRateDecayCallback(pl.Callback): def __init__(self,", "super().__init__() self.learning_rate = learning_rate self.tokens = 0 self.final_tokens = final_tokens", "batch, batch_idx, dataloader_idx): optimizer = trainer.optimizers[0] _, y = batch", "- self.warmup_tokens)) lr_mult = max(0.1, 0.5 * (1.0 + math.cos(math.pi", "dataloader_idx): optimizer = trainer.optimizers[0] _, y = batch if self.lr_decay:", "* (1.0 + math.cos(math.pi * progress))) lr = self.learning_rate *", "as pl class LearningRateDecayCallback(pl.Callback): def __init__(self, learning_rate, warmup_tokens=<PASSWORD>, final_tokens=<PASSWORD>, lr_decay=True):", "-100) if self.tokens < self.warmup_tokens: # linear warmup lr_mult =", "max(0.1, 0.5 * (1.0 + math.cos(math.pi * progress))) lr =", "__init__(self, learning_rate, warmup_tokens=<PASSWORD>, final_tokens=<PASSWORD>, lr_decay=True): super().__init__() self.learning_rate = learning_rate self.tokens", "= 0 self.final_tokens = final_tokens self.lr_decay = lr_decay self.warmup_tokens =", "= final_tokens self.lr_decay = lr_decay self.warmup_tokens = warmup_tokens def on_train_batch_end(self,", "learning_rate, warmup_tokens=<PASSWORD>, final_tokens=<PASSWORD>, lr_decay=True): super().__init__() self.learning_rate = learning_rate self.tokens =", "if self.lr_decay: self.tokens += (y >= 0).sum() # number of" ]
[ "set to True, all Apprise Config files marked to be", "dealing # with a YAML file. # - If we", "LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A", "six.string_types): # We're just a simple URL string... schema =", "if not otherwise detected encoding = 'utf-8' # The default", "when working with the configurations that inherit this class. By", "= \\ self.default_config_format \\ if self.config_format is None else self.config_format", "specified {}.'.format(version)) return (list(), list()) # # global asset object", "read() causes the child class to do whatever it takes", "{}, entry #{}'.format( url, no + 1)) continue # add", "format ({}) was specified'.format( config_format)) return (list(), list()) # Dynamically", "ensures our tags are set correctly if 'tag' in _results:", "parse. verify_host (:obj:`bool`, optional): a flag kept with the parsed", "Invalid; correct it tokens[kw] = dict() # strip out processed", "made to retrieve the (same) data. This method can be", "default if one wasn't one detected # or enfored. 
config_format", "Version version = result.get('version', 1) if version != 1: #", "'Unsupported URL, entry #{}'.format(no + 1)) continue _results = plugins.url_to_dict(_url)", "Invalid syntax ConfigBase.logger.error( 'Invalid Apprise YAML version specified {}.'.format(version)) return", "< 0: err = 'A negative cache value ({}) was", "our URL results = SCHEMA_MAP[schema].parse_url(url) if not results: # Failed", "# Define what a valid line should look like valid_line_re", "in tokens: # Copy ourselves a template of our parsed", "\"\"\" Parse the specified content as though it were a", "**kwargs): \"\"\" Takes the specified content and attempts to detect", "result = yaml.load(content, Loader=yaml.SafeLoader) except (AttributeError, yaml.parser.ParserError, yaml.error.MarkedYAMLError) as e:", "self.config_format = kwargs.get('format').lower() if self.config_format not in CONFIG_FORMATS: # Simple", "url return results # Allow overriding the default config format", "if config_format not in CONFIG_FORMATS: # Invalid configuration type specified", "multiple ones separated by a # comma and/or space includes", "we remotely retrieve also has an `include` reference, we will", "intend to use it. insecure_include by default are disabled. When", "import ConfigFormat from ..common import CONFIG_FORMATS from ..common import ContentIncludeMode", "using the # parsed URL information plugin = plugins.SCHEMA_MAP[_results['schema']](**_results) #", "Parse the specified content as though it were a yaml", "level results['recursion'] = self.recursion - 1 # Insecure Includes flag", "the loaded notification servers \"\"\" if not isinstance(self._cached_servers, list): #", "limit reached; ignoring Include URL: %s' % url) if self._cached_servers:", "one level results['recursion'] = self.recursion - 1 # Insecure Includes", "to associate with the newly added # notifications if any", "keys (if SSL transactions take place). 
Unless under very specific", "apprise developer may wish to load configuration from memory (in", "Our cached response object self._cached_servers = list() # read() causes", "at self._cached_time = time.time() # Nothing more to do; return", "'Failed to load Apprise configuration from {}'.format( self.url())) # Set", "1)) continue # add our results to our global set", "URL as a base # to work with r =", "permission notice shall be included in # all copies or", "since we don't know # what the format is yet", "the (same) data. This method can be somewhat inefficient if", "the # parsed URL information plugin = plugins.SCHEMA_MAP[results['schema']](**results) # Create", "string or multiple ones separated by a # comma and/or", "# We can't set a function or non-string set value", "entries from {}'.format( len(self._cached_servers), self.url())) else: self.logger.warning( 'Failed to load", "response object self._cached_servers = list() # read() causes the child", "list) and self.cache: # We have enough reason to look", "look further into our cached content # and verify it", "# -*- coding: utf-8 -*- # # Copyright (C) 2020", "(same) data. This method can be somewhat inefficient if disabled.", "our asset object with the new value setattr(asset, k, v.strip())", "..AppriseAsset import AppriseAsset from ..URLBase import URLBase from ..common import", "portions of the Software. # # THE SOFTWARE IS PROVIDED", "our # content again. age_in_sec = time.time() - self._cached_time if", "existing compilation. 
If the file we remotely retrieve also has", "in kwargs \\ and isinstance(kwargs['format'], six.string_types): # Store the enforced", "containing all of the information parsed from our URL results", "line)) # Assume this is a file we shouldn't be", "Apprise YAML based configuration specified.') return (list(), list()) # YAML", "None: # Plan B is to assume we're dealing with", "set the cache value to an int identifying the number", "on line {}.'.format(url, line)) continue # Build a list of", "content for speed self._cached_servers = None # Initialize our recursion", "None), dict): # Invalid; correct it tokens[kw] = dict() #", "self.cache: # We have enough reason to look further into", "by Apprise object. # The below ensures our tags are", "# First try to get it's integer value try: results['cache']", "Track our entries entry = 0 while len(results): # Increment", "contains 'include' entries (even file:// based ones). In these circumstances", "is returned. \"\"\" results = URLBase.parse_url(url, verify_host=verify_host) if not results:", "specified, then it is auto detected. \"\"\" if config_format is", "of content we can pull from self.servers() return len(self._cached_servers) def", "early exit return None # Attempt to detect configuration if", "enforced config format self.config_format = kwargs.get('format').lower() if self.config_format not in", "include a file:// one it woul fail. However this include", "no urls urls = list() # Iterate over each URL", "schema _schema = GET_SCHEMA_RE.match(key) if _schema is None: # Log", "Software without restriction, including without limitation the rights # to", "can not be used. 
ConfigBase.logger.warning( 'Could not load URL {}", "Returns: A dictionary is returned containing the URL fully parsed", "detected; recursively populate them # If we have been configured", "valid_line_re = re.compile( r'^\\s*(?P<line>([;#]+(?P<comment>.*))|' r'(\\s*(?P<tags>[^=]+)=|=)?\\s*' r'(?P<url>[a-z0-9]{2,9}://.*)|' r'include\\s+(?P<config>.+))?\\s*$', re.I) try: #", "= list() # Iterate over each URL for no, url", "schema in plugins.SCHEMA_MAP: tokens = ConfigBase.__extract_special_tokens( schema, tokens) # Copy", "Detect any matches matches = \\ {k[1:]: str(v) for k,", "yaml entries parsed. The idea here is we can post", "must set strings with a string ConfigBase.logger.warning( 'Invalid asset value", "Attempt to acquire the schema at the very least to", "= getattr(ConfigBase, 'config_parse_{}'.format(config_format)) # Execute our config parse function which", "can now add this servers found # in this configuration", "the user match = VALID_TOKEN.match(key) if not match: ConfigBase.logger.warning( 'Ignoring", "URL: {}'.format(plugin.url())) except Exception as e: # the arguments are", "using the 'include' # line found in configuration files. allow_cross_includes", "set)): # populate and/or override any results populated by #", "entries when loaded. include <ConfigURL> \"\"\" # A list of", "last acquired schema schema = None for key, tokens in", "HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #", "list()) url, config = result.group('url'), result.group('config') if not (url or", "tags are set correctly if 'tag' in _results: # Tidy", "cached response object self._cached_servers = list() # read() causes the", "is None: # Detect the format config_format = ConfigBase.detect_config_format(content) if", "included in # all copies or substantial portions of the", "(list(), list()) # YAML Version version = result.get('version', 1) if", "of additional configuration files referenced. 
You may also optionally associate", "returned if our content was downloaded correctly. \"\"\" if not", "if you do intend to use it. insecure_include by default", "understand the consequences. You can alternatively set the cache value", "string type ConfigBase.logger.error( 'Invalid Apprise TEXT based configuration specified.') return", "later use to verify SSL keys (if SSL transactions take", "tokens = result.get('asset', None) if tokens and isinstance(tokens, dict): for", "# them. <Tag(s)>=<URL> # Or you can use this format", "'Loaded URL: {}'.format(plugin.url())) except Exception as e: # the arguments", "USE OR OTHER DEALINGS IN # THE SOFTWARE. import os", "2 deep. If set to zero it is off. There", "like (servers, configs) where: - servers contains a list of", "space includes = parse_urls(includes) elif not isinstance(includes, (list, tuple)): #", "_results = plugins.url_to_dict(url) if _results is None: ConfigBase.logger.warning( 'Unparseable URL", "in the Asset is a boolean, then # we want", "call can be made to retrieve the (same) data. This", "v is None: # Convert to an empty string v", "not isinstance(includes, (list, tuple)): # Not a problem; we simply", "based configuration file attempted to include a file:// one it", "schema = None for key, tokens in it: # Test", "a copy of our dictionary tokens = tokens.copy() for kw,", "through it if recursion is set to 2 deep. If", "returns None \"\"\" # Detect Format Logic: # - A", "URL results['insecure_includes'] = self.insecure_includes try: # Attempt to create an", "# Not a problem; we simply have no urls urls", "set to True. There are cases where a self hosting", "= re.compile( r'^\\s*(?P<line>([;#]+(?P<comment>.*))|' r'(\\s*(?P<tags>[^=]+)=|=)?\\s*' r'(?P<url>[a-z0-9]{2,9}://.*)|' r'include\\s+(?P<config>.+))?\\s*$', re.I) try: # split", "it were a yaml file specifically formatted for Apprise. 
Return", "isinstance(cache, bool) else int(cache) if self.cache < 0: err =", "re.compile( r'^\\s*(?P<line>([;#]+(?P<comment>.*))|' r'(\\s*(?P<tags>[^=]+)=|=)?\\s*' r'(?P<url>[a-z0-9]{2,9}://.*)|' r'include\\s+(?P<config>.+))?\\s*$', re.I) try: # split our", "re.compile( r'(?P<token>[a-z0-9][a-z0-9_]+)', re.I) class ConfigBase(URLBase): \"\"\" This is the base", "def config_parse_yaml(content, asset=None): \"\"\" Parse the specified content as though", "a base # to work with r = _results.copy() #", "# Store the url and ignore arguments associated configs.extend(u for", "URLBase from ..common import ConfigFormat from ..common import CONFIG_FORMATS from", "consistent when working with the configurations that inherit this class.", "2020 <NAME> <<EMAIL>> # All rights reserved. # # This", "if 'cache' in results['qsd']: # First try to get it's", "# the include keyword configs = list() # Define what", "0 while len(results): # Increment our entry count entry +=", "safe guard then # anything else. 128KB (131072B) max_buffer_size =", "if recursion is set to 2 deep. If set to", "in tokens.items() if not k.startswith(prefix)} # Update our entries tokens[kw].update(matches)", "base protocols ConfigBase.logger.warning( 'Including {}:// based configuration is prohibited. '", "or k.endswith('_'): # Entries are considered reserved if they start", "is True: # we have not expired, return False return", "config_path = os.getcwd() def __init__(self, cache=True, recursion=0, insecure_includes=False, **kwargs): \"\"\"", "line)) continue # Build a list of tags to associate", "object as dictionary # containing all of the information parsed", "example, only a file:// based configuration can include another file://", "# A list of additional configuration files referenced using #", "has not expired. if self.cache is True: # we have", "default, the last element of the list is removed. 
\"\"\"", "pull from self.servers() return iter(self._cached_servers) def __len__(self): \"\"\" Returns the", "has something to take action # with. ConfigBase.logger.warning( 'Invalid URL", "the content was last retrieved on. This place a role", "= asset if isinstance(asset, AppriseAsset) else self.asset # Execute our", "r'^\\s*(?P<line>([;#]+(?P<comment>.*))|' r'(\\s*(?P<tags>[^=]+)=|=)?\\s*' r'(?P<url>[a-z0-9]{2,9}://.*)|' r'include\\s+(?P<config>.+))?\\s*$', re.I) try: # split our content", "dict): # Invalid; correct it tokens[kw] = dict() # strip", "content was last retrieved on. This place a role #", "# Insecure Includes flag can never be parsed from the", "# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF", "expired(self): \"\"\" Simply returns True if the configuration should be", "# Prepare our Asset Object results['asset'] = \\ asset if", "if k.startswith(prefix)} if not matches: # we're done with this", "= ConfigFormat.TEXT return config_format @staticmethod def config_parse(content, asset=None, config_format=None, **kwargs):", "else self.asset # Execute our config parse function which always", "is None: # the loop above failed to match anything", "kwargs \\ and isinstance(kwargs['format'], six.string_types): # Store the enforced config", "Configuration files were detected; recursively populate them # If we", "template of our parsed URL as a base # to", "= _results.copy() # We are a url string with additional", "If a format isn't specified, then it is auto detected.", "k.startswith(prefix)} if not matches: # we're done with this entry", "not isinstance(content, six.string_types): # Set the time our content was", "we must set strings with a string ConfigBase.logger.warning( 'Invalid asset", "config format self.config_format = kwargs.get('format').lower() if self.config_format not in CONFIG_FORMATS:", "re.I) class ConfigBase(URLBase): \"\"\" This is the base class for", "you want to fully parse. 
verify_host (:obj:`bool`, optional): a flag", "default all configuration is not includable using the 'include' #", "pound/hashtag allow for line comments # # One or more", "child classes will later use to verify SSL keys (if", "expected string type ConfigBase.logger.error( 'Invalid Apprise TEXT based configuration specified.')", "\\ asset if isinstance(asset, AppriseAsset) else AppriseAsset() try: # Attempt", "we find a string that starts with a URL, or", "# Invalid content ConfigBase.logger.error( 'Invalid Apprise YAML data specified.') ConfigBase.logger.debug(", "type if detected, otherwise it returns None \"\"\" # Detect", "add our result set r.update(tokens) # add our results to", "if you want these 'include' entries to be honored, this", "# Assume this is a file we shouldn't be parsing.", "URL {} on line {}.'.format(url, line)) continue # Build a", "True. There are cases where a self hosting apprise developer", "our parsed URL as a base # to work with", "we are not caching our response and are required to", "in results['qsd']: # First try to get it's integer value", "128KB (131072B) max_buffer_size = 131072 # By default all configuration", "we don't know # what the format is yet config_format", "config: ConfigBase.logger.debug('Include URL: {}'.format(config)) # Store our include line configs.append(config.strip())", "ConfigBase.__extract_special_tokens( schema, entries) # Extend our dictionary with our new", "This keyword requires us to fetch more configuration from another", "None: # Failed to parse the server URL ConfigBase.logger.warning( 'Unparseable", "from ..utils import GET_SCHEMA_RE from ..utils import parse_list from ..utils", "OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF", "value setattr(asset, k, v.strip()) else: # we must set strings", "default are disabled. When set to True, all Apprise Config", "expired. 
recursion defines how deep we recursively handle entries that", "== \\ ContentIncludeMode.NEVER: # Prevent the loading if insecure base", "global set results.append(_results) else: # Unsupported ConfigBase.logger.warning( 'Unsupported Apprise YAML", "\\ {k[1:]: str(v) for k, v in tokens.items() if k.startswith(prefix)}", "# Simple error checking err = 'An invalid config format", "notice shall be included in # all copies or substantial", "{k[1:]: str(v) for k, v in tokens.items() if k.startswith(prefix)} if", "of content we can pull from self.servers() return iter(self._cached_servers) def", "equal sign we know # we're dealing with a TEXT", "None # Don't read any more of this amount of", "using the # parsed URL information cfg_plugin = SCHEMA_MAP[results['schema']](**results) except", "configuration files referenced using # the include keyword configs =", "on the first non-comment and non blank line # matched.", "plugins - configs contains a list of additional configuration files", "= insecure_includes if 'encoding' in kwargs: # Store the encoding", "with an underscore ConfigBase.logger.warning( 'Ignored asset key \"{}\".'.format(k)) continue if", "@staticmethod def config_parse_text(content, asset=None): \"\"\" Parse the specified content as", "Invalid syntax ConfigBase.logger.error( 'Invalid Apprise TEXT configuration format found '", "just a simple URL string... 
schema = GET_SCHEMA_RE.match(url) if schema", "% url) if self._cached_servers: self.logger.info('Loaded {} entries from {}'.format( len(self._cached_servers),", "-*- coding: utf-8 -*- # # Copyright (C) 2020 <NAME>", "sign we know # we're dealing with a TEXT format.", "= {k: v for k, v in tokens.items() if not", "True: # we have not expired, return False return False", "time.time() # Nothing more to do; return our empty cache", "Apprise TEXT based configuration specified.') return (list(), list()) for line,", "and this permission notice shall be included in # all", "parse function which always returns a tuple # of our", "\"\"\" Performs reads loaded configuration and returns all of the", "detect configuration') return (list(), list()) if config_format not in CONFIG_FORMATS:", "self.servers() return iter(self._cached_servers) def __len__(self): \"\"\" Returns the total number", "a list of content we can pull from self.servers() return", "Comment/empty line; do nothing continue if config: ConfigBase.logger.debug('Include URL: {}'.format(config))", "integer; now treat it as a bool # instead: results['cache']", "data specified.') ConfigBase.logger.debug( 'YAML Exception:{}{}'.format(os.linesep, e)) return (list(), list()) if", "GET_SCHEMA_RE.match(key) if _schema is None: # Log invalid entries so", "# Increment our entry count entry += 1 # Grab", "information plugin = plugins.SCHEMA_MAP[_results['schema']](**_results) # Create log entry of loaded", "config_format)) return (list(), list()) # Dynamically load our parse_ function", "read the error printed to screen and take action #", "def __bool__(self): \"\"\" Allows the Apprise object to be wrapped", "URL ConfigBase.logger.debug( 'Loaded URL: {}'.format(plugin.url())) except Exception as e: #", "or simply no data content = self.read(**kwargs) if not isinstance(content,", "Apprise. Args: url (str): The URL you want to fully", "a single inline string or multiple ones separated by #", "THE SOFTWARE. 
import os import re import six import yaml", "can never be parsed from the URL; we decrement #", "servers \"\"\" if not isinstance(self._cached_servers, list): # Generate ourselves a", "of seconds the previously retrieved can exist for before it", "return our empty cache list return self._cached_servers # Our Configuration", "by line content = re.split(r'\\r*\\n', content) except TypeError: # content", "URL for no, url in enumerate(urls): # Our results object", "the specified content and attempts to detect the format type", "\"\"\" Allows the Apprise object to be wrapped in an", "as e: # the arguments are invalid or can not", "'schema' in entries: del entries['schema'] # support our special tokens", "'+') # Detect any matches matches = \\ {k[1:]: str(v)", "Generate ourselves a list of content we can pull from", "config_parse(content, asset=None, config_format=None, **kwargs): \"\"\" Takes the specified config content", "False # Recursion can never be parsed from the URL;", "the specified string to # match that. setattr(asset, k, parse_bool(v))", "import yaml import time from .. import plugins from ..AppriseAsset", "is auto detected. \"\"\" if config_format is None: # Detect", "specified.') return (list(), list()) # YAML Version version = result.get('version',", "not be used. ConfigBase.logger.warning( 'Could not load URL {} on", "treated as being in ALWAYS mode. Take a file:// based", "# Verify our cache time to determine whether we will", "# Detect the format config_format = ConfigBase.detect_config_format(content) if not config_format:", "checking err = 'An invalid config format ({}) was specified.'.format(", "'include' entries (even file:// based ones). 
In these circumstances if", "ConfigBase.detect_config_format(content) if not config_format: # We couldn't detect configuration ConfigBase.logger.error('Could", "was loaded return (servers, configs) @staticmethod def config_parse_yaml(content, asset=None): \"\"\"", "tokens tokens = {k: v for k, v in tokens.items()", "not expired. if self.cache is True: # we have not", "list of additional configuration files referenced. You may also optionally", "is None: # Plan B is to assume we're dealing", "# Determine our prefix: prefix = meta.get('prefix', '+') # Detect", "string followed by a colon, we know we're dealing #", "possible if insecure_includes is set to True. There are cases", "config path url = os.path.join(self.config_path, url) url = '{}://{}'.format(schema, URLBase.quote(url))", "must be set to True. \"\"\" super(ConfigBase, self).__init__(**kwargs) # Tracks", "not result: # Invalid syntax ConfigBase.logger.error( 'Invalid Apprise TEXT configuration", "the rights # to use, copy, modify, merge, publish, distribute,", "file) which will be included # as additional configuration entries", "in more. This is more of a safe guard then", "the configuration should be considered as expired or False if", "is a boolean, then # we want to convert the", "valid or not VALID_TOKEN = re.compile( r'(?P<token>[a-z0-9][a-z0-9_]+)', re.I) class ConfigBase(URLBase):", "# our configuration based urls. 
schema = GET_SCHEMA_RE.match(url) if schema", "continue if not (hasattr(asset, k) and isinstance(getattr(asset, k), (bool, six.string_types))):", "with a URL, or our tag # definitions (accepting commas)", "schema = _schema.group('schema').lower() # Store our URL and Schema Regex", "results.append(_results) elif isinstance(url, dict): # We are a url string", "(bool, six.string_types)) and isinstance(getattr(asset, k), bool)): # If the object", "if v is None: # Convert to an empty string", "None # By default set our return value to None", "tags to associate with the newly added # notifications if", "prohibited. ' 'Ignoring URL {}'.format(schema, url)) continue # Prepare our", "# definitions (accepting commas) followed by an equal sign we", "to our global set results.append(_results) elif isinstance(url, dict): # We", "self.read(**kwargs) if not isinstance(content, six.string_types): # Set the time our", "self.logger.warning(err) raise TypeError(err) except (ValueError, TypeError): err = 'An invalid", "ones separated by a # comma and/or space includes =", "is hereby granted, free of charge, to any person obtaining", "with a relative path; prepend # our current config path", "value ConfigBase.logger.warning( 'Invalid asset key \"{}\".'.format(k)) continue if v is", "was downloaded correctly. \"\"\" if not isinstance(self._cached_servers, list): # Generate", "invalid or can not be used. self.logger.warning( 'Could not load", "Create log entry of loaded URL ConfigBase.logger.debug( 'Loaded URL: {}'.format(plugin.url()))", "not cause the content to be retrieved again. For local", "config_format not in CONFIG_FORMATS: # Invalid configuration type specified ConfigBase.logger.error(", "each line of the file to attempt to detect it", "supported configuration sources \"\"\" # The Default Encoding to use", "schema {}.'.format(schema)) continue # Parse our url details of the", "allow # our configuration based urls. 
schema = GET_SCHEMA_RE.match(url) if", "self.schemas() and not self.insecure_includes) or \\ SCHEMA_MAP[schema].allow_cross_includes == \\ ContentIncludeMode.NEVER:", "URL information plugin = plugins.SCHEMA_MAP[_results['schema']](**_results) # Create log entry of", "# content again. age_in_sec = time.time() - self._cached_time if age_in_sec", "associate an asset with the notification. The file syntax is:", "CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR", "of relative include config_path = os.getcwd() def __init__(self, cache=True, recursion=0,", "' 'configuration entry #{}, item #{}' .format(key, no + 1,", "retrieved can exist for before it should be considered expired.", "# to use, copy, modify, merge, publish, distribute, sublicense, and", "otherwise # detected by the sub-modules default_config_format = ConfigFormat.TEXT #", "format specified {}'.format( results['format'])) del results['format'] # Defines the encoding", "iter(self._cached_servers) def __len__(self): \"\"\" Returns the total number of servers", "the config path manages the handling of relative include config_path", "person obtaining a copy # of this software and associated", "'{}://{}'.format(schema, URLBase.quote(url)) else: # Ensure our schema is always in", "speed self._cached_servers = None # Initialize our recursion value self.recursion", "our configuration should be considered # missing and/or expired. return", "to retrieve the (same) data. 
This method can be somewhat", "on our config format fn = getattr(ConfigBase, 'config_parse_{}'.format(config_format)) # Execute", "<Tag(s)>=<URL> # Or you can use this format (no tags", "our dictionary with our new entries r.update(entries) # add our", "as a base to # work with r = _results.copy()", "_results.copy() # We are a url string with additional unescaped", "not isinstance(self._cached_servers, list): # Generate ourselves a list of content", "a base to # work with r = _results.copy() #", "# to work with r = _results.copy() # We are", "= self.read(**kwargs) if not isinstance(content, six.string_types): # Set the time", "to look further into our cached content # and verify", "cache value if 'cache' in results['qsd']: # First try to", "..common import ContentIncludeMode from ..utils import GET_SCHEMA_RE from ..utils import", "be a valid schema of a supported plugin type -", "was not expected string type ConfigBase.logger.error( 'Invalid Apprise configuration specified.')", "global_tags else: # Just use the global settings _results['tag'] =", "it woul fail. However this include would be possible if", "which will be included # as additional configuration entries when", "# Track the URL to-load _url = None # Track", "includes includes = list() # Iterate over each config URL", "have cached results to return; use them return self._cached_servers #", "insecure_includes is set to True. 
There are cases where a", "results['tag'] = set(parse_list(result.group('tags'))) # Prepare our Asset Object results['asset'] =", "to return; use them return self._cached_servers # Our cached response", "in SCHEMA_MAP: ConfigBase.logger.warning( 'Unsupported include schema {}.'.format(schema)) continue # Parse", "`include` reference, we will only advance through it if recursion", "if 'encoding' in kwargs: # Store the encoding self.encoding =", "are not caching our response and are required to #", "dict): # We are a url string with additional unescaped", "\"\"\" return None def expired(self): \"\"\" Simply returns True if", "to be in STRICT mode are treated as being in", "and identify a # configuration location (like this file) which", "urls root directive # urls = result.get('urls', None) if not", "the time our content was cached at self._cached_time = time.time()", "keyword requires us to fetch more configuration from another source", "There is no limit to how high you set this", "unescaped options. In # this case we want to iterate", "set(parse_list(result.group('tags'))) # Prepare our Asset Object results['asset'] = \\ asset", "Apprise YAML data specified.') ConfigBase.logger.debug( 'YAML Exception:{}{}'.format(os.linesep, e)) return (list(),", "convert the specified string to # match that. setattr(asset, k,", "v in tokens.items() if not k.startswith(prefix)} # Update our entries", "any preset tags global_tags = set(parse_list(tags)) # # include root", "Returns an iterator to our server list \"\"\" if not", "with additional unescaped options. In # this case we want", "would be possible if insecure_includes is set to True. 
There", "to TEXT config_format = ConfigFormat.TEXT return config_format @staticmethod def config_parse(content,", "result.group('config') if not (url or config): # Comment/empty line; do", "exit return None # Attempt to detect configuration if result.group('yaml'):", "ConfigBase.logger.debug( 'YAML Exception:{}{}'.format(os.linesep, e)) return (list(), list()) if not isinstance(result,", "pull from self.servers() # Pop the element off of the", "take action # with. ConfigBase.logger.warning( 'Invalid URL {}, entry #{}'.format(url,", "time our content was cached at self._cached_time = time.time() return", "default config format if 'format' in results['qsd']: results['format'] = results['qsd'].get('format')", "CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS", "# YAML Version version = result.get('version', 1) if version !=", "downloaded correctly. \"\"\" if not isinstance(self._cached_servers, list): # Generate ourselves", "work with r = _results.copy() # add our result set", "OR OTHER DEALINGS IN # THE SOFTWARE. import os import", "bool # instead: results['cache'] = parse_bool(results['qsd']['cache']) return results @staticmethod def", "Strip out any tokens we know that we can't accept", "to attempt to detect it # stop the moment a", "return (list(), list()) url, config = result.group('url'), result.group('config') if not", "# Permission is hereby granted, free of charge, to any", "Asset Object results['asset'] = asset # No cache is required", "self.servers() return len(self._cached_servers) def __bool__(self): \"\"\" Allows the Apprise object", "take place). Unless under very specific circumstances, it is strongly", "We have enough reason to look further into our cached", "INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #", "% str(e)) continue # if we reach here, we successfully", "\"\"\" Returns the indexed server entry associated with the loaded", "has something to take action # with. 
ConfigBase.logger.warning( 'Ignored entry", "asset=asset) @staticmethod def config_parse_text(content, asset=None): \"\"\" Parse the specified content", "invalid configuration format ({}) was specified'.format( config_format)) return (list(), list())", "not in CONFIG_FORMATS: # Simple error checking err = 'An", "# This code is licensed under the MIT License. #", "_results.copy() # add our result set r.update(tokens) # add our", "# Store the enforced config format self.config_format = kwargs.get('format').lower() if", "disable caching if you understand the consequences. You can alternatively", "# add our results to our global set results.append(r) elif", "schema = GET_SCHEMA_RE.match(url) if schema is None: # Log invalid", "can post process a set of tokens provided in a", "class. By default we cache our responses so that subsiquent", "classes \"\"\" return None def expired(self): \"\"\" Simply returns True", "= None for key, tokens in it: # Test our", "of content we can pull from self.servers() # Pop the", "includable using the 'include' # line found in configuration files.", "the file to attempt to detect it # stop the", "keep it low if you do intend to use it.", "loaded. include <ConfigURL> \"\"\" # A list of loaded Notification", "include would be possible if insecure_includes is set to True.", "URLBase.parse_url(url, verify_host=verify_host) if not results: # We're done; we failed", "in plugins.SCHEMA_MAP: entries = ConfigBase.__extract_special_tokens( schema, entries) # Extend our", "Asset Object _results['asset'] = asset try: # Attempt to create", "__nonzero__(self): \"\"\" Allows the Apprise object to be wrapped in", "r.update(tokens) # add our results to our global set results.append(r)", "@staticmethod def config_parse(content, asset=None, config_format=None, **kwargs): \"\"\" Takes the specified", "simple URL string... 
schema = GET_SCHEMA_RE.match(url) if schema is None:", "_results['asset'] = asset try: # Attempt to create an instance", "directive # includes = result.get('include', None) if isinstance(includes, six.string_types): #", "URL {}, entry #{}'.format( url, no + 1)) continue #", "dict): for k, v in tokens.items(): if k.startswith('_') or k.endswith('_'):", "entry of loaded URL ConfigBase.logger.debug('Loaded URL: {}'.format(plugin.url())) except Exception as", "1 # Insecure Includes flag can never be parsed from", "unpacked as:{}{}' .format(no + 1, url, os.linesep, os.linesep.join( ['{}=\"{}\"'.format(k, a)", "SOFTWARE. import os import re import six import yaml import", "a # configuration location (like this file) which will be", "False if content should be retrieved. \"\"\" if isinstance(self._cached_servers, list)", "format ({}) was specified.'.format( self.config_format) self.logger.warning(err) raise TypeError(err) # Set", "reason we should be reading in more. This is more", "where we are not caching our response and are required", "The idea here is we can post process a set", "not in SCHEMA_MAP: ConfigBase.logger.warning( 'Unsupported include schema {}.'.format(schema)) continue #", "There are cases where a self hosting apprise developer may", "on allow_cross_includes rules if (SCHEMA_MAP[schema].allow_cross_includes == ContentIncludeMode.STRICT and schema not", "set if the user overrides the config format on the", "is what we use to instantiate our object if #", "to detect the format type The function returns the actual", "# can read the error printed to screen and take", "# Tracks previously loaded content for speed self._cached_servers = None", "if 'schema' in entries: del entries['schema'] # support our special", "all of our options so we # can at least", "it just isn't an integer; now treat it as a", "strings with a string ConfigBase.logger.warning( 'Invalid asset value to \"{}\".'.format(k))", "= os.getcwd() def __init__(self, cache=True, recursion=0, 
insecure_includes=False, **kwargs): \"\"\" Initialize", "set to True. \"\"\" super(ConfigBase, self).__init__(**kwargs) # Tracks the time", "these 'include' entries to be honored, this value must be", "not isinstance(result, dict): # Invalid content ConfigBase.logger.error( 'Invalid Apprise YAML", "# add our results to our global set results.append(_results) elif", "at all. But for remote content, this does mean more", "and isinstance(getattr(asset, k), bool)): # If the object in the", "' 'entry #{}, item #{}' .format(no + 1, entry)) ConfigBase.logger.debug('Loading", "default set our return value to None since we don't", "Initialize some general logging and common server arguments that will", "data source and return unparsed content # None is returned", "Services servers = list() # A list of additional configuration", "separated by a # comma and/or space includes = parse_urls(includes)", "and common server arguments that will keep things consistent when", "value we've already # declared (prior to our recursion) results['cache']", "#{}' .format(key, no + 1)) continue # Store our schema", "yaml file specifically formatted for Apprise. Return a tuple that", "r'^\\s*(?P<line>([;#]+(?P<comment>.*))|' r'(?P<text>((?P<tag>[ \\t,a-z0-9_-]+)=)?[a-z0-9]+://.*)|' r'((?P<yaml>[a-z0-9]+):.*))?$', re.I) try: # split our content", "as there is no # reason we should be reading", "# This is only set if the user overrides the", "general logging and common server arguments that will keep things", "be parsed and loaded. \"\"\" if not self.expired(): # We", "Includes flag can never be parsed from the URL results['insecure_includes']", "the cache value to an int identifying the number of", "use if not otherwise detected encoding = 'utf-8' # The", "add it to our existing compilation. If the file we", "may also optionally associate an asset with the notification. 
The", "syntax ConfigBase.logger.error( 'Invalid Apprise YAML version specified {}.'.format(version)) return (list(),", "preset tags global_tags = set(parse_list(tags)) # # include root directive", "in an Python 2.x based 'if statement'. True is returned", "# Execute our config parse function which always returns a", "files referenced. You may also optionally associate an asset with", "formatted for Apprise. Return a tuple that looks like (servers,", "return True if self._cached_servers else False def __nonzero__(self): \"\"\" Allows", "k, v in tokens.items() if k.startswith(prefix)} if not matches: #", "with this entry continue if not isinstance(tokens.get(kw, None), dict): #", "on. This place a role # for cases where we", "associate an asset with the notification. \"\"\" # A list", "is returned if there was an error or simply no", "encoding of the payload if 'encoding' in results['qsd']: results['encoding'] =", "the specified config_format. If a format isn't specified, then it", "list self._cached_servers.extend( cfg_plugin.servers(asset=asset)) # We no longer need our configuration", "= valid_line_re.match(entry) if not result: # Invalid syntax ConfigBase.logger.error( 'Undetectable", "'Undetectable Apprise configuration found ' 'based on line {}.'.format(line)) #", "format (no tags associated) <URL> # you can also use", "printed to screen and take action # otherwise. return (list(),", "arguments associated configs.extend(u for u in url.keys()) # # urls", "a list of content we can pull from self.servers() #", "Removes an indexed Notification Service from the stack and returns", "setattr(asset, k, v.strip()) else: # we must set strings with", "..utils import parse_bool from ..utils import parse_urls from . 
import", "this entry continue if not isinstance(tokens.get(kw, None), dict): # Invalid;", "Exception: {}'.format(str(e))) continue # if we reach here, we can", "set our return value to None since we don't know", "config path manages the handling of relative include config_path =", "and add it to our existing compilation. If the file", "never be parsed from the URL results['insecure_includes'] = self.insecure_includes try:", "- 1 # Insecure Includes flag can never be parsed", "and / or sell # copies of the Software, and", "# Initialize our insecure_includes flag self.insecure_includes = insecure_includes if 'encoding'", "if not result: # Invalid syntax ConfigBase.logger.error( 'Undetectable Apprise configuration", "use it. insecure_include by default are disabled. When set to", "# comma and/or space includes = parse_urls(includes) elif not isinstance(includes,", "we failed to parse our url return results # Allow", "no includes includes = list() # Iterate over each config", "# global_tags = set() tags = result.get('tag', None) if tags", "the format config_format = ConfigBase.detect_config_format(content) if not config_format: # We", "first item _results = results.pop(0) # tag is a special", "log entry of loaded URL ConfigBase.logger.debug('Loaded URL: {}'.format(plugin.url())) except Exception", "includes = list() # Iterate over each config URL for", "# containing all of the information parsed from our URL", "# can at least tell the end user what entries", "hosting apprise developer may wish to load configuration from memory", "try: results['cache'] = int(results['qsd']['cache']) except (ValueError, TypeError): # No problem,", "specified config content and loads it based on the specified", "is off. 
There is no limit to how high you", "_url, tokens = next(url.iteritems()) else: # six.PY3 _url, tokens =", "We're done; we failed to parse our url return results", "= results['qsd'].get('encoding') # Our cache value if 'cache' in results['qsd']:", "can read the error printed to screen and take action", "allow_cross_includes = ContentIncludeMode.NEVER # the config path manages the handling", "results to our global set results.append(r) elif isinstance(tokens, dict): #", "'URL #{}: {} unpacked as:{}{}' .format(no + 1, url, os.linesep,", "their appropriate value they're expected \"\"\" # Create a copy", "so we skip over # lines matched here. # -", "you can also use the keyword 'include' and identify a", "else: # Just use the global settings _results['tag'] = global_tags", "config plugin to load the data source and return unparsed", "# If we have been configured to do so for", "rights reserved. # # This code is licensed under the", "re.split(r'\\r*\\n', content) except TypeError: # content was not expected string", "(if they're present) if schema in plugins.SCHEMA_MAP: entries = ConfigBase.__extract_special_tokens(", "or multiple ones separated by # a comma and/or space", "line of the file to attempt to detect it #", "in configuration files. allow_cross_includes = ContentIncludeMode.NEVER # the config path", "what was loaded return (servers, configs) @staticmethod def config_parse_yaml(content, asset=None):", "schema not in self.schemas() and not self.insecure_includes) or \\ SCHEMA_MAP[schema].allow_cross_includes", "default value set to True. Returns: A dictionary is returned", "Our results object is what we use to instantiate our", "required to # re-retrieve our settings. 
self._cached_time = None #", "anything ConfigBase.logger.warning( 'Unsupported URL, entry #{}'.format(no + 1)) continue _results", "parsed URL as a base to # work with r", "logging and common server arguments that will keep things consistent", "our empty cache list return self._cached_servers # Our Configuration format", "# Or you can use this format (no tags associated)", "except (AttributeError, yaml.parser.ParserError, yaml.error.MarkedYAMLError) as e: # Invalid content ConfigBase.logger.error(", "containing the yaml entries parsed. The idea here is we", "look up what these keywords map to their appropriate value", "k.startswith('_') or k.endswith('_'): # Entries are considered reserved if they", "YAML file. # - If we find a string that", "configs) def pop(self, index=-1): \"\"\" Removes an indexed Notification Service", "more then one call can be made to retrieve the", "Determine our prefix: prefix = meta.get('prefix', '+') # Detect any", "want to convert the specified string to # match that.", "{} unpacked as:{}{}' .format(no + 1, url, os.linesep, os.linesep.join( ['{}=\"{}\"'.format(k,", "file:// based ones). In these circumstances if you want these", "# Tracks the time the content was last retrieved on.", "if not isinstance(urls, (list, tuple)): # Not a problem; we", "# the loop above failed to match anything ConfigBase.logger.warning( 'Unsupported", "then # anything else. 128KB (131072B) max_buffer_size = 131072 #", "get it's integer value try: results['cache'] = int(results['qsd']['cache']) except (ValueError,", "retrieve the (same) data. This method can be somewhat inefficient", "({}) was specified'.format( config_format)) return (list(), list()) # Dynamically load", "# Attempt to detect configuration if result.group('yaml'): config_format = ConfigFormat.YAML", "lines matched here. 
# - Detection begins on the first", "of data into memory as there is no # reason", "allow for line comments # # One or more tags", "r = _results.copy() # We are a url string with", "os.path.isabs(url): # We're dealing with a relative path; prepend #", "at the very least to allow # our configuration based", "Convert to an empty string v = '' if (isinstance(v,", "for k, a in _results.items()]))) # Prepare our Asset Object", "All rights reserved. # # This code is licensed under", "def __init__(self, cache=True, recursion=0, insecure_includes=False, **kwargs): \"\"\" Initialize some general", "'based on line {}.'.format(line)) # Take an early exit return", "# # One or more tags can be idenified using", "def detect_config_format(content, **kwargs): \"\"\" Takes the specified content and attempts", "string v = '' if (isinstance(v, (bool, six.string_types)) and isinstance(getattr(asset,", "type - tokens must be a dictionary containing the yaml", "= meta.get('prefix', '+') # Detect any matches matches = \\", "Tidy our list up _results['tag'] = \\ set(parse_list(_results['tag'])) | global_tags", "were a simple text file only containing a list of", "= recursion # Initialize our insecure_includes flag self.insecure_includes = insecure_includes", "that. setattr(asset, k, parse_bool(v)) elif isinstance(v, six.string_types): # Set our", "requirements 'schema': schema, } if isinstance(tokens, (list, tuple, set)): #", "at least tell the end user what entries were ignored", "object # asset = asset if isinstance(asset, AppriseAsset) else AppriseAsset()", "TypeError(err) # Set our cache flag; it can be True", "self.url())) # Set the time our content was cached at", "a comma and/or space configs.extend(parse_urls(url)) elif isinstance(url, dict): # Store", "config_format. If a format isn't specified, then it is auto", "comma's (,) to separate # them. 
<Tag(s)>=<URL> # Or you", "r'(?P<token>[a-z0-9][a-z0-9_]+)', re.I) class ConfigBase(URLBase): \"\"\" This is the base class", "asset object # asset = asset if isinstance(asset, AppriseAsset) else", "# We're just a simple URL string... schema = GET_SCHEMA_RE.match(url)", "ConfigBase.logger.warning( 'Ignored entry {} found under urls, entry #{}' .format(key,", "# - Detection begins on the first non-comment and non", "considered expired. recursion defines how deep we recursively handle entries", "be considered # missing and/or expired. return True @staticmethod def", "config_format = ConfigFormat.TEXT return config_format @staticmethod def config_parse(content, asset=None, config_format=None,", "based on our config format fn = getattr(ConfigBase, 'config_parse_{}'.format(config_format)) #", "{}.'.format(url, line)) continue # Build a list of tags to", "class for all supported configuration sources \"\"\" # The Default", "it one level results['recursion'] = self.recursion - 1 # Insecure", "(accepting commas) followed by an equal sign we know #", "restriction, including without limitation the rights # to use, copy,", "# urls = result.get('urls', None) if not isinstance(urls, (list, tuple)):", "entries in tokens: # Copy ourselves a template of our", "have been configured to do so for url in configs:", "# we're dealing with a TEXT format. # Define what", "content # None is returned if there was an error", "or \\ SCHEMA_MAP[schema].allow_cross_includes == \\ ContentIncludeMode.NEVER: # Prevent the loading", "if # we can. Reset it to None on each", "to True. \"\"\" super(ConfigBase, self).__init__(**kwargs) # Tracks the time the", "recursion=0, insecure_includes=False, **kwargs): \"\"\" Initialize some general logging and common", "and ignore arguments associated configs.extend(u for u in url.keys()) #", "results populated by # parse_url() for entries in tokens: #", "We're just a simple URL string... 
schema = GET_SCHEMA_RE.match(url) if", "asset # No cache is required because we're just lumping", "= ConfigFormat.TEXT ConfigBase.logger.debug( 'Detected TEXT configuration ' 'based on line", "del cfg_plugin else: self.logger.debug( 'Recursion limit reached; ignoring Include URL:", "newly added # notifications if any were set results['tag'] =", "least to allow # our configuration based urls. schema =", "expected \"\"\" # Create a copy of our dictionary tokens", "we have a comment entry # Adjust default format to", "ConfigBase.logger.error( 'An invalid configuration format ({}) was specified'.format( config_format)) return", "result.get('asset', None) if tokens and isinstance(tokens, dict): for k, v", "# includes = result.get('include', None) if isinstance(includes, six.string_types): # Support", "details of the server object as dictionary # containing all", "with the new value setattr(asset, k, v.strip()) else: # we", "set strings with a string ConfigBase.logger.warning( 'Invalid asset value to", "based on the specified config_format. 
If a format isn't specified,", "store it's # details: _results = plugins.url_to_dict(url) if _results is", "else: self.logger.debug( 'Recursion limit reached; ignoring Include URL: %s' %", "URL {}'.format(url)) continue # Handle cross inclusion based on allow_cross_includes", "schema at the very least to allow # our configuration", "plugins.url_to_dict(url) if _results is None: ConfigBase.logger.warning( 'Unparseable URL {}, entry", "root directive # global_tags = set() tags = result.get('tag', None)", "content we can pull from self.servers() return len(self._cached_servers) def __bool__(self):", "ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED", "and schema not in self.schemas() and not self.insecure_includes) or \\", "free of charge, to any person obtaining a copy #", "what we use to instantiate our object if # we", "\"\"\" This object should be implimented by the child classes", "return True @staticmethod def parse_url(url, verify_host=True): \"\"\"Parses the URL and", "in this configuration file to our list self._cached_servers.extend( cfg_plugin.servers(asset=asset)) #", "pop(self, index=-1): \"\"\" Removes an indexed Notification Service from the", "found ' 'based on line {}.'.format(line)) # Take an early", "data servers.append(plugin) # Return what was loaded return (servers, configs)", "up what these keywords map to their appropriate value they're", "k, parse_bool(v)) elif isinstance(v, six.string_types): # Set our asset object", "for speed self._cached_servers = None # Initialize our recursion value", "blank line # matched. # - If we find a", "to parse the server URL ConfigBase.logger.warning( 'Unparseable URL {} on", "some of the special keywords. 
We effectivley look up what", "type ConfigBase.logger.error( 'Invalid Apprise TEXT based configuration specified.') return (list(),", "tokens = ConfigBase.__extract_special_tokens( schema, tokens) # Copy ourselves a template", "always returns a list return fn(content=content, asset=asset) @staticmethod def config_parse_text(content,", "circumstances if you want these 'include' entries to be honored,", "object asset = asset if isinstance(asset, AppriseAsset) else self.asset #", "try: self.cache = cache if isinstance(cache, bool) else int(cache) if", "specific and customized for Apprise. Args: url (str): The URL", "a # comma and/or space includes = parse_urls(includes) elif not", "configuration specified.') return None # By default set our return", "IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,", "correct it tokens[kw] = dict() # strip out processed tokens", "\"\"\" This function takes a list of tokens and updates", "to None since we don't know # what the format", "tokens): \"\"\" This function takes a list of tokens and", "our include line configs.append(config.strip()) continue # Acquire our url tokens", "asset if isinstance(asset, AppriseAsset) else self.asset # Execute our config", "successful, otherwise None is returned. 
\"\"\" results = URLBase.parse_url(url, verify_host=verify_host)", "VALID_TOKEN.match(key) if not match: ConfigBase.logger.warning( 'Ignoring invalid token ({}) found", "our url tokens results = plugins.url_to_dict(url) if results is None:", "out processed tokens tokens = {k: v for k, v", "by an equal sign we know # we're dealing with", "url in configs: if self.recursion > 0: # Attempt to", "result.group('yaml'): config_format = ConfigFormat.YAML ConfigBase.logger.debug( 'Detected YAML configuration ' 'based", "tokens.items() if k.startswith(prefix)} if not matches: # we're done with", "{}'.format(schema, url)) continue # Prepare our Asset Object results['asset'] =", "global set results.append(_results) elif isinstance(url, dict): # We are a", "ones separated by # a comma and/or space configs.extend(parse_urls(url)) elif", "for k, v in tokens.items(): if k.startswith('_') or k.endswith('_'): #", "'config_parse_{}'.format(config_format)) # Initialize our asset object asset = asset if", "return False # Verify our cache time to determine whether", "as though it were a simple text file only containing", "= ConfigBase.__extract_special_tokens( schema, tokens) # Copy ourselves a template of", "reach here, we successfully loaded our data servers.append(plugin) # Return", "if not k.startswith(prefix)} # Update our entries tokens[kw].update(matches) # Return", "is more of a safe guard then # anything else.", "__init__(self, cache=True, recursion=0, insecure_includes=False, **kwargs): \"\"\" Initialize some general logging", "config = result.group('url'), result.group('config') if not (url or config): #", "results object is what we use to instantiate our object", "'Unsupported Apprise YAML entry #{}'.format(no + 1)) continue # Track", "present) if schema in plugins.SCHEMA_MAP: tokens = ConfigBase.__extract_special_tokens( schema, tokens)", "SCHEMA_MAP[schema].parse_url(url) if not results: # Failed to parse the server", "return def servers(self, asset=None, **kwargs): \"\"\" 
Performs reads loaded configuration", "OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR", "if _schema is None: # Log invalid entries so that", "(if SSL transactions take place). Unless under very specific circumstances,", "YAML ' 'configuration entry #{}, item #{}' .format(key, no +", "idenified using comma's (,) to separate # them. <Tag(s)>=<URL> #", "fn = getattr(ConfigBase, 'config_parse_{}'.format(config_format)) # Execute our config parse function", "config file at least has something to take action #", "# Acquire our url tokens results = plugins.url_to_dict(url) if results", "makes no difference at all. But for remote content, this", "to fully parse. verify_host (:obj:`bool`, optional): a flag kept with", "Store the encoding self.encoding = kwargs.get('encoding') if 'format' in kwargs", "if disabled. Only disable caching if you understand the consequences.", "valid_line_re = re.compile( r'^\\s*(?P<line>([;#]+(?P<comment>.*))|' r'(?P<text>((?P<tag>[ \\t,a-z0-9_-]+)=)?[a-z0-9]+://.*)|' r'((?P<yaml>[a-z0-9]+):.*))?$', re.I) try: #", "except (ValueError, TypeError): err = 'An invalid cache value ({})", "This is the base class for all supported configuration sources", "# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,", "not load URL {} on line {}.'.format( url, line)) ConfigBase.logger.debug('Loading", "entries were ignored # due to errors if six.PY2: it", "CONFIG_FORMATS: URLBase.logger.warning( 'Unsupported format specified {}'.format( results['format'])) del results['format'] #", "will later use to verify SSL keys (if SSL transactions", "and non blank line # matched. # - If we", "!= 1: # Invalid syntax ConfigBase.logger.error( 'Invalid Apprise YAML version", "the data source and return unparsed content # None is", "content was cached at self._cached_time = time.time() return self._cached_servers def", "alternatively set the cache value to an int identifying the", "unparsed content # None is returned if there was an", "shouldn't be parsing. 
It's owner # can read the error", "data content = self.read(**kwargs) if not isinstance(content, six.string_types): # Set", "before it should be considered expired. recursion defines how deep", "list of loaded notification plugins - configs contains a list", "if k.startswith('_') or k.endswith('_'): # Entries are considered reserved if", "# or enfored. config_format = \\ self.default_config_format \\ if self.config_format", "dict): # Invalid content ConfigBase.logger.error( 'Invalid Apprise YAML based configuration", "and/or override any results populated by # parse_url() for entries", "list of content we can pull from self.servers() return len(self._cached_servers)", "here. # - Detection begins on the first non-comment and", "though it were a yaml file specifically formatted for Apprise.", "charge, to any person obtaining a copy # of this", "place a role # for cases where we are not", "list of tokens and updates them to no longer include", "if 'format' in kwargs \\ and isinstance(kwargs['format'], six.string_types): # Store", "Nothing more to do; return our empty cache list return", "result set r.update(tokens) # add our results to our global", "based urls. schema = GET_SCHEMA_RE.match(url) if schema is None: #", "URL you want to fully parse. verify_host (:obj:`bool`, optional): a", "handle entries that use the `include` keyword. This keyword requires", "'Ignoring URL {}'.format(schema, url)) continue # Prepare our Asset Object", "where: - servers contains a list of loaded notification plugins", "# If we reach here our configuration should be considered", "'based on line {}.'.format(line)) break # If we reach here,", "# # include root directive # includes = result.get('include', None)", "plugins.SCHEMA_MAP[_results['schema']](**_results) # Create log entry of loaded URL ConfigBase.logger.debug( 'Loaded", "the notification. 
The file syntax is: # # pound/hashtag allow", "this permission notice shall be included in # all copies", "self.logger.info('Loaded {} entries from {}'.format( len(self._cached_servers), self.url())) else: self.logger.warning( 'Failed", "iteration results = list() if isinstance(url, six.string_types): # We're just", "URL {} on line {}.'.format( url, line)) ConfigBase.logger.debug('Loading Exception: %s'", "our global set results.append(r) else: # add our results to", "value self.recursion = recursion # Initialize our insecure_includes flag self.insecure_includes", "iterate over all of our options so we # can", "The file syntax is: # # pound/hashtag allow for line", "from the URL results['insecure_includes'] = self.insecure_includes try: # Attempt to", "Apprise configuration specified.') return None # By default set our", "Load our data (safely) result = yaml.load(content, Loader=yaml.SafeLoader) except (AttributeError,", "= 'utf-8' # The default expected configuration format unless otherwise", "'' if (isinstance(v, (bool, six.string_types)) and isinstance(getattr(asset, k), bool)): #", "an asset with the notification. 
\"\"\" # A list of", "Object _results['asset'] = asset try: # Attempt to create an", "we're just lumping this in # and associating it with", "Invalid content ConfigBase.logger.error( 'Invalid Apprise YAML based configuration specified.') return", "isinstance(asset, AppriseAsset) else self.asset # Execute our config parse function", "are cases where a self hosting apprise developer may wish", "= int(results['qsd']['cache']) except (ValueError, TypeError): # No problem, it just", "list() # Iterate over each config URL for no, url", "our content was cached at self._cached_time = time.time() # Nothing", "we simply have no includes includes = list() # Iterate", "optional): a flag kept with the parsed URL which some", "this include would be possible if insecure_includes is set to", "config format ({}) was specified.'.format( self.config_format) self.logger.warning(err) raise TypeError(err) #", "content ConfigBase.logger.error( 'Invalid Apprise YAML data specified.') ConfigBase.logger.debug( 'YAML Exception:{}{}'.format(os.linesep,", "tokens.items() if not k.startswith(prefix)} # Update our entries tokens[kw].update(matches) #", "to our server list \"\"\" if not isinstance(self._cached_servers, list): #", "the URL # this should always initialize itself as None", "six import yaml import time from .. import plugins from", "= URLBase.parse_url(url, verify_host=verify_host) if not results: # We're done; we", "empty string v = '' if (isinstance(v, (bool, six.string_types)) and", "entry continue if not isinstance(tokens.get(kw, None), dict): # Invalid; correct", "keep things consistent when working with the configurations that inherit", "required because we're just lumping this in # and associating", "six.PY2: _url, tokens = next(url.iteritems()) else: # six.PY3 _url, tokens", "somewhat inefficient if disabled. 
Only disable caching if you understand", "# Set the time our content was cached at self._cached_time", "rules if (SCHEMA_MAP[schema].allow_cross_includes == ContentIncludeMode.STRICT and schema not in self.schemas()", "# the arguments are invalid or can not be used.", "The function returns the actual format type if detected, otherwise", "format type The function returns the actual format type if", "(in a string format) that contains 'include' entries (even file://", "can pull from self.servers() return iter(self._cached_servers) def __len__(self): \"\"\" Returns", "schema = schema.group('schema').lower() # Some basic validation if schema not", "e: # the arguments are invalid or can not be", "list of tags to associate with the newly added #", "True or a (positive) integer try: self.cache = cache if", "' 'based on line {}.'.format(line)) # Take an early exit", "arguments that will keep things consistent when working with the", "results['cache'] = int(results['qsd']['cache']) except (ValueError, TypeError): # No problem, it", "= 0 while len(results): # Increment our entry count entry", "by the child classes \"\"\" return None def expired(self): \"\"\"", "If we find a string that starts with a URL,", "six.string_types)): # Store any preset tags global_tags = set(parse_list(tags)) #", "True if the configuration should be considered as expired or", "return tokens def __getitem__(self, index): \"\"\" Returns the indexed server", "from another source and add it to our existing compilation.", "if not config_format: # We couldn't detect configuration ConfigBase.logger.error('Could not", "'utf-8' # The default expected configuration format unless otherwise #", "{} entries from {}'.format( len(self._cached_servers), self.url())) else: self.logger.warning( 'Failed to", "the total number of servers loaded \"\"\" if not isinstance(self._cached_servers,", "True is returned if our content was downloaded correctly. 
\"\"\"", "= SCHEMA_MAP[schema].parse_url(url) if not results: # Failed to parse the", "None # Tracks previously loaded content for speed self._cached_servers =", "results: # Failed to parse the server URL self.logger.warning( 'Unparseable", "in results['qsd']: results['encoding'] = results['qsd'].get('encoding') # Our cache value if", "our current config path url = os.path.join(self.config_path, url) url =", "a tuple that looks like (servers, configs) where: - servers", "tuple that looks like (servers, configs) where: - servers contains", "includes = parse_urls(includes) elif not isinstance(includes, (list, tuple)): # Not", "url, no + 1)) continue # add our results to", "# global asset object # asset = asset if isinstance(asset,", "one detected # or enfored. config_format = \\ self.default_config_format \\", "it's # details: _results = plugins.url_to_dict(url) if _results is None:", "_results[key] ConfigBase.logger.trace( 'URL #{}: {} unpacked as:{}{}' .format(no + 1,", "number of servers loaded \"\"\" if not isinstance(self._cached_servers, list): #", "our insecure_includes flag self.insecure_includes = insecure_includes if 'encoding' in kwargs:", "fn(content=content, asset=asset) self._cached_servers.extend(servers) # Configuration files were detected; recursively populate", "# stop the moment a the type has been determined", "ConfigBase.logger.warning( 'Unsupported include schema {}.'.format(schema)) continue # Parse our url", "remotely retrieve also has an `include` reference, we will only", "configs = list() try: # Load our data (safely) result", "on line {}.'.format( url, line)) ConfigBase.logger.debug('Loading Exception: %s' % str(e))", "entry += 1 # Grab our first item _results =", "(list, tuple, six.string_types)): # Store any preset tags global_tags =", "if not (hasattr(asset, k) and isinstance(getattr(asset, k), (bool, six.string_types))): #", "is None: # Failed to parse the server URL ConfigBase.logger.warning(", "parse_urls(includes) elif not 
isinstance(includes, (list, tuple)): # Not a problem;", "# Set our cache flag; it can be True or", "list() if isinstance(url, six.string_types): # We're just a simple URL", "v for k, v in tokens.items() if not k.startswith(prefix)} #", "content we can pull from self.servers() return iter(self._cached_servers) def __len__(self):", "continue # if we reach here, we can now add", "string or multiple ones separated by # a comma and/or", "configs) where: - servers contains a list of loaded notification", "remote content, this does mean more then one call can", "continue # Acquire our url tokens results = plugins.url_to_dict(url) if", "<URL> # you can also use the keyword 'include' and", "our asset object asset = asset if isinstance(asset, AppriseAsset) else", "URL; we decrement # it one level results['recursion'] = self.recursion", "the specified content as though it were a yaml file", "work with r = _results.copy() # We are a url", "list()) # # global asset object # asset = asset", "this value. It would be recommended to keep it low", "You may also optionally associate an asset with the notification.", "asset key \"{}\".'.format(k)) continue if v is None: # Convert", "return len(self._cached_servers) def __bool__(self): \"\"\" Allows the Apprise object to", "or enfored. config_format = \\ self.default_config_format \\ if self.config_format is", "attempt to detect it # stop the moment a the", "in STRICT mode are treated as being in ALWAYS mode.", "associated configs.extend(u for u in url.keys()) # # urls root", "single inline string or multiple ones separated by # a", "# Our results object is what we use to instantiate", "Log invalid entries so that maintainer of config # config", "\"\"\" results = URLBase.parse_url(url, verify_host=verify_host) if not results: # We're", "as additional configuration entries when loaded. 
include <ConfigURL> \"\"\" #", "AppriseAsset() tokens = result.get('asset', None) if tokens and isinstance(tokens, dict):", "for key, tokens in it: # Test our schema _schema", "value they're expected \"\"\" # Create a copy of our", "**kwargs): \"\"\" Initialize some general logging and common server arguments", "# otherwise. return (list(), list()) url, config = result.group('url'), result.group('config')", "associated) <URL> # you can also use the keyword 'include'", "ConfigBase.logger.trace( 'URL #{}: {} unpacked as:{}{}' .format(no + 1, url,", "config format on the URL # this should always initialize", "v in tokens.items(): if k.startswith('_') or k.endswith('_'): # Entries are", "the element off of the stack return self._cached_servers.pop(index) @staticmethod def", "include config_path = os.getcwd() def __init__(self, cache=True, recursion=0, insecure_includes=False, **kwargs):", "'Detected TEXT configuration ' 'based on line {}.'.format(line)) break #", "Return what was loaded return (servers, configs) @staticmethod def config_parse_yaml(content,", "ConfigBase.logger.warning( 'Ignored asset key \"{}\".'.format(k)) continue if not (hasattr(asset, k)", "\"\"\"Parses the URL and returns it broken apart into a", "server object as dictionary # containing all of the information", "# Track last acquired schema schema = None for key,", "additional configuration files referenced. 
You may also optionally associate an", "line {}.'.format(line)) break # If we reach here, we have", "no, url in enumerate(includes): if isinstance(url, six.string_types): # Support a", "Allow overriding the default config format if 'format' in results['qsd']:", "six.string_types): # Support a single inline string or multiple ones", "under very specific circumstances, it is strongly recomended that you", "can be idenified using comma's (,) to separate # them.", "# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR", "boolean, then # we want to convert the specified string", "isinstance(includes, (list, tuple)): # Not a problem; we simply have", "each config URL for no, url in enumerate(includes): if isinstance(url,", "should be reading in more. This is more of a", "our Asset Object results['asset'] = asset # No cache is", "of our plugin using the # parsed URL information cfg_plugin", "on the URL # this should always initialize itself as", "can be somewhat inefficient if disabled. Only disable caching if", "config_format: # We couldn't detect configuration ConfigBase.logger.error('Could not detect configuration')", "# Don't read any more of this amount of data", "we recursively handle entries that use the `include` keyword. This", "was specified.'.format( self.config_format) self.logger.warning(err) raise TypeError(err) # Set our cache", "to # re-retrieve our settings. self._cached_time = None # Tracks", "additional configuration files referenced. You may optionally associate an asset", "import time from .. 
import plugins from ..AppriseAsset import AppriseAsset", "{}.'.format(line)) # Take an early exit return None # Attempt", "list up _results['tag'] = \\ set(parse_list(_results['tag'])) | global_tags else: #", "# split our content up to read line by line", "= 'A negative cache value ({}) was specified.'.format( cache) self.logger.warning(err)", "now treat it as a bool # instead: results['cache'] =", "Minimum requirements 'schema': schema, } if isinstance(tokens, (list, tuple, set)):", "it were a simple text file only containing a list", "# global tag root directive # global_tags = set() tags", "SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE", "Dynamically load our parse_ function based on our config format", "continue # if we reach here, we successfully loaded our", "re import six import yaml import time from .. import", "continue # We found a valid schema worthy of tracking;", "a TEXT format. # Define what a valid line should", "zero it is off. There is no limit to how", "Test our schema _schema = GET_SCHEMA_RE.match(key) if _schema is None:", "of config # config file at least has something to", "expired; return False return False # If we reach here", "Acquire our url tokens results = plugins.url_to_dict(url) if results is", "in _results: # Tidy our list up _results['tag'] = \\", "object if # we can. 
Reset it to None on", "'Ignored asset key \"{}\".'.format(k)) continue if not (hasattr(asset, k) and", "pull from self.servers() return self._cached_servers[index] def __iter__(self): \"\"\" Returns an", "URL and Schema Regex _url = key if _url is", "if they start or end # with an underscore ConfigBase.logger.warning(", "unescaped options if isinstance(entries, dict): if six.PY2: _url, tokens =", "k) and isinstance(getattr(asset, k), (bool, six.string_types))): # We can't set", "list \"\"\" if not isinstance(self._cached_servers, list): # Generate ourselves a", "__extract_special_tokens(schema, tokens): \"\"\" This function takes a list of tokens", "processed tokens tokens = {k: v for k, v in", "- configs contains a list of additional configuration files referenced.", "our global set results.append(r) elif isinstance(tokens, dict): # support our", "of the stack return self._cached_servers.pop(index) @staticmethod def __extract_special_tokens(schema, tokens): \"\"\"", "that use the `include` keyword. This keyword requires us to", "to True. There are cases where a self hosting apprise", "be parsed from the URL results['insecure_includes'] = self.insecure_includes try: #", "line {}.'.format( url, line)) ConfigBase.logger.debug('Loading Exception: %s' % str(e)) continue", "self._cached_time = None # Tracks previously loaded content for speed", "(list(), list()) url, config = result.group('url'), result.group('config') if not (url", "have no urls urls = list() # Iterate over each", "None \"\"\" # Detect Format Logic: # - A pound/hashtag", "Parse our url details of the server object as dictionary", "parsed. The idea here is we can post process a", "be wrapped in an Python 3.x based 'if statement'. 
True", "publish, distribute, sublicense, and / or sell # copies of", "to the following conditions : # # The above copyright", "dictionary _results = { # Minimum requirements 'schema': schema, }", "time to determine whether we will get our # content", "or non-string set value ConfigBase.logger.warning( 'Invalid asset key \"{}\".'.format(k)) continue", "time from .. import plugins from ..AppriseAsset import AppriseAsset from", "can not be used. self.logger.warning( 'Could not load include URL:", "documentation files(the \"Software\"), to deal # in the Software without", "Ensure our schema is always in lower case schema =", "no longer need our configuration object del cfg_plugin else: self.logger.debug(", "1, url, os.linesep, os.linesep.join( ['{}=\"{}\"'.format(k, a) for k, a in", "servers and our configuration servers, configs = fn(content=content, asset=asset) self._cached_servers.extend(servers)", "None # Initialize our recursion value self.recursion = recursion #", "continue _results = plugins.url_to_dict(_url) if _results is None: # Setup", "(#) is alawys a comment character so we skip over", "cache our responses so that subsiquent calls does not cause", "it is set to STRICT mode. If an http:// based", "- Detection begins on the first non-comment and non blank", "populate them # If we have been configured to do", "self._cached_servers = None # Initialize our recursion value self.recursion =", "comma and/or space includes = parse_urls(includes) elif not isinstance(includes, (list,", "(servers, configs) @staticmethod def config_parse_yaml(content, asset=None): \"\"\" Parse the specified", "we successfully loaded our data servers.append(plugin) return (servers, configs) def", "want to fully parse. verify_host (:obj:`bool`, optional): a flag kept", "elif not isinstance(includes, (list, tuple)): # Not a problem; we", "is we can post process a set of tokens provided", "entry #{}' .format(key, no + 1)) continue # Store our", "# with a YAML file. 
# - If we find", "count entry += 1 # Grab our first item _results", "False return False # If we reach here our configuration", "if not result: # Invalid syntax ConfigBase.logger.error( 'Invalid Apprise TEXT", "indexed Notification Service from the stack and returns it. By", "start or end # with an underscore ConfigBase.logger.warning( 'Ignored asset", "the new value setattr(asset, k, v.strip()) else: # we must", "we know # we're dealing with a TEXT format. #", "= cache if isinstance(cache, bool) else int(cache) if self.cache <", "cached content # and verify it has not expired. if", "URL {}'.format(schema, url)) continue # Prepare our Asset Object results['asset']", "place). Unless under very specific circumstances, it is strongly recomended", "as e: # Invalid content ConfigBase.logger.error( 'Invalid Apprise YAML data", "Returns the indexed server entry associated with the loaded notification", "our configuration object del cfg_plugin else: self.logger.debug( 'Recursion limit reached;", "tuple)): # Not a problem; we simply have no urls", "the consequences. You can alternatively set the cache value to", "returns the actual format type if detected, otherwise it returns", "content was cached at self._cached_time = time.time() # Nothing more", "to our existing compilation. 
If the file we remotely retrieve", "from {}'.format( self.url())) # Set the time our content was", "parse_list from ..utils import parse_bool from ..utils import parse_urls from", "(list(), list()) # Dynamically load our parse_ function based on", "parsed URL as a base # to work with r", "if not isinstance(result, dict): # Invalid content ConfigBase.logger.error( 'Invalid Apprise", "# Set our asset object with the new value setattr(asset,", "of our servers and our configuration servers, configs = fn(content=content,", "= asset # No cache is required because we're just", "wish to load configuration from memory (in a string format)", "Our Configuration format uses a default if one wasn't one", "dictionary containing the yaml entries parsed. The idea here is", "1)) continue # We found a valid schema worthy of", "self.logger.warning( 'Failed to load Apprise configuration from {}'.format( self.url())) #", "under urls, entry #{}' .format(key, no + 1)) continue #", "no + 1)) continue # add our results to our", "continue # Handle cross inclusion based on allow_cross_includes rules if", "can at least tell the end user what entries were", "No cache is required because we're just lumping this in", "results.append(r) elif isinstance(tokens, dict): # support our special tokens (if", "By default, the last element of the list is removed.", "the loading if insecure base protocols ConfigBase.logger.warning( 'Including {}:// based", "been configured to do so for url in configs: if", "string type ConfigBase.logger.error( 'Invalid Apprise configuration specified.') return None #", "user match = VALID_TOKEN.match(key) if not match: ConfigBase.logger.warning( 'Ignoring invalid", "(isinstance(v, (bool, six.string_types)) and isinstance(getattr(asset, k), bool)): # If the", "content # and verify it has not expired. if self.cache", "a simple URL string... 
schema = GET_SCHEMA_RE.match(url) if schema is", "return (list(), list()) # # global asset object # asset", "very specific circumstances, it is strongly recomended that you leave", "based ones). In these circumstances if you want these 'include'", "None # iterate over each line of the file to", "here is we can post process a set of tokens", "_url = None # Track last acquired schema schema =", "self._cached_servers.extend( cfg_plugin.servers(asset=asset)) # We no longer need our configuration object", "of the list is removed. \"\"\" if not isinstance(self._cached_servers, list):", "a valid schema of a supported plugin type - tokens", "to instantiate our object if # we can. Reset it", "if not self.expired(): # We already have cached results to", "YAML version specified {}.'.format(version)) return (list(), list()) # # global", "our settings. self._cached_time = None # Tracks previously loaded content", "cached results to return; use them return self._cached_servers # Our", "file specifically formatted for Apprise. Return a tuple that looks", "self.logger.warning( 'Could not load include URL: {}'.format(url)) self.logger.debug('Loading Exception: {}'.format(str(e)))", "a flag kept with the parsed URL which some child", "config_format = ConfigFormat.YAML ConfigBase.logger.debug( 'Detected YAML configuration ' 'based on", "# re-retrieve our settings. self._cached_time = None # Tracks previously", "= kwargs.get('encoding') if 'format' in kwargs \\ and isinstance(kwargs['format'], six.string_types):", "else: # Unsupported ConfigBase.logger.warning( 'Unsupported Apprise YAML entry #{}'.format(no +", "__iter__(self): \"\"\" Returns an iterator to our server list \"\"\"", "in ALWAYS mode. Take a file:// based configuration for example,", "asset if isinstance(asset, AppriseAsset) else AppriseAsset() try: # Attempt to", "the arguments are invalid or can not be used. 
ConfigBase.logger.warning(", "path; prepend # our current config path url = os.path.join(self.config_path,", "isinstance(tokens, dict): # support our special tokens (if they're present)", "under the MIT License. # # Permission is hereby granted,", "kwargs.get('format').lower() if self.config_format not in CONFIG_FORMATS: # Simple error checking", "failed to parse our url return results # Allow overriding", "# Store our include line configs.append(config.strip()) continue # Acquire our", "set value ConfigBase.logger.warning( 'Invalid asset key \"{}\".'.format(k)) continue if v", "ConfigBase.logger.warning( 'Unsupported URL, entry #{}'.format(no + 1)) continue _results =", "in YAML ' 'configuration entry #{}, item #{}' .format(key, no", "the special keywords. We effectivley look up what these keywords", "return self._cached_servers[index] def __iter__(self): \"\"\" Returns an iterator to our", "subject to the following conditions : # # The above", "set() tags = result.get('tag', None) if tags and isinstance(tags, (list,", "transactions take place). 
Unless under very specific circumstances, it is", "Iterate over each config URL for no, url in enumerate(includes):", "calls does not cause the content to be retrieved again.", "been determined for line, entry in enumerate(content, start=1): result =", "tokens must be a dictionary containing the yaml entries parsed.", "Prepare our Asset Object results['asset'] = \\ asset if isinstance(asset,", "results['cache'] = False # Recursion can never be parsed from", "self.servers() # Pop the element off of the stack return", "to True, all Apprise Config files marked to be in", "url details of the server object as dictionary # containing", "strongly recomended that you leave this default value set to", "will be included # as additional configuration entries when loaded.", "ConfigBase.logger.error( 'Invalid Apprise YAML data specified.') ConfigBase.logger.debug( 'YAML Exception:{}{}'.format(os.linesep, e))", "self._cached_servers[index] def __iter__(self): \"\"\" Returns an iterator to our server", "should always initialize itself as None config_format = None #", "content up to read line by line content = re.split(r'\\r*\\n',", "last retrieved on. This place a role # for cases", "in # and associating it with the cache value we've", "isinstance(url, six.string_types): # We're just a simple URL string... schema", "False def __nonzero__(self): \"\"\" Allows the Apprise object to be", "wrapped in an Python 3.x based 'if statement'. 
True is", "# We found a valid schema worthy of tracking; store", "'Recursion limit reached; ignoring Include URL: %s' % url) if", "Configuration format uses a default if one wasn't one detected", "if not results: # Failed to parse the server URL", "({}) was specified.'.format(cache) self.logger.warning(err) raise TypeError(err) return def servers(self, asset=None,", "we know that we can't accept and # warn the", "def __getitem__(self, index): \"\"\" Returns the indexed server entry associated", "Build a list of tags to associate with the newly", "item #{}' .format(key, no + 1, entry)) del _results[key] ConfigBase.logger.trace(", "(safely) result = yaml.load(content, Loader=yaml.SafeLoader) except (AttributeError, yaml.parser.ParserError, yaml.error.MarkedYAMLError) as", "If we reach here, we have a comment entry #", "in list(_results.keys()): # Strip out any tokens we know that", "also use the keyword 'include' and identify a # configuration", "detect configuration ConfigBase.logger.error('Could not detect configuration') return (list(), list()) if", "local file references this makes no difference at all. But", "deal # in the Software without restriction, including without limitation", "def __iter__(self): \"\"\" Returns an iterator to our server list", "continue # # global tag root directive # global_tags =", "Track the URL to-load _url = None # Track last", "for before it should be considered expired. recursion defines how", "- A pound/hashtag (#) is alawys a comment character so", "result: # Invalid syntax ConfigBase.logger.error( 'Invalid Apprise TEXT configuration format", "tokens[kw] = dict() # strip out processed tokens tokens =", "otherwise detected encoding = 'utf-8' # The default expected configuration", "notice and this permission notice shall be included in #", "found ' '{} on line {}.'.format(entry, line)) # Assume this", "kept with the parsed URL which some child classes will", "should be considered expired. 
recursion defines how deep we recursively", "is alawys a comment character so we skip over #", "a problem; we simply have no includes includes = list()", "actual format type if detected, otherwise it returns None \"\"\"", "# # global tag root directive # global_tags = set()", "tokens[kw].update(matches) # Return our tokens return tokens def __getitem__(self, index):", "configuration ' 'based on line {}.'.format(line)) break elif result.group('text'): config_format", "- tokens must be a dictionary containing the yaml entries", "servers, configs = fn(content=content, asset=asset) self._cached_servers.extend(servers) # Configuration files were", "(str): The URL you want to fully parse. verify_host (:obj:`bool`,", "AppriseAsset) else AppriseAsset() tokens = result.get('asset', None) if tokens and", "containing a list of URLs. Return a tuple that looks", "# Invalid syntax ConfigBase.logger.error( 'Invalid Apprise TEXT configuration format found", "CONFIG_FORMATS from ..common import ContentIncludeMode from ..utils import GET_SCHEMA_RE from", "if insecure_includes is set to True. There are cases where", "\"\"\" Returns an iterator to our server list \"\"\" if", "a copy # of this software and associated documentation files(the", "Tracks previously loaded content for speed self._cached_servers = None #", "can never be parsed from the URL results['insecure_includes'] = self.insecure_includes", "'cache' in results['qsd']: # First try to get it's integer", "In # this case we want to iterate over all", "if config: ConfigBase.logger.debug('Include URL: {}'.format(config)) # Store our include line", "None: ConfigBase.logger.warning( 'Unparseable URL {}, entry #{}'.format( url, no +", "we can pull from self.servers() return self._cached_servers[index] def __iter__(self): \"\"\"", "are considered reserved if they start or end # with", "is very specific and customized for Apprise. 
Args: url (str):", "def __extract_special_tokens(schema, tokens): \"\"\" This function takes a list of", "have not expired, return False return False # Verify our", "value try: results['cache'] = int(results['qsd']['cache']) except (ValueError, TypeError): # No", "based configuration for example, only a file:// based configuration can", "relative include config_path = os.getcwd() def __init__(self, cache=True, recursion=0, insecure_includes=False,", "response and are required to # re-retrieve our settings. self._cached_time", "very least to allow # our configuration based urls. schema", "# We're dealing with a relative path; prepend # our", "except Exception as e: # the arguments are invalid or", "# Invalid syntax ConfigBase.logger.error( 'Invalid Apprise YAML version specified {}.'.format(version))", "isinstance(tags, (list, tuple, six.string_types)): # Store any preset tags global_tags", "urls, entry #{}' .format(key, no + 1)) continue # Store", "We are a url string with additional unescaped options if", "= parse_urls(includes) elif not isinstance(includes, (list, tuple)): # Not a", "a file:// based configuration can include another file:// based one.", "as though it were a yaml file specifically formatted for", "additional unescaped options if isinstance(entries, dict): if six.PY2: _url, tokens", "keywords map to their appropriate value they're expected \"\"\" #", "By default set our return value to None since we", "# THE SOFTWARE. import os import re import six import", "break # If we reach here, we have a comment", "if isinstance(asset, AppriseAsset) else self.asset # Execute our config parse", "to screen and take action # otherwise. 
return (list(), list())", "then # we want to convert the specified string to", "config_format = None # iterate over each line of the", "entries) # Extend our dictionary with our new entries r.update(entries)", "a url string with additional unescaped options if isinstance(entries, dict):", "the global settings _results['tag'] = global_tags for key in list(_results.keys()):", "special keywords. We effectivley look up what these keywords map", "be possible if insecure_includes is set to True. There are", "\\ set(parse_list(_results['tag'])) | global_tags else: # Just use the global", "return (list(), list()) # Dynamically load our parse_ function based", "isinstance(asset, AppriseAsset) else AppriseAsset() try: # Attempt to create an", "if schema is None: # Log invalid entries so that", "'Unparseable URL {} on line {}.'.format(url, line)) continue # Build", "multiple ones separated by # a comma and/or space configs.extend(parse_urls(url))", "apart into a dictionary. This is very specific and customized", "configs.append(config.strip()) continue # Acquire our url tokens results = plugins.url_to_dict(url)", "schema schema = None for key, tokens in it: #", "is set to 2 deep. If set to zero it", "from self.servers() return self._cached_servers[index] def __iter__(self): \"\"\" Returns an iterator", "tags = result.get('tag', None) if tags and isinstance(tags, (list, tuple,", "= list() if isinstance(url, six.string_types): # We're just a simple", "ConfigBase.logger.warning( 'Including {}:// based configuration is prohibited. ' 'Ignoring URL", "a dictionary containing the yaml entries parsed. 
The idea here", "Detect Format Logic: # - A pound/hashtag (#) is alawys", "additional configuration files referenced using # the include keyword configs", "also has an `include` reference, we will only advance through", "configs = list() # Define what a valid line should", "not match: ConfigBase.logger.warning( 'Ignoring invalid token ({}) found in YAML", "(131072B) max_buffer_size = 131072 # By default all configuration is", "# Take an early exit return None # Attempt to", "non blank line # matched. # - If we find", "also optionally associate an asset with the notification. The file", "value must be set to True. \"\"\" super(ConfigBase, self).__init__(**kwargs) #", "{}'.format( self.url())) # Set the time our content was cached", "deep we recursively handle entries that use the `include` keyword.", "raise TypeError(err) # Set our cache flag; it can be", "using the # parsed URL information plugin = plugins.SCHEMA_MAP[results['schema']](**results) #", "configuration based urls. schema = GET_SCHEMA_RE.match(url) if schema is None:", "returns True if the configuration should be considered as expired", "invalid token ({}) found in YAML ' 'configuration entry #{},", "..utils import parse_list from ..utils import parse_bool from ..utils import", "not expected string type ConfigBase.logger.error( 'Invalid Apprise TEXT based configuration", "our schema _schema = GET_SCHEMA_RE.match(key) if _schema is None: #", "= self.insecure_includes try: # Attempt to create an instance of", "an `include` reference, we will only advance through it if", "isinstance(getattr(asset, k), bool)): # If the object in the Asset", "parse_bool(results['qsd']['cache']) return results @staticmethod def detect_config_format(content, **kwargs): \"\"\" Takes the", "as expired or False if content should be retrieved. \"\"\"", "ignoring Include URL: %s' % url) if self._cached_servers: self.logger.info('Loaded {}", "then it is auto detected. 
\"\"\" if config_format is None:", "longer need our configuration object del cfg_plugin else: self.logger.debug( 'Recursion", "defines how deep we recursively handle entries that use the", "(servers, configs) def pop(self, index=-1): \"\"\" Removes an indexed Notification", "the last element of the list is removed. \"\"\" if", "# Minimum requirements 'schema': schema, } if isinstance(tokens, (list, tuple,", "self.logger.debug('Loading Exception: {}'.format(str(e))) continue # if we reach here, we", "raise TypeError(err) return def servers(self, asset=None, **kwargs): \"\"\" Performs reads", "url) if self._cached_servers: self.logger.info('Loaded {} entries from {}'.format( len(self._cached_servers), self.url()))", "ConfigFormat.TEXT # This is only set if the user overrides", "# Create log entry of loaded URL ConfigBase.logger.debug( 'Loaded URL:", "#{}' .format(no + 1, entry)) ConfigBase.logger.debug('Loading Exception: %s' % str(e))", "iterate over each line of the file to attempt to", "we can pull from self.servers() # Pop the element off", "our prefix: prefix = meta.get('prefix', '+') # Detect any matches", "to deal # in the Software without restriction, including without", "Simple error checking err = 'An invalid config format ({})", "# No cache is required because we're just lumping this", "(even file:// based ones). In these circumstances if you want", "not self.insecure_includes) or \\ SCHEMA_MAP[schema].allow_cross_includes == \\ ContentIncludeMode.NEVER: # Prevent", "isinstance(result, dict): # Invalid content ConfigBase.logger.error( 'Invalid Apprise YAML based", "if config_format is None: # Detect the format config_format =", "identify a # configuration location (like this file) which will", "are treated as being in ALWAYS mode. Take a file://", "character so we skip over # lines matched here. 
#", "FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN", "if 'encoding' in results['qsd']: results['encoding'] = results['qsd'].get('encoding') # Our cache", "entry # Adjust default format to TEXT config_format = ConfigFormat.TEXT", "our list up _results['tag'] = \\ set(parse_list(_results['tag'])) | global_tags else:", "parse the server URL self.logger.warning( 'Unparseable include URL {}'.format(url)) continue", "protocols ConfigBase.logger.warning( 'Including {}:// based configuration is prohibited. ' 'Ignoring", "None is returned. \"\"\" results = URLBase.parse_url(url, verify_host=verify_host) if not", "k), (bool, six.string_types))): # We can't set a function or", "the url and ignore arguments associated configs.extend(u for u in", "_results is None: # Setup dictionary _results = { #", "with the notification. \"\"\" # A list of loaded Notification", "detect_config_format(content, **kwargs): \"\"\" Takes the specified content and attempts to", "override any results populated by # parse_url() for entries in", "populated by # parse_url() for entries in tokens: # Copy", "our results to our global set results.append(_results) else: # Unsupported", "return (servers, configs) @staticmethod def config_parse_yaml(content, asset=None): \"\"\" Parse the", "to do so, subject to the following conditions : #", "try to get it's integer value try: results['cache'] = int(results['qsd']['cache'])", "source and return unparsed content # None is returned if", "followed by a colon, we know we're dealing # with", "and/or expired. return True @staticmethod def parse_url(url, verify_host=True): \"\"\"Parses the", "no + 1)) continue # Store our schema schema =", "you set this value. It would be recommended to keep", "if insecure base protocols ConfigBase.logger.warning( 'Including {}:// based configuration is", "a file:// based configuration for example, only a file:// based", "else: # six.PY3 it = iter(url.items()) # Track the URL", "is managed by Apprise object. 
# The below ensures our", "furnished to do so, subject to the following conditions :", "# Our cached response object self._cached_servers = list() # read()", "followed by an equal sign we know # we're dealing", "elif isinstance(url, dict): # Store the url and ignore arguments", "a string that starts with a URL, or our tag", "# what the format is yet config_format = None #", "This method can be somewhat inefficient if disabled. Only disable", "it: # Test our schema _schema = GET_SCHEMA_RE.match(key) if _schema", "a YAML file. # - If we find a string", "valid_line_re.match(entry) if not result: # Invalid syntax ConfigBase.logger.error( 'Invalid Apprise", "Exception: %s' % str(e)) continue # if we reach here,", "line # matched. # - If we find a string", "\"\"\" # A list of loaded Notification Services servers =", "None is returned if there was an error or simply", "for line comments # # One or more tags can", "# detected by the sub-modules default_config_format = ConfigFormat.TEXT # This", "= list() # Define what a valid line should look", "next(iter(url.items())) # Tags you just can't over-ride if 'schema' in", "= result.get('include', None) if isinstance(includes, six.string_types): # Support a single", "for key in list(_results.keys()): # Strip out any tokens we", "configuration sources \"\"\" # The Default Encoding to use if", "= tokens.copy() for kw, meta in plugins.SCHEMA_MAP[schema]\\ .template_kwargs.items(): # Determine", "directive # global_tags = set() tags = result.get('tag', None) if", "you leave this default value set to True. Returns: A", "our cache time to determine whether we will get our", "for Apprise. 
Return a tuple that looks like (servers, configs)", "k, v.strip()) else: # we must set strings with a", "{}'.format(str(e))) continue # if we reach here, we can now", "our options so we # can at least tell the", "# We couldn't detect configuration ConfigBase.logger.error('Could not detect configuration') return", "found a valid schema worthy of tracking; store it's #", "they're present) if schema in plugins.SCHEMA_MAP: tokens = ConfigBase.__extract_special_tokens( schema,", "= schema.group('schema').lower() # Some basic validation if schema not in", "all Apprise Config files marked to be in STRICT mode", "and associating it with the cache value we've already #", "into our cached content # and verify it has not", "# in this configuration file to our list self._cached_servers.extend( cfg_plugin.servers(asset=asset))", "'include' entries to be honored, this value must be set", "= _schema.group('schema').lower() # Store our URL and Schema Regex _url", "contains a list of loaded notification plugins - configs contains", "returns a tuple # of our servers and our configuration", "We no longer need our configuration object del cfg_plugin else:", "Execute our config parse function which always returns a list", "be considered expired. 
recursion defines how deep we recursively handle", "for line, entry in enumerate(content, start=1): result = valid_line_re.match(entry) if", "not in self.schemas() and not self.insecure_includes) or \\ SCHEMA_MAP[schema].allow_cross_includes ==", "it tokens[kw] = dict() # strip out processed tokens tokens", "we reach here, we can now add this servers found", "str(e)) continue # if we reach here, we successfully loaded", "if not isinstance(self._cached_servers, list): # Generate ourselves a list of", "result.get('version', 1) if version != 1: # Invalid syntax ConfigBase.logger.error(", "err = 'An invalid cache value ({}) was specified.'.format(cache) self.logger.warning(err)", "added # notifications if any were set results['tag'] = set(parse_list(result.group('tags')))", "We are a url string with additional unescaped options. In", "anything else. 128KB (131072B) max_buffer_size = 131072 # By default", "all configuration is not includable using the 'include' # line", "or our tag # definitions (accepting commas) followed by an", "Asset Object results['asset'] = \\ asset if isinstance(asset, AppriseAsset) else", "invalid or can not be used. ConfigBase.logger.warning( 'Could not load", "is set to True. There are cases where a self", "# We're done; we failed to parse our url return", "# parsed URL information cfg_plugin = SCHEMA_MAP[results['schema']](**results) except Exception as", "additional configuration entries when loaded. include <ConfigURL> \"\"\" # A", "\"\"\" Takes the specified content and attempts to detect the", "the yaml entries parsed. The idea here is we can", "list of URLs. Return a tuple that looks like (servers,", "another source and add it to our existing compilation. 
If", "When set to True, all Apprise Config files marked to", "from {}'.format( len(self._cached_servers), self.url())) else: self.logger.warning( 'Failed to load Apprise", "we successfully loaded our data servers.append(plugin) # Return what was", "expired or False if content should be retrieved. \"\"\" if", "# Attempt to acquire the schema at the very least", "YAML based configuration specified.') return (list(), list()) # YAML Version", "enumerate(includes): if isinstance(url, six.string_types): # Support a single inline string", "fail. However this include would be possible if insecure_includes is", "is prohibited. ' 'Ignoring URL {}'.format(schema, url)) continue # Prepare", "cache flag; it can be True or a (positive) integer", "# Copy ourselves a template of our parsed URL as", "modify, merge, publish, distribute, sublicense, and / or sell #", "can exist for before it should be considered expired. recursion", "Format Logic: # - A pound/hashtag (#) is alawys a", "= ConfigFormat.YAML ConfigBase.logger.debug( 'Detected YAML configuration ' 'based on line", "files(the \"Software\"), to deal # in the Software without restriction,", "'if statement'. True is returned if our content was downloaded", "return iter(self._cached_servers) def __len__(self): \"\"\" Returns the total number of", "source and add it to our existing compilation. If the", "WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN", "one call can be made to retrieve the (same) data.", "configurations that inherit this class. 
By default we cache our", "not self.expired(): # We already have cached results to return;", "them return self._cached_servers # Our cached response object self._cached_servers =", "= ConfigFormat.TEXT # This is only set if the user", "Invalid syntax ConfigBase.logger.error( 'Undetectable Apprise configuration found ' 'based on", "ConfigBase.logger.error('Could not detect configuration') return (list(), list()) if config_format not", "asset try: # Attempt to create an instance of our", "of tokens and updates them to no longer include any", "{}'.format(config)) # Store our include line configs.append(config.strip()) continue # Acquire", "tokens: # Copy ourselves a template of our parsed URL", "return False return False # Verify our cache time to", "\"\"\" super(ConfigBase, self).__init__(**kwargs) # Tracks the time the content was", "= '{}://{}'.format(schema, URLBase.quote(url)) else: # Ensure our schema is always", "# Invalid; correct it tokens[kw] = dict() # strip out", "Encoding to use if not otherwise detected encoding = 'utf-8'", "or substantial portions of the Software. # # THE SOFTWARE", "by default are disabled. When set to True, all Apprise", "# If the object in the Asset is a boolean,", "a file schema = 'file' if not os.path.isabs(url): # We're", "Failed to parse the server URL ConfigBase.logger.warning( 'Unparseable URL {}", "you understand the consequences. You can alternatively set the cache", "with the configurations that inherit this class. 
By default we", "..common import ConfigFormat from ..common import CONFIG_FORMATS from ..common import", "dealing with a relative path; prepend # our current config", "OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR", "= None # Track last acquired schema schema = None", "of our dictionary tokens = tokens.copy() for kw, meta in", "= results.pop(0) # tag is a special keyword that is", "whatever it takes for the # config plugin to load", "global_tags for key in list(_results.keys()): # Strip out any tokens", "- schema must be a valid schema of a supported", "action # with. ConfigBase.logger.warning( 'Invalid URL {}, entry #{}'.format(url, no", "and associated documentation files(the \"Software\"), to deal # in the", "meta in plugins.SCHEMA_MAP[schema]\\ .template_kwargs.items(): # Determine our prefix: prefix =", "we know we're dealing # with a YAML file. #", "or multiple ones separated by a # comma and/or space", "parse_bool(v)) elif isinstance(v, six.string_types): # Set our asset object with", "CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION", "list() # Define what a valid line should look like", "if isinstance(tokens, (list, tuple, set)): # populate and/or override any", "r'(?P<text>((?P<tag>[ \\t,a-z0-9_-]+)=)?[a-z0-9]+://.*)|' r'((?P<yaml>[a-z0-9]+):.*))?$', re.I) try: # split our content up", "they're expected \"\"\" # Create a copy of our dictionary", "the first non-comment and non blank line # matched. 
#", "# One or more tags can be idenified using comma's", "line should look like valid_line_re = re.compile( r'^\\s*(?P<line>([;#]+(?P<comment>.*))|' r'(?P<text>((?P<tag>[ \\t,a-z0-9_-]+)=)?[a-z0-9]+://.*)|'", "these circumstances if you want these 'include' entries to be", "_results.items()]))) # Prepare our Asset Object _results['asset'] = asset try:", "our recursion) results['cache'] = False # Recursion can never be", "self.default_config_format \\ if self.config_format is None else self.config_format # Dynamically", "= list() # Iterate over each config URL for no,", "# # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY", "= plugins.SCHEMA_MAP[_results['schema']](**_results) # Create log entry of loaded URL ConfigBase.logger.debug(", "if not matches: # we're done with this entry continue", "not k.startswith(prefix)} # Update our entries tokens[kw].update(matches) # Return our", "total number of servers loaded \"\"\" if not isinstance(self._cached_servers, list):", "can't accept and # warn the user match = VALID_TOKEN.match(key)", "keywords. We effectivley look up what these keywords map to", "warn the user match = VALID_TOKEN.match(key) if not match: ConfigBase.logger.warning(", "continue # Store our schema schema = _schema.group('schema').lower() # Store", "is set to STRICT mode. 
If an http:// based configuration", "= GET_SCHEMA_RE.match(key) if _schema is None: # Log invalid entries", "def __len__(self): \"\"\" Returns the total number of servers loaded", "and return unparsed content # None is returned if there", "tuple, six.string_types)): # Store any preset tags global_tags = set(parse_list(tags))", "no data content = self.read(**kwargs) if not isinstance(content, six.string_types): #", "1)) continue # Track our entries entry = 0 while", "return (list(), list()) if config_format not in CONFIG_FORMATS: # Invalid", "from ..common import ConfigFormat from ..common import CONFIG_FORMATS from ..common", "# Track our entries entry = 0 while len(results): #", "is None else self.config_format # Dynamically load our parse_ function", "a dictionary. This is very specific and customized for Apprise.", "self.config_format # Dynamically load our parse_ function based on our", "specified.') return (list(), list()) for line, entry in enumerate(content, start=1):", "enumerate(urls): # Our results object is what we use to", "Assume this is a file we shouldn't be parsing. It's", "not load Apprise YAML configuration ' 'entry #{}, item #{}'", "line; do nothing continue if config: ConfigBase.logger.debug('Include URL: {}'.format(config)) #", "copy of our dictionary tokens = tokens.copy() for kw, meta", "parsed if successful, otherwise None is returned. 
\"\"\" results =", "None def expired(self): \"\"\" Simply returns True if the configuration", "subsiquent calls does not cause the content to be retrieved", "# Detect any matches matches = \\ {k[1:]: str(v) for", "ContentIncludeMode.STRICT and schema not in self.schemas() and not self.insecure_includes) or", "# Store our schema schema = _schema.group('schema').lower() # Store our", "Unless under very specific circumstances, it is strongly recomended that", "continue # Prepare our Asset Object results['asset'] = asset #", "continue # Parse our url details of the server object", "in plugins.SCHEMA_MAP[schema]\\ .template_kwargs.items(): # Determine our prefix: prefix = meta.get('prefix',", "should be implimented by the child classes \"\"\" return None", "to load the data source and return unparsed content #", "LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER", "TEXT configuration ' 'based on line {}.'.format(line)) break # If", "updates them to no longer include any special tokens such", "config_format is None: # Detect the format config_format = ConfigBase.detect_config_format(content)", "specified.'.format(cache) self.logger.warning(err) raise TypeError(err) return def servers(self, asset=None, **kwargs): \"\"\"", "self hosting apprise developer may wish to load configuration from", "__bool__(self): \"\"\" Allows the Apprise object to be wrapped in", "= getattr(ConfigBase, 'config_parse_{}'.format(config_format)) # Initialize our asset object asset =", "syntax is: # # pound/hashtag allow for line comments #", "Some basic validation if schema not in SCHEMA_MAP: ConfigBase.logger.warning( 'Unsupported", "in plugins.SCHEMA_MAP: tokens = ConfigBase.__extract_special_tokens( schema, tokens) # Copy ourselves", "tracking; store it's # details: _results = plugins.url_to_dict(url) if _results", "notification. 
The file syntax is: # # pound/hashtag allow for", "# we're done with this entry continue if not isinstance(tokens.get(kw,", "it broken apart into a dictionary. This is very specific", "cache value to an int identifying the number of seconds", "our servers and our configuration servers, configs = fn(content=content, asset=asset)", "Adjust default format to TEXT config_format = ConfigFormat.TEXT return config_format", "is required because we're just lumping this in # and", "'include' and identify a # configuration location (like this file)", "results.append(r) else: # add our results to our global set", "the server URL ConfigBase.logger.warning( 'Unparseable URL {} on line {}.'.format(url,", "otherwise. return (list(), list()) url, config = result.group('url'), result.group('config') if", "a comment character so we skip over # lines matched", "that is managed by Apprise object. # The below ensures", "Python 3.x based 'if statement'. True is returned if our", "determine whether we will get our # content again. age_in_sec", "schema is always in lower case schema = schema.group('schema').lower() #", "not result: # Invalid syntax ConfigBase.logger.error( 'Undetectable Apprise configuration found", "FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE", "you do intend to use it. insecure_include by default are", "A pound/hashtag (#) is alawys a comment character so we", "Setup dictionary _results = { # Minimum requirements 'schema': schema,", "sub-modules default_config_format = ConfigFormat.TEXT # This is only set if", "file. 
# - If we find a string that starts", "isinstance(asset, AppriseAsset) else AppriseAsset() tokens = result.get('asset', None) if tokens", "any results populated by # parse_url() for entries in tokens:", "matches matches = \\ {k[1:]: str(v) for k, v in", "_results['tag'] = global_tags for key in list(_results.keys()): # Strip out", "as:{}{}' .format(no + 1, url, os.linesep, os.linesep.join( ['{}=\"{}\"'.format(k, a) for", "None) if tokens and isinstance(tokens, dict): for k, v in", "add our results to our global set results.append(r) else: #", "token is valid or not VALID_TOKEN = re.compile( r'(?P<token>[a-z0-9][a-z0-9_]+)', re.I)", "separated by # a comma and/or space configs.extend(parse_urls(url)) elif isinstance(url,", "getattr(ConfigBase, 'config_parse_{}'.format(config_format)) # Initialize our asset object asset = asset", "'Invalid Apprise configuration specified.') return None # By default set", "def config_parse(content, asset=None, config_format=None, **kwargs): \"\"\" Takes the specified config", "services that could be parsed and loaded. \"\"\" if not", "and customized for Apprise. Args: url (str): The URL you", "Apprise object. # The below ensures our tags are set", "dictionary. This is very specific and customized for Apprise. 
Args:", "# Copyright (C) 2020 <NAME> <<EMAIL>> # All rights reserved.", "cross inclusion based on allow_cross_includes rules if (SCHEMA_MAP[schema].allow_cross_includes == ContentIncludeMode.STRICT", "treat it as a bool # instead: results['cache'] = parse_bool(results['qsd']['cache'])", "on line {}.'.format(line)) break # If we reach here, we", "k), bool)): # If the object in the Asset is", "if not isinstance(content, six.string_types): # Set the time our content", "file to attempt to detect it # stop the moment", "configuration from {}'.format( self.url())) # Set the time our content", "and Schema Regex _url = key if _url is None:", "flag can never be parsed from the URL results['insecure_includes'] =", "list of loaded Notification Services servers = list() # A", "information plugin = plugins.SCHEMA_MAP[results['schema']](**results) # Create log entry of loaded", "include another file:// based one. because it is set to", "content we can pull from self.servers() return True if self._cached_servers", "a relative path; prepend # our current config path url", "a list of additional configuration files referenced. You may optionally", "# # Copyright (C) 2020 <NAME> <<EMAIL>> # All rights", "IN # THE SOFTWARE. import os import re import six", "so that maintainer of config # config file at least", "to True. Returns: A dictionary is returned containing the URL", "skip over # lines matched here. # - Detection begins", "self.insecure_includes try: # Attempt to create an instance of our", "# Return our tokens return tokens def __getitem__(self, index): \"\"\"", "in # all copies or substantial portions of the Software.", "a template of our parsed URL as a base #", "it should be considered expired. recursion defines how deep we", "we should be reading in more. 
This is more of", "self.logger.warning( 'Unparseable include URL {}'.format(url)) continue # Handle cross inclusion", "config format fn = getattr(ConfigBase, 'config_parse_{}'.format(config_format)) # Execute our config", "config parse function which always returns a list return fn(content=content,", "is returned if our content was downloaded correctly. \"\"\" if", "# Recursion can never be parsed from the URL; we", "an asset with the notification. The file syntax is: #", "the include keyword configs = list() try: # Load our", "entries to be honored, this value must be set to", "and returns it. By default, the last element of the", "will get our # content again. age_in_sec = time.time() -", "= yaml.load(content, Loader=yaml.SafeLoader) except (AttributeError, yaml.parser.ParserError, yaml.error.MarkedYAMLError) as e: #", "we're dealing with a TEXT format. # Define what a", "compilation. If the file we remotely retrieve also has an", "settings _results['tag'] = global_tags for key in list(_results.keys()): # Strip", "over each URL for no, url in enumerate(urls): # Our", "dict): # Store the url and ignore arguments associated configs.extend(u", "you just can't over-ride if 'schema' in entries: del entries['schema']", "Software. 
# # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT", "it is strongly recomended that you leave this default value", "to any person obtaining a copy # of this software", "plugin = plugins.SCHEMA_MAP[results['schema']](**results) # Create log entry of loaded URL", "Plan B is to assume we're dealing with a file", "can pull from self.servers() return len(self._cached_servers) def __bool__(self): \"\"\" Allows", "TEXT config_format = ConfigFormat.TEXT return config_format @staticmethod def config_parse(content, asset=None,", "we can pull from self.servers() return len(self._cached_servers) def __bool__(self): \"\"\"", "and/or space configs.extend(parse_urls(url)) elif isinstance(url, dict): # Store the url", "schema = GET_SCHEMA_RE.match(url) if schema is None: # Plan B", "our config format fn = getattr(ConfigBase, 'config_parse_{}'.format(config_format)) # Initialize our", "Handle cross inclusion based on allow_cross_includes rules if (SCHEMA_MAP[schema].allow_cross_includes ==", "is only set if the user overrides the config format", "identifying the number of seconds the previously retrieved can exist", "%s' % url) if self._cached_servers: self.logger.info('Loaded {} entries from {}'.format(", "asset value to \"{}\".'.format(k)) continue # # global tag root", "of the Software, and to permit persons to whom the", "enough reason to look further into our cached content #", "= time.time() # Nothing more to do; return our empty", "line {}.'.format(line)) # Take an early exit return None #", "done with this entry continue if not isinstance(tokens.get(kw, None), dict):", "YAML data specified.') ConfigBase.logger.debug( 'YAML Exception:{}{}'.format(os.linesep, e)) return (list(), list())", "to allow # our configuration based urls. 
schema = GET_SCHEMA_RE.match(url)", "# Initialize our asset object asset = asset if isinstance(asset,", "recomended that you leave this default value set to True.", "{}.'.format(schema)) continue # Parse our url details of the server", "Notification Services servers = list() # A list of additional", "we # can at least tell the end user what", "data servers.append(plugin) return (servers, configs) def pop(self, index=-1): \"\"\" Removes", "if self.config_format not in CONFIG_FORMATS: # Simple error checking err", "loaded return (servers, configs) @staticmethod def config_parse_yaml(content, asset=None): \"\"\" Parse", "result.get('urls', None) if not isinstance(urls, (list, tuple)): # Not a", "By default all configuration is not includable using the 'include'", "the previously retrieved can exist for before it should be", "only a file:// based configuration can include another file:// based", "the time the content was last retrieved on. This place", "be parsed from the URL; we decrement # it one", "one wasn't one detected # or enfored. config_format = \\", "comment character so we skip over # lines matched here.", "TypeError: # content was not expected string type ConfigBase.logger.error( 'Invalid", "'format' in results['qsd']: results['format'] = results['qsd'].get('format') if results['format'] not in", "detect the format type The function returns the actual format", "tags and isinstance(tags, (list, tuple, six.string_types)): # Store any preset", "our content was downloaded correctly. 
\"\"\" if not isinstance(self._cached_servers, list):", "six.PY2: it = url.iteritems() else: # six.PY3 it = iter(url.items())", "instance of our plugin using the # parsed URL information", "of our parsed URL as a base to # work", "do; return our empty cache list return self._cached_servers # Our", "with our new entries r.update(entries) # add our results to", "_results: # Tidy our list up _results['tag'] = \\ set(parse_list(_results['tag']))", "PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR", "is returned containing the URL fully parsed if successful, otherwise", "verify_host=verify_host) if not results: # We're done; we failed to", "more of this amount of data into memory as there", "# asset = asset if isinstance(asset, AppriseAsset) else AppriseAsset() tokens", "server entry associated with the loaded notification servers \"\"\" if", "add our results to our global set results.append(_results) else: #", "correctly if 'tag' in _results: # Tidy our list up", "url and ignore arguments associated configs.extend(u for u in url.keys())", "'{} on line {}.'.format(entry, line)) # Assume this is a", "or a (positive) integer try: self.cache = cache if isinstance(cache,", "content and loads it based on the specified config_format. If", "url, config = result.group('url'), result.group('config') if not (url or config):", "based 'if statement'. 
True is returned if our content was", "moment a the type has been determined for line, entry", "ConfigBase.logger.debug('Loaded URL: {}'.format(plugin.url())) except Exception as e: # the arguments", "as None config_format = None # Don't read any more", "self.recursion = recursion # Initialize our insecure_includes flag self.insecure_includes =", "Include URL: %s' % url) if self._cached_servers: self.logger.info('Loaded {} entries", "{}'.format(plugin.url())) except Exception as e: # the arguments are invalid", "if six.PY2: _url, tokens = next(url.iteritems()) else: # six.PY3 _url,", "already # declared (prior to our recursion) results['cache'] = False", "a list of loaded notification plugins - configs contains a", "WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING", "configs: if self.recursion > 0: # Attempt to acquire the", "Store any preset tags global_tags = set(parse_list(tags)) # # include", "in the Software without restriction, including without limitation the rights", "# anything else. 
128KB (131072B) max_buffer_size = 131072 # By", "bool) else int(cache) if self.cache < 0: err = 'A", "the Apprise object to be wrapped in an Python 3.x", "tags associated) <URL> # you can also use the keyword", "@staticmethod def __extract_special_tokens(schema, tokens): \"\"\" This function takes a list", "YAML configuration ' 'entry #{}, item #{}' .format(no + 1,", "a valid schema worthy of tracking; store it's # details:", "_schema is None: # Log invalid entries so that maintainer", "Simply returns True if the configuration should be considered as", "insecure_includes if 'encoding' in kwargs: # Store the encoding self.encoding", "asset key \"{}\".'.format(k)) continue if not (hasattr(asset, k) and isinstance(getattr(asset,", "we decrement # it one level results['recursion'] = self.recursion -", "to our global set results.append(_results) else: # Unsupported ConfigBase.logger.warning( 'Unsupported", "entry)) ConfigBase.logger.debug('Loading Exception: %s' % str(e)) continue # if we", "returned containing the URL fully parsed if successful, otherwise None", "case schema = schema.group('schema').lower() # Some basic validation if schema", "for entries in tokens: # Copy ourselves a template of", "with the cache value we've already # declared (prior to", "self.config_format is None else self.config_format # Dynamically load our parse_", "inline string or multiple ones separated by # a comma", "is None: # Setup dictionary _results = { # Minimum", "the end user what entries were ignored # due to", "= result.get('version', 1) if version != 1: # Invalid syntax", "content was not expected string type ConfigBase.logger.error( 'Invalid Apprise configuration", "= next(url.iteritems()) else: # six.PY3 _url, tokens = next(iter(url.items())) #", "Takes the specified content and attempts to detect the format", "loaded notification plugins - configs contains a list of additional", "an http:// based configuration file attempted to include a file://", "be retrieved. 
\"\"\" if isinstance(self._cached_servers, list) and self.cache: # We", "we simply have no urls urls = list() # Iterate", "high you set this value. It would be recommended to", "including without limitation the rights # to use, copy, modify,", "in CONFIG_FORMATS: # Simple error checking err = 'An invalid", "if _results is None: ConfigBase.logger.warning( 'Unparseable URL {}, entry #{}'.format(", "import parse_urls from . import SCHEMA_MAP # Test whether token", "returned if there was an error or simply no data", "= 'file' if not os.path.isabs(url): # We're dealing with a", "now add this servers found # in this configuration file", "user provided some of the special keywords. We effectivley look", "our tokens return tokens def __getitem__(self, index): \"\"\" Returns the", "reserved. # # This code is licensed under the MIT", "of the information parsed from our URL results = SCHEMA_MAP[schema].parse_url(url)", "that inherit this class. By default we cache our responses", "return results @staticmethod def detect_config_format(content, **kwargs): \"\"\" Takes the specified", "<NAME> <<EMAIL>> # All rights reserved. # # This code", "set to 2 deep. If set to zero it is", "line content = re.split(r'\\r*\\n', content) except TypeError: # content was", "@staticmethod def config_parse_yaml(content, asset=None): \"\"\" Parse the specified content as", "# the include keyword configs = list() try: # Load", "re-retrieve our settings. self._cached_time = None # Tracks previously loaded", "server URL self.logger.warning( 'Unparseable include URL {}'.format(url)) continue # Handle", "..utils import GET_SCHEMA_RE from ..utils import parse_list from ..utils import", "entries that use the `include` keyword. This keyword requires us", "we're dealing with a file schema = 'file' if not", "Apprise configuration found ' 'based on line {}.'.format(line)) # Take", "be honored, this value must be set to True. 
\"\"\"", "url = '{}://{}'.format(schema, URLBase.quote(url)) else: # Ensure our schema is", "this format (no tags associated) <URL> # you can also", "always initialize itself as None config_format = None # Don't", "This object should be implimented by the child classes \"\"\"", "= False # Recursion can never be parsed from the", "it can be True or a (positive) integer try: self.cache", "Entries are considered reserved if they start or end #", "contains a list of additional configuration files referenced. You may", "disabled. Only disable caching if you understand the consequences. You", "of the Software. # # THE SOFTWARE IS PROVIDED \"AS", "into a dictionary. This is very specific and customized for", "the child classes \"\"\" return None def expired(self): \"\"\" Simply", "our URL and Schema Regex _url = key if _url", "or sell # copies of the Software, and to permit", "to an empty string v = '' if (isinstance(v, (bool,", "# Entries are considered reserved if they start or end", "results['asset'] = \\ asset if isinstance(asset, AppriseAsset) else AppriseAsset() try:", "in it: # Test our schema _schema = GET_SCHEMA_RE.match(key) if", "from self.servers() return len(self._cached_servers) def __bool__(self): \"\"\" Allows the Apprise", "we use to instantiate our object if # we can.", "(prior to our recursion) results['cache'] = False # Recursion can", "TypeError): err = 'An invalid cache value ({}) was specified.'.format(cache)", "# and associating it with the cache value we've already", "version specified {}.'.format(version)) return (list(), list()) # # global asset", "isinstance(self._cached_servers, list): # Generate ourselves a list of content we", "an equal sign we know # we're dealing with a", "results['recursion'] = self.recursion - 1 # Insecure Includes flag can", "'Unsupported format specified {}'.format( results['format'])) del results['format'] # Defines the", "for remote content, this does mean more then one call", "if isinstance(cache, bool) else 
int(cache) if self.cache < 0: err", "at least has something to take action # with. ConfigBase.logger.warning(", "on line {}.'.format(line)) break elif result.group('text'): config_format = ConfigFormat.TEXT ConfigBase.logger.debug(", "just isn't an integer; now treat it as a bool", "parse our url return results # Allow overriding the default", "if isinstance(asset, AppriseAsset) else AppriseAsset() try: # Attempt to create", "our server list \"\"\" if not isinstance(self._cached_servers, list): # Generate", "a yaml file specifically formatted for Apprise. Return a tuple", "for k, v in tokens.items() if not k.startswith(prefix)} # Update", "This is more of a safe guard then # anything", "valid line should look like valid_line_re = re.compile( r'^\\s*(?P<line>([;#]+(?P<comment>.*))|' r'(\\s*(?P<tags>[^=]+)=|=)?\\s*'", "action # with. ConfigBase.logger.warning( 'Ignored entry {} found under urls,", "{} found under urls, entry #{}' .format(key, no + 1))", "our content was cached at self._cached_time = time.time() return self._cached_servers", "loaded. \"\"\" if not self.expired(): # We already have cached", "Update our entries tokens[kw].update(matches) # Return our tokens return tokens", "there was an error or simply no data content =", "schema, tokens) # Copy ourselves a template of our parsed", "= re.compile( r'(?P<token>[a-z0-9][a-z0-9_]+)', re.I) class ConfigBase(URLBase): \"\"\" This is the", "OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE", "format unless otherwise # detected by the sub-modules default_config_format =", "' 'Ignoring URL {}'.format(schema, url)) continue # Prepare our Asset", "the default config format if 'format' in results['qsd']: results['format'] =", "end user what entries were ignored # due to errors", "returns it broken apart into a dictionary. 
This is very", "+ 1)) continue _results = plugins.url_to_dict(_url) if _results is None:", "default expected configuration format unless otherwise # detected by the", "{}.'.format(version)) return (list(), list()) # # global asset object #", "_schema.group('schema').lower() # Store our URL and Schema Regex _url =", "ConfigBase.logger.error( 'Invalid Apprise YAML based configuration specified.') return (list(), list())", "parse_url(url, verify_host=True): \"\"\"Parses the URL and returns it broken apart", "and isinstance(kwargs['format'], six.string_types): # Store the enforced config format self.config_format", "by a colon, we know we're dealing # with a", "our config parse function which always returns a tuple #", "role # for cases where we are not caching our", "want to iterate over all of our options so we", "'Unparseable URL {}, entry #{}'.format( url, no + 1)) continue", "_schema = GET_SCHEMA_RE.match(key) if _schema is None: # Log invalid", "longer include any special tokens such as +,-, and :", "how high you set this value. It would be recommended", "of this software and associated documentation files(the \"Software\"), to deal", "Defines the encoding of the payload if 'encoding' in results['qsd']:", "while len(results): # Increment our entry count entry += 1", ". import SCHEMA_MAP # Test whether token is valid or", "and isinstance(tags, (list, tuple, six.string_types)): # Store any preset tags", "tokens return tokens def __getitem__(self, index): \"\"\" Returns the indexed", "self.servers() return self._cached_servers[index] def __iter__(self): \"\"\" Returns an iterator to", "of content we can pull from self.servers() return True if", "config_parse_yaml(content, asset=None): \"\"\" Parse the specified content as though it", "entry)) del _results[key] ConfigBase.logger.trace( 'URL #{}: {} unpacked as:{}{}' .format(no", "Iterate over each URL for no, url in enumerate(urls): #", "set to zero it is off. 
There is no limit", "isinstance(kwargs['format'], six.string_types): # Store the enforced config format self.config_format =", "for the # config plugin to load the data source", "We found a valid schema worthy of tracking; store it's", "included # as additional configuration entries when loaded. include <ConfigURL>", "content should be retrieved. \"\"\" if isinstance(self._cached_servers, list) and self.cache:", "permit persons to whom the Software is # furnished to", "{}'.format(url)) self.logger.debug('Loading Exception: {}'.format(str(e))) continue # if we reach here,", "if result.group('yaml'): config_format = ConfigFormat.YAML ConfigBase.logger.debug( 'Detected YAML configuration '", "we can pull from self.servers() return iter(self._cached_servers) def __len__(self): \"\"\"", "THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE", "# Failed to parse the server URL ConfigBase.logger.warning( 'Unparseable URL", "with a string ConfigBase.logger.warning( 'Invalid asset value to \"{}\".'.format(k)) continue", "the user overrides the config format on the URL #", "from self.servers() return iter(self._cached_servers) def __len__(self): \"\"\" Returns the total", "developer may wish to load configuration from memory (in a", "# If we reach here, we have a comment entry", "above copyright notice and this permission notice shall be included", "SSL keys (if SSL transactions take place). 
Unless under very", "= valid_line_re.match(entry) if not result: # Invalid syntax ConfigBase.logger.error( 'Invalid", "our list self._cached_servers.extend( cfg_plugin.servers(asset=asset)) # We no longer need our", "which always returns a list return fn(content=content, asset=asset) @staticmethod def", "result.get('tag', None) if tags and isinstance(tags, (list, tuple, six.string_types)): #", "server URL ConfigBase.logger.warning( 'Unparseable URL {} on line {}.'.format(url, line))", "tuple, set)): # populate and/or override any results populated by", "be considered as expired or False if content should be", "of additional configuration files referenced. You may optionally associate an", "the config format on the URL # this should always", "ConfigBase.logger.warning( 'Invalid URL {}, entry #{}'.format(url, no + 1)) continue", "we can pull from self.servers() return True if self._cached_servers else", "configuration if result.group('yaml'): config_format = ConfigFormat.YAML ConfigBase.logger.debug( 'Detected YAML configuration", "insecure base protocols ConfigBase.logger.warning( 'Including {}:// based configuration is prohibited.", "-*- # # Copyright (C) 2020 <NAME> <<EMAIL>> # All", "without limitation the rights # to use, copy, modify, merge,", "this servers found # in this configuration file to our", "return self._cached_servers def read(self): \"\"\" This object should be implimented", "# No problem, it just isn't an integer; now treat", "It would be recommended to keep it low if you", "on line {}.'.format(entry, line)) # Assume this is a file", "plugins.url_to_dict(url) if results is None: # Failed to parse the", "ConfigBase.logger.warning( 'Invalid asset value to \"{}\".'.format(k)) continue # # global", "object with the new value setattr(asset, k, v.strip()) else: #", "results['format'] # Defines the encoding of the payload if 'encoding'", "# support our special tokens (if they're present) if schema", "because it is set to STRICT mode. 
If an http://", "that maintainer of config # config file at least has", "not expired, return False return False # Verify our cache", "tokens) # Copy ourselves a template of our parsed URL", "EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE", "our tags are set correctly if 'tag' in _results: #", "retrieved on. This place a role # for cases where", "that we can't accept and # warn the user match", "software and associated documentation files(the \"Software\"), to deal # in", "= results['qsd'].get('format') if results['format'] not in CONFIG_FORMATS: URLBase.logger.warning( 'Unsupported format", "'An invalid cache value ({}) was specified.'.format(cache) self.logger.warning(err) raise TypeError(err)", "the notification. \"\"\" # A list of loaded Notification Services", "If the object in the Asset is a boolean, then", "'An invalid configuration format ({}) was specified'.format( config_format)) return (list(),", "is a special keyword that is managed by Apprise object.", "was cached at self._cached_time = time.time() # Nothing more to", "asset=None, config_format=None, **kwargs): \"\"\" Takes the specified config content and", "to get it's integer value try: results['cache'] = int(results['qsd']['cache']) except", "configs contains a list of additional configuration files referenced. You", "self.insecure_includes) or \\ SCHEMA_MAP[schema].allow_cross_includes == \\ ContentIncludeMode.NEVER: # Prevent the", "be made to retrieve the (same) data. This method can", "do intend to use it. 
insecure_include by default are disabled.", "to our recursion) results['cache'] = False # Recursion can never", "URL, entry #{}'.format(no + 1)) continue _results = plugins.url_to_dict(_url) if", "if not match: ConfigBase.logger.warning( 'Ignoring invalid token ({}) found in", "any were set results['tag'] = set(parse_list(result.group('tags'))) # Prepare our Asset", "and not self.insecure_includes) or \\ SCHEMA_MAP[schema].allow_cross_includes == \\ ContentIncludeMode.NEVER: #", "for u in url.keys()) # # urls root directive #", "path manages the handling of relative include config_path = os.getcwd()", "recommended to keep it low if you do intend to", "configuration ' 'based on line {}.'.format(line)) break # If we", "detected # or enfored. config_format = \\ self.default_config_format \\ if", "# Tidy our list up _results['tag'] = \\ set(parse_list(_results['tag'])) |", "{}.'.format(line)) break # If we reach here, we have a", "that will keep things consistent when working with the configurations", "super(ConfigBase, self).__init__(**kwargs) # Tracks the time the content was last", "and/or space includes = parse_urls(includes) elif not isinstance(includes, (list, tuple)):", "it. insecure_include by default are disabled. When set to True,", "missing and/or expired. return True @staticmethod def parse_url(url, verify_host=True): \"\"\"Parses", "ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT", "options if isinstance(entries, dict): if six.PY2: _url, tokens = next(url.iteritems())", "# Tags you just can't over-ride if 'schema' in entries:", "is valid or not VALID_TOKEN = re.compile( r'(?P<token>[a-z0-9][a-z0-9_]+)', re.I) class", "specified content as though it were a yaml file specifically", "match that. 
setattr(asset, k, parse_bool(v)) elif isinstance(v, six.string_types): # Set", "to \"{}\".'.format(k)) continue # # global tag root directive #", "Not a problem; we simply have no urls urls =", "\\ SCHEMA_MAP[schema].allow_cross_includes == \\ ContentIncludeMode.NEVER: # Prevent the loading if", "due to errors if six.PY2: it = url.iteritems() else: #", "the `include` keyword. This keyword requires us to fetch more", "Prevent the loading if insecure base protocols ConfigBase.logger.warning( 'Including {}://", "parse_ function based on our config format fn = getattr(ConfigBase,", "except TypeError: # content was not expected string type ConfigBase.logger.error(", "OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT,", "THE SOFTWARE OR THE USE OR OTHER DEALINGS IN #", "if age_in_sec <= self.cache: # We have not expired; return", "were set results['tag'] = set(parse_list(result.group('tags'))) # Prepare our Asset Object", "= asset if isinstance(asset, AppriseAsset) else AppriseAsset() tokens = result.get('asset',", "of tokens provided in a YAML file where the user", "cache value we've already # declared (prior to our recursion)", "files referenced using # the include keyword configs = list()", "load configuration from memory (in a string format) that contains", "the type has been determined for line, entry in enumerate(content,", "the actual format type if detected, otherwise it returns None", "root directive # urls = result.get('urls', None) if not isinstance(urls,", "such as +,-, and : - schema must be a", ".format(key, no + 1)) continue # Store our schema schema", "\"\"\" Takes the specified config content and loads it based", "a self hosting apprise developer may wish to load configuration", "know that we can't accept and # warn the user", "keyword configs = list() # Define what a valid line", "a) for k, a in _results.items()]))) # Prepare our Asset", "results['cache'] = parse_bool(results['qsd']['cache']) return results @staticmethod def 
detect_config_format(content, **kwargs): \"\"\"", "# line found in configuration files. allow_cross_includes = ContentIncludeMode.NEVER #", "entry in enumerate(content, start=1): result = valid_line_re.match(entry) if not result:", "prepend # our current config path url = os.path.join(self.config_path, url)", "six.string_types)) and isinstance(getattr(asset, k), bool)): # If the object in", "results to our global set results.append(r) else: # add our", "the enforced config format self.config_format = kwargs.get('format').lower() if self.config_format not", "# include root directive # includes = result.get('include', None) if", "you want these 'include' entries to be honored, this value", "Config files marked to be in STRICT mode are treated", "all supported configuration sources \"\"\" # The Default Encoding to", "{} on line {}.'.format(url, line)) continue # Build a list", "# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT", "URL {}, entry #{}'.format(url, no + 1)) continue # We", "_results = plugins.url_to_dict(_url) if _results is None: # Setup dictionary", "However this include would be possible if insecure_includes is set", "as a base # to work with r = _results.copy()", "line found in configuration files. allow_cross_includes = ContentIncludeMode.NEVER # the", "something to take action # with. ConfigBase.logger.warning( 'Ignored entry {}", "= 131072 # By default all configuration is not includable", "can pull from self.servers() # Pop the element off of", "a bool # instead: results['cache'] = parse_bool(results['qsd']['cache']) return results @staticmethod", "it. By default, the last element of the list is", "another file:// based one. because it is set to STRICT", "content as though it were a yaml file specifically formatted", "being in ALWAYS mode. Take a file:// based configuration for", "u in url.keys()) # # urls root directive # urls", "# with. 
ConfigBase.logger.warning( 'Ignored entry {} found under urls, entry", "over all of our options so we # can at", "int(results['qsd']['cache']) except (ValueError, TypeError): # No problem, it just isn't", "# parse_url() for entries in tokens: # Copy ourselves a", "maintainer of config # config file at least has something", "False return False # Verify our cache time to determine", "else False def __nonzero__(self): \"\"\" Allows the Apprise object to", "#{}, item #{}' .format(no + 1, entry)) ConfigBase.logger.debug('Loading Exception: %s'", "isinstance(url, dict): # We are a url string with additional", "isinstance(content, six.string_types): # Set the time our content was cached", "\"Software\"), to deal # in the Software without restriction, including", "this should always initialize itself as None config_format = None", "Store our URL and Schema Regex _url = key if", "you can use this format (no tags associated) <URL> #", "the server URL self.logger.warning( 'Unparseable include URL {}'.format(url)) continue #", "element off of the stack return self._cached_servers.pop(index) @staticmethod def __extract_special_tokens(schema,", "ConfigBase.logger.error( 'Invalid Apprise configuration specified.') return None # By default", "our config format fn = getattr(ConfigBase, 'config_parse_{}'.format(config_format)) # Execute our", "key if _url is None: # the loop above failed", "# we want to convert the specified string to #", "not results: # Failed to parse the server URL self.logger.warning(", "servers.append(plugin) # Return what was loaded return (servers, configs) @staticmethod", "information cfg_plugin = SCHEMA_MAP[results['schema']](**results) except Exception as e: # the", "our results to our global set results.append(r) else: # add", "loaded notification servers \"\"\" if not isinstance(self._cached_servers, list): # Generate", "load Apprise YAML configuration ' 'entry #{}, item #{}' .format(no", "COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER", 
"validation if schema not in SCHEMA_MAP: ConfigBase.logger.warning( 'Unsupported include schema", "# with. ConfigBase.logger.warning( 'Invalid URL {}, entry #{}'.format(url, no +", "FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE", "inefficient if disabled. Only disable caching if you understand the", "= plugins.url_to_dict(_url) if _results is None: # Setup dictionary _results", "here, we successfully loaded our data servers.append(plugin) return (servers, configs)", "re.I) try: # split our content up to read line", "the stack and returns it. By default, the last element", "self.config_format) self.logger.warning(err) raise TypeError(err) # Set our cache flag; it", "alawys a comment character so we skip over # lines", "type has been determined for line, entry in enumerate(content, start=1):", "just lumping this in # and associating it with the", "the keyword 'include' and identify a # configuration location (like", "it with the cache value we've already # declared (prior", "action # otherwise. return (list(), list()) url, config = result.group('url'),", "URL, or our tag # definitions (accepting commas) followed by", "granted, free of charge, to any person obtaining a copy", "(ValueError, TypeError): err = 'An invalid cache value ({}) was", "value to None since we don't know # what the", "has been determined for line, entry in enumerate(content, start=1): result", "add our results to our global set results.append(_results) elif isinstance(url,", "configuration from memory (in a string format) that contains 'include'", "to work with r = _results.copy() # We are a", "# Iterate over each config URL for no, url in", "how deep we recursively handle entries that use the `include`", "ConfigBase.logger.warning( 'Unparseable URL {}, entry #{}'.format( url, no + 1))", "not be used. 
ConfigBase.logger.warning( 'Could not load Apprise YAML configuration", "asset object asset = asset if isinstance(asset, AppriseAsset) else self.asset", "cfg_plugin = SCHEMA_MAP[results['schema']](**results) except Exception as e: # the arguments", "not expired; return False return False # If we reach", "integer value try: results['cache'] = int(results['qsd']['cache']) except (ValueError, TypeError): #", "{}.'.format( url, line)) ConfigBase.logger.debug('Loading Exception: %s' % str(e)) continue #", "yaml.parser.ParserError, yaml.error.MarkedYAMLError) as e: # Invalid content ConfigBase.logger.error( 'Invalid Apprise", "verify_host (:obj:`bool`, optional): a flag kept with the parsed URL", "Return a tuple that looks like (servers, configs) where: -", "referenced. You may optionally associate an asset with the notification.", "\"{}\".'.format(k)) continue if v is None: # Convert to an", "a valid line should look like valid_line_re = re.compile( r'^\\s*(?P<line>([;#]+(?P<comment>.*))|'", "set results.append(r) else: # add our results to our global", "is the base class for all supported configuration sources \"\"\"", "based configuration can include another file:// based one. because it", "entry = 0 while len(results): # Increment our entry count", "like valid_line_re = re.compile( r'^\\s*(?P<line>([;#]+(?P<comment>.*))|' r'(?P<text>((?P<tag>[ \\t,a-z0-9_-]+)=)?[a-z0-9]+://.*)|' r'((?P<yaml>[a-z0-9]+):.*))?$', re.I) try:", "# Strip out any tokens we know that we can't", "all of the information parsed from our URL results =", "ConfigBase.logger.debug( 'Loaded URL: {}'.format(plugin.url())) except Exception as e: # the", "cache value ({}) was specified.'.format( cache) self.logger.warning(err) raise TypeError(err) except", "Object results['asset'] = asset # No cache is required because", "if schema in plugins.SCHEMA_MAP: tokens = ConfigBase.__extract_special_tokens( schema, tokens) #", "file references this makes no difference at all. 
But for", "result.get('include', None) if isinstance(includes, six.string_types): # Support a single inline", "entries parsed. The idea here is we can post process", "key in list(_results.keys()): # Strip out any tokens we know", "appropriate value they're expected \"\"\" # Create a copy of", "# Allow overriding the default config format if 'format' in", "if you understand the consequences. You can alternatively set the", "returns all of the services that could be parsed and", "loaded URL ConfigBase.logger.debug('Loaded URL: {}'.format(plugin.url())) except Exception as e: #", "entries tokens[kw].update(matches) # Return our tokens return tokens def __getitem__(self,", "cache time to determine whether we will get our #", "ConfigBase.logger.error( 'Invalid Apprise TEXT based configuration specified.') return (list(), list())", "definitions (accepting commas) followed by an equal sign we know", "something to take action # with. ConfigBase.logger.warning( 'Invalid URL {},", "# reason we should be reading in more. This is", "our parse_ function based on our config format fn =", "config_format @staticmethod def config_parse(content, asset=None, config_format=None, **kwargs): \"\"\" Takes the", "of our options so we # can at least tell", "of the payload if 'encoding' in results['qsd']: results['encoding'] = results['qsd'].get('encoding')", "We can't set a function or non-string set value ConfigBase.logger.warning(", "global set results.append(r) elif isinstance(tokens, dict): # support our special", "be wrapped in an Python 2.x based 'if statement'. True", "expired. if self.cache is True: # we have not expired,", "results = URLBase.parse_url(url, verify_host=verify_host) if not results: # We're done;", "None since we don't know # what the format is", "by the sub-modules default_config_format = ConfigFormat.TEXT # This is only", "# lines matched here. # - Detection begins on the", "we're dealing # with a YAML file. 
# - If", "'entry #{}, item #{}' .format(no + 1, entry)) ConfigBase.logger.debug('Loading Exception:", "special keyword that is managed by Apprise object. # The", "AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES", "try: # split our content up to read line by", "\"\"\" if not isinstance(self._cached_servers, list): # Generate ourselves a list", "set to STRICT mode. If an http:// based configuration file", "v = '' if (isinstance(v, (bool, six.string_types)) and isinstance(getattr(asset, k),", "if six.PY2: it = url.iteritems() else: # six.PY3 it =", "# of our servers and our configuration servers, configs =", "be parsing. It's owner # can read the error printed", "our schema is always in lower case schema = schema.group('schema').lower()", "results: # We're done; we failed to parse our url", "= \\ {k[1:]: str(v) for k, v in tokens.items() if", "was specified.'.format( cache) self.logger.warning(err) raise TypeError(err) except (ValueError, TypeError): err", "difference at all. But for remote content, this does mean", "above failed to match anything ConfigBase.logger.warning( 'Unsupported URL, entry #{}'.format(no", "was specified.'.format(cache) self.logger.warning(err) raise TypeError(err) return def servers(self, asset=None, **kwargs):", "`include` keyword. This keyword requires us to fetch more configuration", "six.string_types): # Set the time our content was cached at", "# Defines the encoding of the payload if 'encoding' in", "specified string to # match that. setattr(asset, k, parse_bool(v)) elif", "file schema = 'file' if not os.path.isabs(url): # We're dealing", "to use it. insecure_include by default are disabled. 
When set", "classes will later use to verify SSL keys (if SSL", "returns a list return fn(content=content, asset=asset) @staticmethod def config_parse_text(content, asset=None):", "# if we reach here, we successfully loaded our data", "list of content we can pull from self.servers() return iter(self._cached_servers)", "for example, only a file:// based configuration can include another", "v in tokens.items() if k.startswith(prefix)} if not matches: # we're", "entry #{}'.format(url, no + 1)) continue # We found a", "at self._cached_time = time.time() return self._cached_servers def read(self): \"\"\" This", "file syntax is: # # pound/hashtag allow for line comments", "TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR", "here, we can now add this servers found # in", "a the type has been determined for line, entry in", "config_parse_text(content, asset=None): \"\"\" Parse the specified content as though it", "# we can. Reset it to None on each iteration", "on our config format fn = getattr(ConfigBase, 'config_parse_{}'.format(config_format)) # Initialize", "our cached content # and verify it has not expired.", "rights # to use, copy, modify, merge, publish, distribute, sublicense,", "the URL and returns it broken apart into a dictionary.", "for url in configs: if self.recursion > 0: # Attempt", "= None # iterate over each line of the file", "bool)): # If the object in the Asset is a", "type ConfigBase.logger.error( 'Invalid Apprise configuration specified.') return None # By", "in results['qsd']: results['format'] = results['qsd'].get('format') if results['format'] not in CONFIG_FORMATS:", "based configuration specified.') return (list(), list()) # YAML Version version", "(bool, six.string_types))): # We can't set a function or non-string", "dealing with a file schema = 'file' if not os.path.isabs(url):", "errors if six.PY2: it = url.iteritems() else: # six.PY3 it", "with a file schema = 'file' if not os.path.isabs(url): #", "six.string_types))): # We 
can't set a function or non-string set", "an instance of our plugin using the # parsed URL", "servers found # in this configuration file to our list", "tuple # of our servers and our configuration servers, configs", "base class for all supported configuration sources \"\"\" # The", "detect configuration if result.group('yaml'): config_format = ConfigFormat.YAML ConfigBase.logger.debug( 'Detected YAML", "here, we have a comment entry # Adjust default format", "{}.'.format(line)) break elif result.group('text'): config_format = ConfigFormat.TEXT ConfigBase.logger.debug( 'Detected TEXT", "content was not expected string type ConfigBase.logger.error( 'Invalid Apprise TEXT", "(list(), list()) if not isinstance(result, dict): # Invalid content ConfigBase.logger.error(", "config content and loads it based on the specified config_format.", "AppriseAsset() try: # Attempt to create an instance of our", "found under urls, entry #{}' .format(key, no + 1)) continue", "# # The above copyright notice and this permission notice", "up _results['tag'] = \\ set(parse_list(_results['tag'])) | global_tags else: # Just", "# and verify it has not expired. if self.cache is", "(hasattr(asset, k) and isinstance(getattr(asset, k), (bool, six.string_types))): # We can't", "results['asset'] = asset # No cache is required because we're", "+ 1, entry)) del _results[key] ConfigBase.logger.trace( 'URL #{}: {} unpacked", "for kw, meta in plugins.SCHEMA_MAP[schema]\\ .template_kwargs.items(): # Determine our prefix:", "function or non-string set value ConfigBase.logger.warning( 'Invalid asset key \"{}\".'.format(k))", "result.group('text'): config_format = ConfigFormat.TEXT ConfigBase.logger.debug( 'Detected TEXT configuration ' 'based", "format) that contains 'include' entries (even file:// based ones). 
In", "asset=None, **kwargs): \"\"\" Performs reads loaded configuration and returns all", "parsed from the URL results['insecure_includes'] = self.insecure_includes try: # Attempt", "a string followed by a colon, we know we're dealing", "referenced using # the include keyword configs = list() #", "we have been configured to do so for url in", "copies of the Software, and to permit persons to whom", "configuration files. allow_cross_includes = ContentIncludeMode.NEVER # the config path manages", "can be made to retrieve the (same) data. This method", "Schema Regex _url = key if _url is None: #", "\"\"\" if isinstance(self._cached_servers, list) and self.cache: # We have enough", "= re.split(r'\\r*\\n', content) except TypeError: # content was not expected", "isinstance(entries, dict): if six.PY2: _url, tokens = next(url.iteritems()) else: #", "self._cached_servers: self.logger.info('Loaded {} entries from {}'.format( len(self._cached_servers), self.url())) else: self.logger.warning(", "can pull from self.servers() return self._cached_servers[index] def __iter__(self): \"\"\" Returns", "url string with additional unescaped options if isinstance(entries, dict): if", "isn't specified, then it is auto detected. 
\"\"\" if config_format", "if tokens and isinstance(tokens, dict): for k, v in tokens.items():", "# with an underscore ConfigBase.logger.warning( 'Ignored asset key \"{}\".'.format(k)) continue", "results = list() if isinstance(url, six.string_types): # We're just a", "it to None on each iteration results = list() if", "Apprise YAML entry #{}'.format(no + 1)) continue # Track our", "int(cache) if self.cache < 0: err = 'A negative cache", "Object results['asset'] = \\ asset if isinstance(asset, AppriseAsset) else AppriseAsset()", "k, v in tokens.items() if not k.startswith(prefix)} # Update our", "(list(), list()) if config_format not in CONFIG_FORMATS: # Invalid configuration", "#{}'.format( url, no + 1)) continue # add our results", "Apprise object to be wrapped in an Python 2.x based", "supported plugin type - tokens must be a dictionary containing", "non-string set value ConfigBase.logger.warning( 'Invalid asset key \"{}\".'.format(k)) continue if", "if self._cached_servers else False def __nonzero__(self): \"\"\" Allows the Apprise", "self.asset # Execute our config parse function which always returns", "# add our results to our global set results.append(r) else:", "# Pop the element off of the stack return self._cached_servers.pop(index)", "entry {} found under urls, entry #{}' .format(key, no +", "for Apprise. 
Args: url (str): The URL you want to", "special tokens such as +,-, and : - schema must", "# this should always initialize itself as None config_format =", "unless otherwise # detected by the sub-modules default_config_format = ConfigFormat.TEXT", "URL results = SCHEMA_MAP[schema].parse_url(url) if not results: # Failed to", "url in enumerate(includes): if isinstance(url, six.string_types): # Support a single", "no + 1)) continue # We found a valid schema", "match = VALID_TOKEN.match(key) if not match: ConfigBase.logger.warning( 'Ignoring invalid token", "Parse the specified content as though it were a simple", "our config parse function which always returns a list return", "Verify our cache time to determine whether we will get", "Or you can use this format (no tags associated) <URL>", "whether token is valid or not VALID_TOKEN = re.compile( r'(?P<token>[a-z0-9][a-z0-9_]+)',", "and verify it has not expired. if self.cache is True:", "Apprise Config files marked to be in STRICT mode are", "our entries tokens[kw].update(matches) # Return our tokens return tokens def", "list()) # YAML Version version = result.get('version', 1) if version", "# # urls root directive # urls = result.get('urls', None)", "retrieved. \"\"\" if isinstance(self._cached_servers, list) and self.cache: # We have", "the URL; we decrement # it one level results['recursion'] =", "self._cached_servers = list() # read() causes the child class to", "we want to iterate over all of our options so", "tokens and updates them to no longer include any special", "an int identifying the number of seconds the previously retrieved", "object. 
# The below ensures our tags are set correctly", "'Unsupported include schema {}.'.format(schema)) continue # Parse our url details", "# declared (prior to our recursion) results['cache'] = False #", "# # global asset object # asset = asset if", "# Store any preset tags global_tags = set(parse_list(tags)) # #", "'format' in kwargs \\ and isinstance(kwargs['format'], six.string_types): # Store the", "for cases where we are not caching our response and", "len(self._cached_servers), self.url())) else: self.logger.warning( 'Failed to load Apprise configuration from", "YAML file where the user provided some of the special", "our dictionary tokens = tokens.copy() for kw, meta in plugins.SCHEMA_MAP[schema]\\", "our Asset Object results['asset'] = \\ asset if isinstance(asset, AppriseAsset)", "format uses a default if one wasn't one detected #", "results = plugins.url_to_dict(url) if results is None: # Failed to", "+ 1)) continue # Store our schema schema = _schema.group('schema').lower()", "cache list return self._cached_servers # Our Configuration format uses a", "a colon, we know we're dealing # with a YAML", "know # we're dealing with a TEXT format. # Define", "accept and # warn the user match = VALID_TOKEN.match(key) if", "None for key, tokens in it: # Test our schema", "notifications if any were set results['tag'] = set(parse_list(result.group('tags'))) # Prepare", "the object in the Asset is a boolean, then #", "as being in ALWAYS mode. Take a file:// based configuration", "del results['format'] # Defines the encoding of the payload if", "# this case we want to iterate over all of", "else. 128KB (131072B) max_buffer_size = 131072 # By default all", "the Software without restriction, including without limitation the rights #", "to zero it is off. 
There is no limit to", "kwargs: # Store the encoding self.encoding = kwargs.get('encoding') if 'format'", "# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF", "our return value to None since we don't know #", "tokens = next(iter(url.items())) # Tags you just can't over-ride if", "# We no longer need our configuration object del cfg_plugin", "config_format = \\ self.default_config_format \\ if self.config_format is None else", "\"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #", "to be wrapped in an Python 3.x based 'if statement'.", "Recursion can never be parsed from the URL; we decrement", "do so, subject to the following conditions : # #", "Notification Service from the stack and returns it. By default,", "content ConfigBase.logger.error( 'Invalid Apprise YAML based configuration specified.') return (list(),", "an integer; now treat it as a bool # instead:", "in lower case schema = schema.group('schema').lower() # Some basic validation", "None) if tags and isinstance(tags, (list, tuple, six.string_types)): # Store", "else AppriseAsset() tokens = result.get('asset', None) if tokens and isinstance(tokens,", "wasn't one detected # or enfored. config_format = \\ self.default_config_format", "information parsed from our URL results = SCHEMA_MAP[schema].parse_url(url) if not", "This is very specific and customized for Apprise. 
Args: url", "verify_host=True): \"\"\"Parses the URL and returns it broken apart into", "matches: # we're done with this entry continue if not", "can be True or a (positive) integer try: self.cache =", "# By default all configuration is not includable using the", "syntax ConfigBase.logger.error( 'Invalid Apprise TEXT configuration format found ' '{}", "os.getcwd() def __init__(self, cache=True, recursion=0, insecure_includes=False, **kwargs): \"\"\" Initialize some", "servers contains a list of loaded notification plugins - configs", "plugin = plugins.SCHEMA_MAP[_results['schema']](**_results) # Create log entry of loaded URL", "({}) was specified.'.format( self.config_format) self.logger.warning(err) raise TypeError(err) # Set our", "# The above copyright notice and this permission notice shall", "True if self._cached_servers else False def __nonzero__(self): \"\"\" Allows the", "MIT License. # # Permission is hereby granted, free of", "of the special keywords. We effectivley look up what these", "object in the Asset is a boolean, then # we", "Test whether token is valid or not VALID_TOKEN = re.compile(", "url) url = '{}://{}'.format(schema, URLBase.quote(url)) else: # Ensure our schema", "'file' if not os.path.isabs(url): # We're dealing with a relative", "invalid cache value ({}) was specified.'.format(cache) self.logger.warning(err) raise TypeError(err) return", "No problem, it just isn't an integer; now treat it", "isinstance(getattr(asset, k), (bool, six.string_types))): # We can't set a function", "from the URL; we decrement # it one level results['recursion']", "do so for url in configs: if self.recursion > 0:", "next(url.iteritems()) else: # six.PY3 _url, tokens = next(iter(url.items())) # Tags", "object to be wrapped in an Python 3.x based 'if", "exist for before it should be considered expired. 
recursion defines", "associate with the newly added # notifications if any were", "v.strip()) else: # we must set strings with a string", "method can be somewhat inefficient if disabled. Only disable caching", "We already have cached results to return; use them return", "ignore arguments associated configs.extend(u for u in url.keys()) # #", "the services that could be parsed and loaded. \"\"\" if", "IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS", "Only disable caching if you understand the consequences. You can", "'Unparseable include URL {}'.format(url)) continue # Handle cross inclusion based", "use this format (no tags associated) <URL> # you can", "It's owner # can read the error printed to screen", "if not (url or config): # Comment/empty line; do nothing", "except (ValueError, TypeError): # No problem, it just isn't an", "url, os.linesep, os.linesep.join( ['{}=\"{}\"'.format(k, a) for k, a in _results.items()])))", "line configs.append(config.strip()) continue # Acquire our url tokens results =", "Tags you just can't over-ride if 'schema' in entries: del", "PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE # AUTHORS OR", "# Test whether token is valid or not VALID_TOKEN =", "_url is None: # the loop above failed to match", "common server arguments that will keep things consistent when working", "Set our asset object with the new value setattr(asset, k,", "URL string... schema = GET_SCHEMA_RE.match(url) if schema is None: #", "entries: del entries['schema'] # support our special tokens (if they're", "if self.cache < 0: err = 'A negative cache value", "no difference at all. 
But for remote content, this does", "If we reach here our configuration should be considered #", "attempts to detect the format type The function returns the", "' 'based on line {}.'.format(line)) break # If we reach", "= \\ asset if isinstance(asset, AppriseAsset) else AppriseAsset() try: #", "schema = 'file' if not os.path.isabs(url): # We're dealing with", ": # # The above copyright notice and this permission", "= ConfigBase.detect_config_format(content) if not config_format: # We couldn't detect configuration", "be a dictionary containing the yaml entries parsed. The idea", "read any more of this amount of data into memory", "the encoding of the payload if 'encoding' in results['qsd']: results['encoding']", "If an http:// based configuration file attempted to include a", "are set correctly if 'tag' in _results: # Tidy our", "configuration ' 'entry #{}, item #{}' .format(no + 1, entry))", "URL which some child classes will later use to verify", "and updates them to no longer include any special tokens", "asset if isinstance(asset, AppriseAsset) else AppriseAsset() tokens = result.get('asset', None)", "ourselves a list of content we can pull from self.servers()", "advance through it if recursion is set to 2 deep.", "int identifying the number of seconds the previously retrieved can", "be used. self.logger.warning( 'Could not load include URL: {}'.format(url)) self.logger.debug('Loading", "'Invalid URL {}, entry #{}'.format(url, no + 1)) continue #", "line, entry in enumerate(content, start=1): result = valid_line_re.match(entry) if not", "off. 
There is no limit to how high you set", "memory as there is no # reason we should be", "Takes the specified config content and loads it based on", "None: # Log invalid entries so that maintainer of config", "# Invalid configuration type specified ConfigBase.logger.error( 'An invalid configuration format", "0: # Attempt to acquire the schema at the very", "= time.time() return self._cached_servers def read(self): \"\"\" This object should", "file only containing a list of URLs. Return a tuple", "+ 1)) continue # Track our entries entry = 0", "our url details of the server object as dictionary #", "URL to-load _url = None # Track last acquired schema", "continue if config: ConfigBase.logger.debug('Include URL: {}'.format(config)) # Store our include", "# Grab our first item _results = results.pop(0) # tag", "ConfigBase.logger.warning( 'Ignoring invalid token ({}) found in YAML ' 'configuration", "optionally associate an asset with the notification. The file syntax", "the error printed to screen and take action # otherwise.", "STRICT mode. If an http:// based configuration file attempted to", "if (SCHEMA_MAP[schema].allow_cross_includes == ContentIncludeMode.STRICT and schema not in self.schemas() and", "set of tokens provided in a YAML file where the", "key, tokens in it: # Test our schema _schema =", "base to # work with r = _results.copy() # add", "we can post process a set of tokens provided in", "using # the include keyword configs = list() # Define", "configuration is not includable using the 'include' # line found", "= asset try: # Attempt to create an instance of", "that subsiquent calls does not cause the content to be", "very specific and customized for Apprise. 
Args: url (str): The", "not results: # We're done; we failed to parse our", "notification plugins - configs contains a list of additional configuration", "tokens.items(): if k.startswith('_') or k.endswith('_'): # Entries are considered reserved", "invalid entries so that maintainer of config # config file", "details: _results = plugins.url_to_dict(url) if _results is None: ConfigBase.logger.warning( 'Unparseable", "A list of loaded Notification Services servers = list() #", "it returns None \"\"\" # Detect Format Logic: # -", "to-load _url = None # Track last acquired schema schema", "does mean more then one call can be made to", "arguments are invalid or can not be used. ConfigBase.logger.warning( 'Could", "tokens = tokens.copy() for kw, meta in plugins.SCHEMA_MAP[schema]\\ .template_kwargs.items(): #", "they start or end # with an underscore ConfigBase.logger.warning( 'Ignored", "Args: url (str): The URL you want to fully parse.", "\"\"\" This is the base class for all supported configuration", "can pull from self.servers() return True if self._cached_servers else False", "the very least to allow # our configuration based urls.", "# Build a list of tags to associate with the", "Return our tokens return tokens def __getitem__(self, index): \"\"\" Returns", "first non-comment and non blank line # matched. # -", "# Iterate over each URL for no, url in enumerate(urls):", "detected encoding = 'utf-8' # The default expected configuration format", "we reach here our configuration should be considered # missing", "be included in # all copies or substantial portions of", "can not be used. ConfigBase.logger.warning( 'Could not load Apprise YAML", "list is removed. \"\"\" if not isinstance(self._cached_servers, list): # Generate", "it is off. 
There is no limit to how high", "config format fn = getattr(ConfigBase, 'config_parse_{}'.format(config_format)) # Initialize our asset", "asset = asset if isinstance(asset, AppriseAsset) else self.asset # Execute", "parsed URL information cfg_plugin = SCHEMA_MAP[results['schema']](**results) except Exception as e:", "to be retrieved again. For local file references this makes", "configuration ConfigBase.logger.error('Could not detect configuration') return (list(), list()) if config_format", "problem; we simply have no includes includes = list() #", "to do whatever it takes for the # config plugin", "include keyword configs = list() try: # Load our data", "flag kept with the parsed URL which some child classes", "configuration entries when loaded. include <ConfigURL> \"\"\" # A list", "in entries: del entries['schema'] # support our special tokens (if", "to STRICT mode. If an http:// based configuration file attempted", "reach here, we successfully loaded our data servers.append(plugin) return (servers,", "content as though it were a simple text file only", ".format(key, no + 1, entry)) del _results[key] ConfigBase.logger.trace( 'URL #{}:", "entry associated with the loaded notification servers \"\"\" if not", "circumstances, it is strongly recomended that you leave this default", "\"\"\" Simply returns True if the configuration should be considered", "without restriction, including without limitation the rights # to use,", "# # Permission is hereby granted, free of charge, to", "of tags to associate with the newly added # notifications", "# we have not expired, return False return False #", "'A negative cache value ({}) was specified.'.format( cache) self.logger.warning(err) raise", "the cache value we've already # declared (prior to our", "value to \"{}\".'.format(k)) continue # # global tag root directive", "Tracks the time the content was last retrieved on. 
This", "load the data source and return unparsed content # None", "list): # Generate ourselves a list of content we can", "GET_SCHEMA_RE.match(url) if schema is None: # Plan B is to", "relative path; prepend # our current config path url =", "have not expired; return False return False # If we", "used. ConfigBase.logger.warning( 'Could not load URL {} on line {}.'.format(", "yaml.error.MarkedYAMLError) as e: # Invalid content ConfigBase.logger.error( 'Invalid Apprise YAML", "ConfigBase.logger.error( 'Invalid Apprise YAML version specified {}.'.format(version)) return (list(), list())", "the format is yet config_format = None # iterate over", "disabled. When set to True, all Apprise Config files marked", "We couldn't detect configuration ConfigBase.logger.error('Could not detect configuration') return (list(),", "include URL {}'.format(url)) continue # Handle cross inclusion based on", "{}.'.format(entry, line)) # Assume this is a file we shouldn't", "urls urls = list() # Iterate over each URL for", "cached at self._cached_time = time.time() return self._cached_servers def read(self): \"\"\"", "can. Reset it to None on each iteration results =", "# details: _results = plugins.url_to_dict(url) if _results is None: ConfigBase.logger.warning(", "loaded \"\"\" if not isinstance(self._cached_servers, list): # Generate ourselves a", "from ..utils import parse_list from ..utils import parse_bool from ..utils", "found in configuration files. allow_cross_includes = ContentIncludeMode.NEVER # the config", "based configuration is prohibited. 
' 'Ignoring URL {}'.format(schema, url)) continue", "the base class for all supported configuration sources \"\"\" #", "support our special tokens (if they're present) if schema in", "# Initialize our recursion value self.recursion = recursion # Initialize", "not config_format: # We couldn't detect configuration ConfigBase.logger.error('Could not detect", "isinstance(urls, (list, tuple)): # Not a problem; we simply have", "parse_bool from ..utils import parse_urls from . import SCHEMA_MAP #", "must be a dictionary containing the yaml entries parsed. The", "self.expired(): # We already have cached results to return; use", "our object if # we can. Reset it to None", "ContentIncludeMode.NEVER # the config path manages the handling of relative", "# Load our data (safely) result = yaml.load(content, Loader=yaml.SafeLoader) except", "seconds the previously retrieved can exist for before it should", "return False return False # If we reach here our", "string ConfigBase.logger.warning( 'Invalid asset value to \"{}\".'.format(k)) continue # #", "present) if schema in plugins.SCHEMA_MAP: entries = ConfigBase.__extract_special_tokens( schema, entries)", "err = 'An invalid config format ({}) was specified.'.format( self.config_format)", "cfg_plugin else: self.logger.debug( 'Recursion limit reached; ignoring Include URL: %s'", "URLBase.logger.warning( 'Unsupported format specified {}'.format( results['format'])) del results['format'] # Defines", "root directive # includes = result.get('include', None) if isinstance(includes, six.string_types):", "configuration servers, configs = fn(content=content, asset=asset) self._cached_servers.extend(servers) # Configuration files", "code is licensed under the MIT License. # # Permission", "not otherwise detected encoding = 'utf-8' # The default expected", "causes the child class to do whatever it takes for", "to use, copy, modify, merge, publish, distribute, sublicense, and /", "we shouldn't be parsing. 
It's owner # can read the", "associating it with the cache value we've already # declared", "entry count entry += 1 # Grab our first item", "servers = list() # A list of additional configuration files", "configured to do so for url in configs: if self.recursion", "Don't read any more of this amount of data into", "include line configs.append(config.strip()) continue # Acquire our url tokens results", "expected configuration format unless otherwise # detected by the sub-modules", "we reach here, we successfully loaded our data servers.append(plugin) return", "empty cache list return self._cached_servers # Our Configuration format uses", "here our configuration should be considered # missing and/or expired.", "IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER", "asset object with the new value setattr(asset, k, v.strip()) else:", "AND NONINFRINGEMENT.IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT", "VALID_TOKEN = re.compile( r'(?P<token>[a-z0-9][a-z0-9_]+)', re.I) class ConfigBase(URLBase): \"\"\" This is", "continue # Track our entries entry = 0 while len(results):", "AppriseAsset) else self.asset # Execute our config parse function which", "indexed server entry associated with the loaded notification servers \"\"\"", "the Asset is a boolean, then # we want to", "with r = _results.copy() # We are a url string", "set(parse_list(tags)) # # include root directive # includes = result.get('include',", "URL # this should always initialize itself as None config_format", "are a url string with additional unescaped options. In #", "url = os.path.join(self.config_path, url) url = '{}://{}'.format(schema, URLBase.quote(url)) else: #", "configuration should be considered as expired or False if content", "url)) continue # Prepare our Asset Object results['asset'] = asset", "try: # Attempt to create an instance of our plugin", "self._cached_servers else False def __nonzero__(self): \"\"\" Allows the Apprise object", "be set to True. 
\"\"\" super(ConfigBase, self).__init__(**kwargs) # Tracks the", "TypeError): # No problem, it just isn't an integer; now", "line {}.'.format(entry, line)) # Assume this is a file we", "yet config_format = None # iterate over each line of", "decrement # it one level results['recursion'] = self.recursion - 1", "an empty string v = '' if (isinstance(v, (bool, six.string_types))", "# add our results to our global set results.append(_results) else:", "# match that. setattr(asset, k, parse_bool(v)) elif isinstance(v, six.string_types): #", "({}) found in YAML ' 'configuration entry #{}, item #{}'", "specified content as though it were a simple text file", "file:// one it woul fail. However this include would be", "config URL for no, url in enumerate(includes): if isinstance(url, six.string_types):", "yaml import time from .. import plugins from ..AppriseAsset import", "NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR", "entry #{}'.format(no + 1)) continue _results = plugins.url_to_dict(_url) if _results", "not in CONFIG_FORMATS: URLBase.logger.warning( 'Unsupported format specified {}'.format( results['format'])) del", "files marked to be in STRICT mode are treated as", "plugin using the # parsed URL information cfg_plugin = SCHEMA_MAP[results['schema']](**results)", "SCHEMA_MAP[results['schema']](**results) except Exception as e: # the arguments are invalid", "simple text file only containing a list of URLs. Return", "= GET_SCHEMA_RE.match(url) if schema is None: # Plan B is", "self.logger.warning(err) raise TypeError(err) # Set our cache flag; it can", "import SCHEMA_MAP # Test whether token is valid or not", "default format to TEXT config_format = ConfigFormat.TEXT return config_format @staticmethod", "returns it. By default, the last element of the list", "a file:// one it woul fail. 
However this include would", "further into our cached content # and verify it has", "in kwargs: # Store the encoding self.encoding = kwargs.get('encoding') if", "fn = getattr(ConfigBase, 'config_parse_{}'.format(config_format)) # Initialize our asset object asset", "prefix = meta.get('prefix', '+') # Detect any matches matches =", "os.linesep.join( ['{}=\"{}\"'.format(k, a) for k, a in _results.items()]))) # Prepare", "fully parse. verify_host (:obj:`bool`, optional): a flag kept with the", "mean more then one call can be made to retrieve", "None # Attempt to detect configuration if result.group('yaml'): config_format =", "up to read line by line content = re.split(r'\\r*\\n', content)", "'Detected YAML configuration ' 'based on line {}.'.format(line)) break elif", "underscore ConfigBase.logger.warning( 'Ignored asset key \"{}\".'.format(k)) continue if not (hasattr(asset,", "our configuration servers, configs = fn(content=content, asset=asset) self._cached_servers.extend(servers) # Configuration", "if 'tag' in _results: # Tidy our list up _results['tag']", "caching if you understand the consequences. You can alternatively set", "return None # Attempt to detect configuration if result.group('yaml'): config_format", "k.startswith(prefix)} # Update our entries tokens[kw].update(matches) # Return our tokens", "because we're just lumping this in # and associating it", "from . import SCHEMA_MAP # Test whether token is valid", "value ({}) was specified.'.format( cache) self.logger.warning(err) raise TypeError(err) except (ValueError,", "our special tokens (if they're present) if schema in plugins.SCHEMA_MAP:", "to separate # them. 
<Tag(s)>=<URL> # Or you can use", "configuration file to our list self._cached_servers.extend( cfg_plugin.servers(asset=asset)) # We no", "parsed URL information plugin = plugins.SCHEMA_MAP[results['schema']](**results) # Create log entry", "over each line of the file to attempt to detect", "of content we can pull from self.servers() return self._cached_servers[index] def", "item _results = results.pop(0) # tag is a special keyword", "# content was not expected string type ConfigBase.logger.error( 'Invalid Apprise", "iterator to our server list \"\"\" if not isinstance(self._cached_servers, list):", "tokens (if they're present) if schema in plugins.SCHEMA_MAP: tokens =", "our tag # definitions (accepting commas) followed by an equal", "plugins.SCHEMA_MAP[schema]\\ .template_kwargs.items(): # Determine our prefix: prefix = meta.get('prefix', '+')", "Prepare our Asset Object results['asset'] = asset # No cache", "so, subject to the following conditions : # # The", "or can not be used. self.logger.warning( 'Could not load include", "we skip over # lines matched here. # - Detection", "(if they're present) if schema in plugins.SCHEMA_MAP: tokens = ConfigBase.__extract_special_tokens(", "DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF", "this makes no difference at all. 
But for remote content,", "# Parse our url details of the server object as", "to read line by line content = re.split(r'\\r*\\n', content) except", "tags global_tags = set(parse_list(tags)) # # include root directive #", "the following conditions : # # The above copyright notice", "recursively populate them # If we have been configured to", "if results is None: # Failed to parse the server", "inclusion based on allow_cross_includes rules if (SCHEMA_MAP[schema].allow_cross_includes == ContentIncludeMode.STRICT and", "from ..URLBase import URLBase from ..common import ConfigFormat from ..common", "format is yet config_format = None # iterate over each", "function based on our config format fn = getattr(ConfigBase, 'config_parse_{}'.format(config_format))", "elif isinstance(url, dict): # We are a url string with", "read line by line content = re.split(r'\\r*\\n', content) except TypeError:", "all. But for remote content, this does mean more then", "# Not a problem; we simply have no includes includes", "done; we failed to parse our url return results #", "file attempted to include a file:// one it woul fail.", "elif result.group('text'): config_format = ConfigFormat.TEXT ConfigBase.logger.debug( 'Detected TEXT configuration '", "to detect configuration if result.group('yaml'): config_format = ConfigFormat.YAML ConfigBase.logger.debug( 'Detected", "format to TEXT config_format = ConfigFormat.TEXT return config_format @staticmethod def", "use to verify SSL keys (if SSL transactions take place).", "# read() causes the child class to do whatever it", "list return self._cached_servers # Our Configuration format uses a default", "it low if you do intend to use it. insecure_include", "_url, tokens = next(iter(url.items())) # Tags you just can't over-ride", "element of the list is removed. 
\"\"\" if not isinstance(self._cached_servers,", "them to no longer include any special tokens such as", "+ 1, url, os.linesep, os.linesep.join( ['{}=\"{}\"'.format(k, a) for k, a", "idea here is we can post process a set of", "file we shouldn't be parsing. It's owner # can read", "were detected; recursively populate them # If we have been", "never be parsed from the URL; we decrement # it", "return (list(), list()) for line, entry in enumerate(content, start=1): result", "time.time() return self._cached_servers def read(self): \"\"\" This object should be", "False # Verify our cache time to determine whether we", "be reading in more. This is more of a safe", "..utils import parse_urls from . import SCHEMA_MAP # Test whether", "cache) self.logger.warning(err) raise TypeError(err) except (ValueError, TypeError): err = 'An", "def expired(self): \"\"\" Simply returns True if the configuration should", "to determine whether we will get our # content again.", "(,) to separate # them. <Tag(s)>=<URL> # Or you can", "keyword configs = list() try: # Load our data (safely)", "and # warn the user match = VALID_TOKEN.match(key) if not", "self.insecure_includes = insecure_includes if 'encoding' in kwargs: # Store the", "# Failed to parse the server URL self.logger.warning( 'Unparseable include", "None: # Detect the format config_format = ConfigBase.detect_config_format(content) if not", "insecure_includes=False, **kwargs): \"\"\" Initialize some general logging and common server", "STRICT mode are treated as being in ALWAYS mode. Take", "['{}=\"{}\"'.format(k, a) for k, a in _results.items()]))) # Prepare our", "valid line should look like valid_line_re = re.compile( r'^\\s*(?P<line>([;#]+(?P<comment>.*))|' r'(?P<text>((?P<tag>[", "Extend our dictionary with our new entries r.update(entries) # add", "woul fail. 
However this include would be possible if insecure_includes", "import ContentIncludeMode from ..utils import GET_SCHEMA_RE from ..utils import parse_list", "(C) 2020 <NAME> <<EMAIL>> # All rights reserved. # #", "reach here our configuration should be considered # missing and/or", "single inline string or multiple ones separated by a #", "is not includable using the 'include' # line found in", "schema, } if isinstance(tokens, (list, tuple, set)): # populate and/or", "result.group('url'), result.group('config') if not (url or config): # Comment/empty line;", "six.string_types): # Set our asset object with the new value", "options so we # can at least tell the end", "version != 1: # Invalid syntax ConfigBase.logger.error( 'Invalid Apprise YAML", "# Create a copy of our dictionary tokens = tokens.copy()", "# Support a single inline string or multiple ones separated", "Create a copy of our dictionary tokens = tokens.copy() for", "already have cached results to return; use them return self._cached_servers", "a tuple # of our servers and our configuration servers,", "(no tags associated) <URL> # you can also use the", "again. age_in_sec = time.time() - self._cached_time if age_in_sec <= self.cache:", "None # Track last acquired schema schema = None for", "This code is licensed under the MIT License. 
# #", "should look like valid_line_re = re.compile( r'^\\s*(?P<line>([;#]+(?P<comment>.*))|' r'(?P<text>((?P<tag>[ \\t,a-z0-9_-]+)=)?[a-z0-9]+://.*)|' r'((?P<yaml>[a-z0-9]+):.*))?$',", "comments # # One or more tags can be idenified", "= result.get('tag', None) if tags and isinstance(tags, (list, tuple, six.string_types)):", "always in lower case schema = schema.group('schema').lower() # Some basic", "a set of tokens provided in a YAML file where", "None: # Setup dictionary _results = { # Minimum requirements", "which some child classes will later use to verify SSL", "split our content up to read line by line content", "persons to whom the Software is # furnished to do", "not in CONFIG_FORMATS: # Invalid configuration type specified ConfigBase.logger.error( 'An", "Insecure Includes flag can never be parsed from the URL", "start=1): result = valid_line_re.match(entry) if not result: # Invalid syntax", "include schema {}.'.format(schema)) continue # Parse our url details of", "def pop(self, index=-1): \"\"\" Removes an indexed Notification Service from", "number of seconds the previously retrieved can exist for before", "OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE.", "plugins from ..AppriseAsset import AppriseAsset from ..URLBase import URLBase from", "look like valid_line_re = re.compile( r'^\\s*(?P<line>([;#]+(?P<comment>.*))|' r'(?P<text>((?P<tag>[ \\t,a-z0-9_-]+)=)?[a-z0-9]+://.*)|' r'((?P<yaml>[a-z0-9]+):.*))?$', re.I)", "1, entry)) ConfigBase.logger.debug('Loading Exception: %s' % str(e)) continue # if", "what a valid line should look like valid_line_re = re.compile(", "None) if not isinstance(urls, (list, tuple)): # Not a problem;", "loaded configuration and returns all of the services that could", "You can alternatively set the cache value to an int", "configuration from another source and add it to our existing", "is no limit to how high you set this value.", "from ..common import CONFIG_FORMATS from ..common import ContentIncludeMode from ..utils", "# 
urls root directive # urls = result.get('urls', None) if", "Initialize our recursion value self.recursion = recursion # Initialize our", "# populate and/or override any results populated by # parse_url()", "or not VALID_TOKEN = re.compile( r'(?P<token>[a-z0-9][a-z0-9_]+)', re.I) class ConfigBase(URLBase): \"\"\"", "to keep it low if you do intend to use", "and loads it based on the specified config_format. If a", "tag # definitions (accepting commas) followed by an equal sign", "to be wrapped in an Python 2.x based 'if statement'.", "return; use them return self._cached_servers # Our cached response object", "return fn(content=content, asset=asset) @staticmethod def config_parse_text(content, asset=None): \"\"\" Parse the", "more configuration from another source and add it to our", "comma and/or space configs.extend(parse_urls(url)) elif isinstance(url, dict): # Store the", "using # the include keyword configs = list() try: #", "our schema schema = _schema.group('schema').lower() # Store our URL and", "plugins.SCHEMA_MAP[results['schema']](**results) # Create log entry of loaded URL ConfigBase.logger.debug('Loaded URL:", "..URLBase import URLBase from ..common import ConfigFormat from ..common import", "#{}'.format(url, no + 1)) continue # We found a valid", "ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN", "else AppriseAsset() try: # Attempt to create an instance of", "and take action # otherwise. return (list(), list()) url, config", "dictionary is returned containing the URL fully parsed if successful,", "item #{}' .format(no + 1, entry)) ConfigBase.logger.debug('Loading Exception: %s' %", "content we can pull from self.servers() # Pop the element", "instantiate our object if # we can. Reset it to", "honored, this value must be set to True. \"\"\" super(ConfigBase,", "additional unescaped options. 
In # this case we want to", "by # a comma and/or space configs.extend(parse_urls(url)) elif isinstance(url, dict):", "the include keyword configs = list() # Define what a", "caching our response and are required to # re-retrieve our", "For local file references this makes no difference at all.", "{}'.format(url)) continue # Handle cross inclusion based on allow_cross_includes rules", "configuration format found ' '{} on line {}.'.format(entry, line)) #", "\\t,a-z0-9_-]+)=)?[a-z0-9]+://.*)|' r'((?P<yaml>[a-z0-9]+):.*))?$', re.I) try: # split our content up to", "specific circumstances, it is strongly recomended that you leave this", "to fetch more configuration from another source and add it", "declared (prior to our recursion) results['cache'] = False # Recursion", "plugins.SCHEMA_MAP: entries = ConfigBase.__extract_special_tokens( schema, entries) # Extend our dictionary", "looks like (servers, configs) where: - servers contains a list", "tokens and isinstance(tokens, dict): for k, v in tokens.items(): if", "# All rights reserved. # # This code is licensed", "of this amount of data into memory as there is", "here, we successfully loaded our data servers.append(plugin) # Return what", "our cache flag; it can be True or a (positive)", "# # pound/hashtag allow for line comments # # One", "each iteration results = list() if isinstance(url, six.string_types): # We're", "our configuration based urls. 
schema = GET_SCHEMA_RE.match(url) if schema is", "self.logger.debug( 'Recursion limit reached; ignoring Include URL: %s' % url)", "= set(parse_list(result.group('tags'))) # Prepare our Asset Object results['asset'] = \\", "the # config plugin to load the data source and", "+= 1 # Grab our first item _results = results.pop(0)", "But for remote content, this does mean more then one", "THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY", "results['qsd']: results['encoding'] = results['qsd'].get('encoding') # Our cache value if 'cache'", "substantial portions of the Software. # # THE SOFTWARE IS", "be used. ConfigBase.logger.warning( 'Could not load Apprise YAML configuration '", "LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,", "include any special tokens such as +,-, and : -", "if (isinstance(v, (bool, six.string_types)) and isinstance(getattr(asset, k), bool)): # If", "flag; it can be True or a (positive) integer try:", "problem, it just isn't an integer; now treat it as", "takes for the # config plugin to load the data", "populate and/or override any results populated by # parse_url() for", "or config): # Comment/empty line; do nothing continue if config:", "entries = ConfigBase.__extract_special_tokens( schema, entries) # Extend our dictionary with", "and isinstance(tokens, dict): for k, v in tokens.items(): if k.startswith('_')", "with. ConfigBase.logger.warning( 'Invalid URL {}, entry #{}'.format(url, no + 1))", "\"\"\" Returns the total number of servers loaded \"\"\" if", "any person obtaining a copy # of this software and", "more. This is more of a safe guard then #", "ARISING FROM, # OUT OF OR IN CONNECTION WITH THE", "it based on the specified config_format. If a format isn't", "SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,", "leave this default value set to True. Returns: A dictionary", "using comma's (,) to separate # them. 
<Tag(s)>=<URL> # Or", "self.url())) else: self.logger.warning( 'Failed to load Apprise configuration from {}'.format(", "if 'format' in results['qsd']: results['format'] = results['qsd'].get('format') if results['format'] not", "format. # Define what a valid line should look like", "file we remotely retrieve also has an `include` reference, we", "(:obj:`bool`, optional): a flag kept with the parsed URL which", "dict() # strip out processed tokens tokens = {k: v", "if one wasn't one detected # or enfored. config_format =", "an error or simply no data content = self.read(**kwargs) if", "to None on each iteration results = list() if isinstance(url,", "the child class to do whatever it takes for the", "If set to zero it is off. There is no", "# the config path manages the handling of relative include", "a string format) that contains 'include' entries (even file:// based", "raise TypeError(err) except (ValueError, TypeError): err = 'An invalid cache", "TEXT format. # Define what a valid line should look", "# Invalid content ConfigBase.logger.error( 'Invalid Apprise YAML based configuration specified.')", "to # match that. setattr(asset, k, parse_bool(v)) elif isinstance(v, six.string_types):", "our results to our global set results.append(r) elif isinstance(tokens, dict):", "configuration format unless otherwise # detected by the sub-modules default_config_format", "referenced using # the include keyword configs = list() try:", "if detected, otherwise it returns None \"\"\" # Detect Format", "TypeError(err) return def servers(self, asset=None, **kwargs): \"\"\" Performs reads loaded", "prefix: prefix = meta.get('prefix', '+') # Detect any matches matches", "# Create log entry of loaded URL ConfigBase.logger.debug('Loaded URL: {}'.format(plugin.url()))", "a url string with additional unescaped options. 
In # this", "be True or a (positive) integer try: self.cache = cache", "copyright notice and this permission notice shall be included in", "things consistent when working with the configurations that inherit this", "ContentIncludeMode.NEVER: # Prevent the loading if insecure base protocols ConfigBase.logger.warning(", "Initialize our insecure_includes flag self.insecure_includes = insecure_includes if 'encoding' in", "loaded URL ConfigBase.logger.debug( 'Loaded URL: {}'.format(plugin.url())) except Exception as e:", "the moment a the type has been determined for line,", "cached at self._cached_time = time.time() # Nothing more to do;", "found # in this configuration file to our list self._cached_servers.extend(", "be used. ConfigBase.logger.warning( 'Could not load URL {} on line", "is a file we shouldn't be parsing. It's owner #", "self.servers() return True if self._cached_servers else False def __nonzero__(self): \"\"\"", "self.cache < 0: err = 'A negative cache value ({})", "1 # Grab our first item _results = results.pop(0) #", "# By default set our return value to None since", "# The Default Encoding to use if not otherwise detected", "them # If we have been configured to do so", "continue if not isinstance(tokens.get(kw, None), dict): # Invalid; correct it", "OTHER DEALINGS IN # THE SOFTWARE. import os import re", "results['qsd']: results['format'] = results['qsd'].get('format') if results['format'] not in CONFIG_FORMATS: URLBase.logger.warning(", "= global_tags for key in list(_results.keys()): # Strip out any", "kw, meta in plugins.SCHEMA_MAP[schema]\\ .template_kwargs.items(): # Determine our prefix: prefix", "a list of additional configuration files referenced. 
You may also", "max_buffer_size = 131072 # By default all configuration is not", "memory (in a string format) that contains 'include' entries (even", "reserved if they start or end # with an underscore", "parse function which always returns a list return fn(content=content, asset=asset)", "set correctly if 'tag' in _results: # Tidy our list", "import parse_bool from ..utils import parse_urls from . import SCHEMA_MAP", "considered reserved if they start or end # with an", "if schema not in SCHEMA_MAP: ConfigBase.logger.warning( 'Unsupported include schema {}.'.format(schema))", "# our current config path url = os.path.join(self.config_path, url) url", "format type if detected, otherwise it returns None \"\"\" #", "# as additional configuration entries when loaded. include <ConfigURL> \"\"\"", "= result.get('asset', None) if tokens and isinstance(tokens, dict): for k,", "content was downloaded correctly. \"\"\" if not isinstance(self._cached_servers, list): #", "user overrides the config format on the URL # this", "Not a problem; we simply have no includes includes =", "our entries entry = 0 while len(results): # Increment our", "itself as None config_format = None # Don't read any", "that starts with a URL, or our tag # definitions", "'Invalid Apprise TEXT configuration format found ' '{} on line", "'tag' in _results: # Tidy our list up _results['tag'] =", "sublicense, and / or sell # copies of the Software,", "Grab our first item _results = results.pop(0) # tag is", "= list() # A list of additional configuration files referenced", "post process a set of tokens provided in a YAML", "#{}' .format(key, no + 1, entry)) del _results[key] ConfigBase.logger.trace( 'URL", "not load include URL: {}'.format(url)) self.logger.debug('Loading Exception: {}'.format(str(e))) continue #", "file at least has something to take action # with.", "be included # as additional configuration entries when loaded. include", "expired. 
return True @staticmethod def parse_url(url, verify_host=True): \"\"\"Parses the URL", "plugin using the # parsed URL information plugin = plugins.SCHEMA_MAP[results['schema']](**results)", "parsed from our URL results = SCHEMA_MAP[schema].parse_url(url) if not results:", "if results['format'] not in CONFIG_FORMATS: URLBase.logger.warning( 'Unsupported format specified {}'.format(", "configs = fn(content=content, asset=asset) self._cached_servers.extend(servers) # Configuration files were detected;", "# a comma and/or space configs.extend(parse_urls(url)) elif isinstance(url, dict): #", "by a # comma and/or space includes = parse_urls(includes) elif", "ConfigBase.logger.error( 'Undetectable Apprise configuration found ' 'based on line {}.'.format(line))", "of loaded URL ConfigBase.logger.debug( 'Loaded URL: {}'.format(plugin.url())) except Exception as", "provided in a YAML file where the user provided some", "a role # for cases where we are not caching", "our data servers.append(plugin) # Return what was loaded return (servers,", "specified config_format. If a format isn't specified, then it is", "previously loaded content for speed self._cached_servers = None # Initialize", "format found ' '{} on line {}.'.format(entry, line)) # Assume", "parsed URL which some child classes will later use to", "in enumerate(urls): # Our results object is what we use", "format if 'format' in results['qsd']: results['format'] = results['qsd'].get('format') if results['format']", "schema not in SCHEMA_MAP: ConfigBase.logger.warning( 'Unsupported include schema {}.'.format(schema)) continue", "we reach here, we have a comment entry # Adjust", "this in # and associating it with the cache value", "stop the moment a the type has been determined for", "list() try: # Load our data (safely) result = yaml.load(content,", "parse_urls from . 
import SCHEMA_MAP # Test whether token is", "EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE", "return False # If we reach here our configuration should", "return results # Allow overriding the default config format if", "in an Python 3.x based 'if statement'. True is returned", "to our global set results.append(r) elif isinstance(tokens, dict): # support", "only containing a list of URLs. Return a tuple that", "coding: utf-8 -*- # # Copyright (C) 2020 <NAME> <<EMAIL>>", "are required to # re-retrieve our settings. self._cached_time = None", "error or simply no data content = self.read(**kwargs) if not", "\"\"\" Initialize some general logging and common server arguments that", "Set our cache flag; it can be True or a", "of URLs. Return a tuple that looks like (servers, configs)", "for no, url in enumerate(urls): # Our results object is", "it has not expired. if self.cache is True: # we", "set this value. It would be recommended to keep it", "configuration found ' 'based on line {}.'.format(line)) # Take an", "take action # otherwise. return (list(), list()) url, config =", "content and attempts to detect the format type The function", "should be considered # missing and/or expired. return True @staticmethod", "and isinstance(getattr(asset, k), (bool, six.string_types))): # We can't set a", "specified.'.format( cache) self.logger.warning(err) raise TypeError(err) except (ValueError, TypeError): err =", "include root directive # includes = result.get('include', None) if isinstance(includes,", "our plugin using the # parsed URL information plugin =", "that contains 'include' entries (even file:// based ones). 
In these", "was cached at self._cached_time = time.time() return self._cached_servers def read(self):", "a template of our parsed URL as a base to", "successfully loaded our data servers.append(plugin) return (servers, configs) def pop(self,", "use them return self._cached_servers # Our cached response object self._cached_servers", "ConfigBase.logger.debug( 'Detected YAML configuration ' 'based on line {}.'.format(line)) break", "function returns the actual format type if detected, otherwise it", "configuration format ({}) was specified'.format( config_format)) return (list(), list()) #", "an underscore ConfigBase.logger.warning( 'Ignored asset key \"{}\".'.format(k)) continue if not", "object to be wrapped in an Python 2.x based 'if", "entry #{}'.format( url, no + 1)) continue # add our", "this file) which will be included # as additional configuration", "to take action # with. ConfigBase.logger.warning( 'Ignored entry {} found", "- If we find a string that starts with a", "are invalid or can not be used. ConfigBase.logger.warning( 'Could not", "acquire the schema at the very least to allow #", "return (list(), list()) # YAML Version version = result.get('version', 1)", "were a yaml file specifically formatted for Apprise. Return a", "this is a file we shouldn't be parsing. It's owner", "not (url or config): # Comment/empty line; do nothing continue", "a special keyword that is managed by Apprise object. #", "server arguments that will keep things consistent when working with", "managed by Apprise object. # The below ensures our tags", "limit to how high you set this value. It would", "FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL", "a boolean, then # we want to convert the specified", "(servers, configs) where: - servers contains a list of loaded", "over each config URL for no, url in enumerate(includes): if", "'Invalid Apprise YAML based configuration specified.') return (list(), list()) #", "with a YAML file. 
# - If we find a", "if _url is None: # the loop above failed to", "list(_results.keys()): # Strip out any tokens we know that we", "if our content was downloaded correctly. \"\"\" if not isinstance(self._cached_servers,", "If the file we remotely retrieve also has an `include`", "the Apprise object to be wrapped in an Python 2.x", "Store the url and ignore arguments associated configs.extend(u for u", "files. allow_cross_includes = ContentIncludeMode.NEVER # the config path manages the", "'encoding' in results['qsd']: results['encoding'] = results['qsd'].get('encoding') # Our cache value", "if schema in plugins.SCHEMA_MAP: entries = ConfigBase.__extract_special_tokens( schema, entries) #", "version = result.get('version', 1) if version != 1: # Invalid", "value to an int identifying the number of seconds the", "to do; return our empty cache list return self._cached_servers #", "\"\"\" Removes an indexed Notification Service from the stack and", "if not results: # We're done; we failed to parse", "to 2 deep. If set to zero it is off.", "configs.extend(parse_urls(url)) elif isinstance(url, dict): # Store the url and ignore", "considered # missing and/or expired. 
return True @staticmethod def parse_url(url,", "schema, entries) # Extend our dictionary with our new entries", "add our results to our global set results.append(r) elif isinstance(tokens,", "plugin type - tokens must be a dictionary containing the", "owner # can read the error printed to screen and", "where a self hosting apprise developer may wish to load", "This place a role # for cases where we are", "child classes \"\"\" return None def expired(self): \"\"\" Simply returns", "'based on line {}.'.format(line)) break elif result.group('text'): config_format = ConfigFormat.TEXT", "# We are a url string with additional unescaped options", "# in the Software without restriction, including without limitation the", "configuration and returns all of the services that could be", "can't over-ride if 'schema' in entries: del entries['schema'] # support", "ConfigFormat from ..common import CONFIG_FORMATS from ..common import ContentIncludeMode from", "isn't an integer; now treat it as a bool #", "not (hasattr(asset, k) and isinstance(getattr(asset, k), (bool, six.string_types))): # We", "= list() try: # Load our data (safely) result =", "files were detected; recursively populate them # If we have", "YAML entry #{}'.format(no + 1)) continue # Track our entries", "of loaded Notification Services servers = list() # A list", "\"\"\" # Detect Format Logic: # - A pound/hashtag (#)", "space configs.extend(parse_urls(url)) elif isinstance(url, dict): # Store the url and", "manages the handling of relative include config_path = os.getcwd() def", "it if recursion is set to 2 deep. If set", "a YAML file where the user provided some of the", "are disabled. 
When set to True, all Apprise Config files", "this configuration file to our list self._cached_servers.extend( cfg_plugin.servers(asset=asset)) # We", "Increment our entry count entry += 1 # Grab our", "cache=True, recursion=0, insecure_includes=False, **kwargs): \"\"\" Initialize some general logging and", "in enumerate(content, start=1): result = valid_line_re.match(entry) if not result: #", "ConfigBase.logger.warning( 'Unparseable URL {} on line {}.'.format(url, line)) continue #", "url in enumerate(urls): # Our results object is what we", "have no includes includes = list() # Iterate over each", "r'(?P<url>[a-z0-9]{2,9}://.*)|' r'include\\s+(?P<config>.+))?\\s*$', re.I) try: # split our content up to", "the newly added # notifications if any were set results['tag']", "The URL you want to fully parse. verify_host (:obj:`bool`, optional):", "isinstance(self._cached_servers, list) and self.cache: # We have enough reason to", "e)) return (list(), list()) if not isinstance(result, dict): # Invalid", "comment entry # Adjust default format to TEXT config_format =", "+ 1)) continue # add our results to our global", "ConfigFormat.TEXT return config_format @staticmethod def config_parse(content, asset=None, config_format=None, **kwargs): \"\"\"", "loaded our data servers.append(plugin) # Return what was loaded return", "arguments are invalid or can not be used. self.logger.warning( 'Could", "with the newly added # notifications if any were set", "# due to errors if six.PY2: it = url.iteritems() else:", "# Prepare our Asset Object _results['asset'] = asset try: #", "not matches: # we're done with this entry continue if", "string... 
schema = GET_SCHEMA_RE.match(url) if schema is None: # Log", "as a bool # instead: results['cache'] = parse_bool(results['qsd']['cache']) return results", "in CONFIG_FORMATS: # Invalid configuration type specified ConfigBase.logger.error( 'An invalid", "global tag root directive # global_tags = set() tags =", "sell # copies of the Software, and to permit persons", "else: # Ensure our schema is always in lower case", "it = iter(url.items()) # Track the URL to-load _url =", "from self.servers() return True if self._cached_servers else False def __nonzero__(self):", "asset with the notification. \"\"\" # A list of loaded", "user what entries were ignored # due to errors if", "# Comment/empty line; do nothing continue if config: ConfigBase.logger.debug('Include URL:", "to match anything ConfigBase.logger.warning( 'Unsupported URL, entry #{}'.format(no + 1))", "# if we reach here, we can now add this", "of tracking; store it's # details: _results = plugins.url_to_dict(url) if", "<<EMAIL>> # All rights reserved. # # This code is", "servers(self, asset=None, **kwargs): \"\"\" Performs reads loaded configuration and returns", "entries so that maintainer of config # config file at", "**kwargs): \"\"\" Performs reads loaded configuration and returns all of", "or False if content should be retrieved. 
\"\"\" if isinstance(self._cached_servers,", "= 'An invalid cache value ({}) was specified.'.format(cache) self.logger.warning(err) raise", "else: # six.PY3 _url, tokens = next(iter(url.items())) # Tags you", "some general logging and common server arguments that will keep", "def config_parse_text(content, asset=None): \"\"\" Parse the specified content as though", "set results['tag'] = set(parse_list(result.group('tags'))) # Prepare our Asset Object results['asset']", "distribute, sublicense, and / or sell # copies of the", "add this servers found # in this configuration file to", "Create log entry of loaded URL ConfigBase.logger.debug('Loaded URL: {}'.format(plugin.url())) except", "= GET_SCHEMA_RE.match(url) if schema is None: # Log invalid entries", "# Ensure our schema is always in lower case schema", "os.path.join(self.config_path, url) url = '{}://{}'.format(schema, URLBase.quote(url)) else: # Ensure our", "know # what the format is yet config_format = None", "<ConfigURL> \"\"\" # A list of loaded Notification Services servers", "these keywords map to their appropriate value they're expected \"\"\"", "has an `include` reference, we will only advance through it", "# None is returned if there was an error or", "AppriseAsset) else AppriseAsset() try: # Attempt to create an instance", "import os import re import six import yaml import time", "OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION", "age_in_sec = time.time() - self._cached_time if age_in_sec <= self.cache: #", "result: # Invalid syntax ConfigBase.logger.error( 'Undetectable Apprise configuration found '", "list()) if config_format not in CONFIG_FORMATS: # Invalid configuration type", "current config path url = os.path.join(self.config_path, url) url = '{}://{}'.format(schema,", "one it woul fail. However this include would be possible", "containing the URL fully parsed if successful, otherwise None is", "on the specified config_format. 
If a format isn't specified, then", "more tags can be idenified using comma's (,) to separate", "Asset is a boolean, then # we want to convert", "results.append(_results) else: # Unsupported ConfigBase.logger.warning( 'Unsupported Apprise YAML entry #{}'.format(no", "in _results.items()]))) # Prepare our Asset Object _results['asset'] = asset", "= '' if (isinstance(v, (bool, six.string_types)) and isinstance(getattr(asset, k), bool)):", "OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT", "By default we cache our responses so that subsiquent calls", "the server object as dictionary # containing all of the", "False # If we reach here our configuration should be", "though it were a simple text file only containing a", "to load Apprise configuration from {}'.format( self.url())) # Set the", "# Dynamically load our parse_ function based on our config", "be implimented by the child classes \"\"\" return None def", "= time.time() - self._cached_time if age_in_sec <= self.cache: # We", "configuration') return (list(), list()) if config_format not in CONFIG_FORMATS: #", "in a YAML file where the user provided some of", "the stack return self._cached_servers.pop(index) @staticmethod def __extract_special_tokens(schema, tokens): \"\"\" This", "an Python 2.x based 'if statement'. True is returned if", "URLs. Return a tuple that looks like (servers, configs) where:", "is None: # Log invalid entries so that maintainer of", "If we find a string followed by a colon, we", "URL ConfigBase.logger.debug('Loaded URL: {}'.format(plugin.url())) except Exception as e: # the", "of the server object as dictionary # containing all of", "no # reason we should be reading in more. This", "default_config_format = ConfigFormat.TEXT # This is only set if the", "Logic: # - A pound/hashtag (#) is alawys a comment", "from .. 
import plugins from ..AppriseAsset import AppriseAsset from ..URLBase", "set a function or non-string set value ConfigBase.logger.warning( 'Invalid asset", "only advance through it if recursion is set to 2", "{k: v for k, v in tokens.items() if not k.startswith(prefix)}", "to their appropriate value they're expected \"\"\" # Create a", "customized for Apprise. Args: url (str): The URL you want", "ConfigBase.logger.error( 'Invalid Apprise TEXT configuration format found ' '{} on", "we find a string followed by a colon, we know", "successfully loaded our data servers.append(plugin) # Return what was loaded", "non-comment and non blank line # matched. # - If", "# work with r = _results.copy() # add our result", "Execute our config parse function which always returns a tuple", "return (servers, configs) def pop(self, index=-1): \"\"\" Removes an indexed", "not VALID_TOKEN = re.compile( r'(?P<token>[a-z0-9][a-z0-9_]+)', re.I) class ConfigBase(URLBase): \"\"\" This", "to how high you set this value. It would be", "Service from the stack and returns it. 
By default, the", "Apprise YAML configuration ' 'entry #{}, item #{}' .format(no +", "GET_SCHEMA_RE from ..utils import parse_list from ..utils import parse_bool from", "pull from self.servers() return len(self._cached_servers) def __bool__(self): \"\"\" Allows the", "an iterator to our server list \"\"\" if not isinstance(self._cached_servers,", "# copies of the Software, and to permit persons to", "# The default expected configuration format unless otherwise # detected", "\\ self.default_config_format \\ if self.config_format is None else self.config_format #", "We're dealing with a relative path; prepend # our current", "line)) ConfigBase.logger.debug('Loading Exception: %s' % str(e)) continue # if we", "result = valid_line_re.match(entry) if not result: # Invalid syntax ConfigBase.logger.error(", "config_format = ConfigFormat.TEXT ConfigBase.logger.debug( 'Detected TEXT configuration ' 'based on", "least has something to take action # with. ConfigBase.logger.warning( 'Invalid", "results['qsd'].get('format') if results['format'] not in CONFIG_FORMATS: URLBase.logger.warning( 'Unsupported format specified", "asset with the notification. The file syntax is: # #", "> 0: # Attempt to acquire the schema at the", ".template_kwargs.items(): # Determine our prefix: prefix = meta.get('prefix', '+') #", "# you can also use the keyword 'include' and identify", "from the stack and returns it. By default, the last", "matched here. 
# - Detection begins on the first non-comment", "detected, otherwise it returns None \"\"\" # Detect Format Logic:", "and our configuration servers, configs = fn(content=content, asset=asset) self._cached_servers.extend(servers) #", "parsed from the URL; we decrement # it one level", "that looks like (servers, configs) where: - servers contains a", "Store our schema schema = _schema.group('schema').lower() # Store our URL", "not isinstance(tokens.get(kw, None), dict): # Invalid; correct it tokens[kw] =", "TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN", "over-ride if 'schema' in entries: del entries['schema'] # support our", "the URL results['insecure_includes'] = self.insecure_includes try: # Attempt to create", "a (positive) integer try: self.cache = cache if isinstance(cache, bool)", "results to our global set results.append(_results) else: # Unsupported ConfigBase.logger.warning(", "settings. self._cached_time = None # Tracks previously loaded content for", "{} on line {}.'.format( url, line)) ConfigBase.logger.debug('Loading Exception: %s' %", "class ConfigBase(URLBase): \"\"\" This is the base class for all", "= None # Tracks previously loaded content for speed self._cached_servers", "we've already # declared (prior to our recursion) results['cache'] =", "Permission is hereby granted, free of charge, to any person", "we can now add this servers found # in this", "# Store the encoding self.encoding = kwargs.get('encoding') if 'format' in", "YAML configuration ' 'based on line {}.'.format(line)) break elif result.group('text'):", "err = 'A negative cache value ({}) was specified.'.format( cache)", "cfg_plugin.servers(asset=asset)) # We no longer need our configuration object del", "may wish to load configuration from memory (in a string", "# We have enough reason to look further into our", "with the notification. The file syntax is: # # pound/hashtag", "this default value set to True. 
Returns: A dictionary is", "Store our include line configs.append(config.strip()) continue # Acquire our url", "from ..AppriseAsset import AppriseAsset from ..URLBase import URLBase from ..common", "Apprise object to be wrapped in an Python 3.x based", "will keep things consistent when working with the configurations that", "else self.config_format # Dynamically load our parse_ function based on", "instead: results['cache'] = parse_bool(results['qsd']['cache']) return results @staticmethod def detect_config_format(content, **kwargs):", "object del cfg_plugin else: self.logger.debug( 'Recursion limit reached; ignoring Include", "we reach here, we successfully loaded our data servers.append(plugin) #", "so we # can at least tell the end user", "self).__init__(**kwargs) # Tracks the time the content was last retrieved", "({}) was specified.'.format( cache) self.logger.warning(err) raise TypeError(err) except (ValueError, TypeError):", "object should be implimented by the child classes \"\"\" return", "can also use the keyword 'include' and identify a #", "entries entry = 0 while len(results): # Increment our entry", "new value setattr(asset, k, v.strip()) else: # we must set", "we will only advance through it if recursion is set", "Just use the global settings _results['tag'] = global_tags for key", "stack and returns it. By default, the last element of", "self._cached_time = time.time() # Nothing more to do; return our", "setattr(asset, k, parse_bool(v)) elif isinstance(v, six.string_types): # Set our asset", "results = SCHEMA_MAP[schema].parse_url(url) if not results: # Failed to parse", "return value to None since we don't know # what", "isinstance(includes, six.string_types): # Support a single inline string or multiple", "were ignored # due to errors if six.PY2: it =", "of loaded notification plugins - configs contains a list of", "with. 
ConfigBase.logger.warning( 'Ignored entry {} found under urls, entry #{}'", "Take an early exit return None # Attempt to detect", "WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT", "or more tags can be idenified using comma's (,) to", "Performs reads loaded configuration and returns all of the services", "directive # urls = result.get('urls', None) if not isinstance(urls, (list,", "string with additional unescaped options if isinstance(entries, dict): if six.PY2:", "acquired schema schema = None for key, tokens in it:", "below ensures our tags are set correctly if 'tag' in", "template of our parsed URL as a base to #", "list() # A list of additional configuration files referenced using", "tokens results = plugins.url_to_dict(url) if results is None: # Failed", "to convert the specified string to # match that. setattr(asset,", "to permit persons to whom the Software is # furnished", "uses a default if one wasn't one detected # or", "WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING", "no + 1, entry)) del _results[key] ConfigBase.logger.trace( 'URL #{}: {}", "in configs: if self.recursion > 0: # Attempt to acquire", "any more of this amount of data into memory as", "\\ ContentIncludeMode.NEVER: # Prevent the loading if insecure base protocols", "and returns it broken apart into a dictionary. This is", "# for cases where we are not caching our response", "CONFIG_FORMATS: # Simple error checking err = 'An invalid config", "that could be parsed and loaded. \"\"\" if not self.expired():", "if not os.path.isabs(url): # We're dealing with a relative path;", "commas) followed by an equal sign we know # we're", "self.recursion - 1 # Insecure Includes flag can never be", "valid schema worthy of tracking; store it's # details: _results", "be somewhat inefficient if disabled. 
Only disable caching if you", "to load configuration from memory (in a string format) that", "_url = key if _url is None: # the loop", "encoding = 'utf-8' # The default expected configuration format unless", "use, copy, modify, merge, publish, distribute, sublicense, and / or", "= result.get('urls', None) if not isinstance(urls, (list, tuple)): # Not", "# notifications if any were set results['tag'] = set(parse_list(result.group('tags'))) #", "least has something to take action # with. ConfigBase.logger.warning( 'Ignored", "the user provided some of the special keywords. We effectivley", "\"\"\" if config_format is None: # Detect the format config_format", "results['qsd']: # First try to get it's integer value try:", "(list(), list()) # # global asset object # asset =", "else: self.logger.warning( 'Failed to load Apprise configuration from {}'.format( self.url()))", "problem; we simply have no urls urls = list() #", "urls. schema = GET_SCHEMA_RE.match(url) if schema is None: # Plan", "six.PY3 _url, tokens = next(iter(url.items())) # Tags you just can't", "our new entries r.update(entries) # add our results to our", "url tokens results = plugins.url_to_dict(url) if results is None: #", "results['qsd'].get('encoding') # Our cache value if 'cache' in results['qsd']: #", "worthy of tracking; store it's # details: _results = plugins.url_to_dict(url)", "working with the configurations that inherit this class. 
By default", "= plugins.url_to_dict(url) if results is None: # Failed to parse", "special tokens (if they're present) if schema in plugins.SCHEMA_MAP: tokens", "index): \"\"\" Returns the indexed server entry associated with the", "reason to look further into our cached content # and", "ignored # due to errors if six.PY2: it = url.iteritems()", "any matches matches = \\ {k[1:]: str(v) for k, v", "import CONFIG_FORMATS from ..common import ContentIncludeMode from ..utils import GET_SCHEMA_RE", "key \"{}\".'.format(k)) continue if not (hasattr(asset, k) and isinstance(getattr(asset, k),", "content we can pull from self.servers() return self._cached_servers[index] def __iter__(self):", "again. For local file references this makes no difference at", "if successful, otherwise None is returned. \"\"\" results = URLBase.parse_url(url,", "over # lines matched here. # - Detection begins on", "A dictionary is returned containing the URL fully parsed if", "# Prepare our Asset Object results['asset'] = asset # No", "If we have been configured to do so for url", "flag self.insecure_includes = insecure_includes if 'encoding' in kwargs: # Store", "overrides the config format on the URL # this should", "simply no data content = self.read(**kwargs) if not isinstance(content, six.string_types):", "our data (safely) result = yaml.load(content, Loader=yaml.SafeLoader) except (AttributeError, yaml.parser.ParserError,", "# Extend our dictionary with our new entries r.update(entries) #", "continue if v is None: # Convert to an empty", "as dictionary # containing all of the information parsed from", "fetch more configuration from another source and add it to", "is strongly recomended that you leave this default value set", "to use if not otherwise detected encoding = 'utf-8' #", "what the format is yet config_format = None # iterate", "specified ConfigBase.logger.error( 'An invalid configuration format ({}) was specified'.format( config_format))", "else: # add our results to our global 
set results.append(_results)", "r'((?P<yaml>[a-z0-9]+):.*))?$', re.I) try: # split our content up to read", "None) if isinstance(includes, six.string_types): # Support a single inline string", "a format isn't specified, then it is auto detected. \"\"\"", "like valid_line_re = re.compile( r'^\\s*(?P<line>([;#]+(?P<comment>.*))|' r'(\\s*(?P<tags>[^=]+)=|=)?\\s*' r'(?P<url>[a-z0-9]{2,9}://.*)|' r'include\\s+(?P<config>.+))?\\s*$', re.I) try:", "# We are a url string with additional unescaped options.", "None: # Convert to an empty string v = ''", "us to fetch more configuration from another source and add", "was not expected string type ConfigBase.logger.error( 'Invalid Apprise TEXT based", "configuration location (like this file) which will be included #", "# Adjust default format to TEXT config_format = ConfigFormat.TEXT return", "plugin using the # parsed URL information plugin = plugins.SCHEMA_MAP[_results['schema']](**_results)", "they're present) if schema in plugins.SCHEMA_MAP: entries = ConfigBase.__extract_special_tokens( schema,", "# Nothing more to do; return our empty cache list", "provided some of the special keywords. We effectivley look up", "\"{}\".'.format(k)) continue if not (hasattr(asset, k) and isinstance(getattr(asset, k), (bool,", "find a string followed by a colon, we know we're", "\"{}\".'.format(k)) continue # # global tag root directive # global_tags", "# configuration location (like this file) which will be included", "isinstance(tokens, (list, tuple, set)): # populate and/or override any results", "is None: # Convert to an empty string v =", "it to our existing compilation. If the file we remotely", "We effectivley look up what these keywords map to their", "it is auto detected. 
\"\"\" if config_format is None: #", "location (like this file) which will be included # as", "our result set r.update(tokens) # add our results to our", "no longer include any special tokens such as +,-, and", "in url.keys()) # # urls root directive # urls =", "SSL transactions take place). Unless under very specific circumstances, it", "couldn't detect configuration ConfigBase.logger.error('Could not detect configuration') return (list(), list())", "ConfigBase.__extract_special_tokens( schema, tokens) # Copy ourselves a template of our", "map to their appropriate value they're expected \"\"\" # Create", "expected string type ConfigBase.logger.error( 'Invalid Apprise configuration specified.') return None", "# warn the user match = VALID_TOKEN.match(key) if not match:", "and to permit persons to whom the Software is #", "value ({}) was specified.'.format(cache) self.logger.warning(err) raise TypeError(err) return def servers(self,", "list()) if not isinstance(result, dict): # Invalid content ConfigBase.logger.error( 'Invalid", "so that subsiquent calls does not cause the content to", "error checking err = 'An invalid config format ({}) was", "if not isinstance(tokens.get(kw, None), dict): # Invalid; correct it tokens[kw]", "hereby granted, free of charge, to any person obtaining a", "colon, we know we're dealing # with a YAML file.", "import parse_list from ..utils import parse_bool from ..utils import parse_urls", "Failed to parse the server URL self.logger.warning( 'Unparseable include URL", "OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE", "asset=None): \"\"\" Parse the specified content as though it were", "True. Returns: A dictionary is returned containing the URL fully", "consequences. You can alternatively set the cache value to an", "self.cache is True: # we have not expired, return False", "' '{} on line {}.'.format(entry, line)) # Assume this is", "it takes for the # config plugin to load the", "# all copies or substantial portions of the Software. 
#", "elif isinstance(v, six.string_types): # Set our asset object with the", "URL as a base to # work with r =", "configs.extend(u for u in url.keys()) # # urls root directive", "the indexed server entry associated with the loaded notification servers", "based on allow_cross_includes rules if (SCHEMA_MAP[schema].allow_cross_includes == ContentIncludeMode.STRICT and schema", "our content up to read line by line content =", "don't know # what the format is yet config_format =", "found in YAML ' 'configuration entry #{}, item #{}' .format(key,", "PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE # AUTHORS", "list() # read() causes the child class to do whatever", "determined for line, entry in enumerate(content, start=1): result = valid_line_re.match(entry)", "Detection begins on the first non-comment and non blank line", "# of this software and associated documentation files(the \"Software\"), to", "value set to True. Returns: A dictionary is returned containing", "global set results.append(r) else: # add our results to our", "enumerate(content, start=1): result = valid_line_re.match(entry) if not result: # Invalid", "break elif result.group('text'): config_format = ConfigFormat.TEXT ConfigBase.logger.debug( 'Detected TEXT configuration", "set results.append(_results) else: # Unsupported ConfigBase.logger.warning( 'Unsupported Apprise YAML entry", "matches = \\ {k[1:]: str(v) for k, v in tokens.items()", "if self.cache is True: # we have not expired, return", "least tell the end user what entries were ignored #", "take action # with. ConfigBase.logger.warning( 'Ignored entry {} found under", "and : - schema must be a valid schema of", "True, all Apprise Config files marked to be in STRICT", "a string ConfigBase.logger.warning( 'Invalid asset value to \"{}\".'.format(k)) continue #", "matched. 
# - If we find a string followed by", "specified.') return None # By default set our return value", "OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH", "continue # add our results to our global set results.append(_results)", "# missing and/or expired. return True @staticmethod def parse_url(url, verify_host=True):", "the file we remotely retrieve also has an `include` reference,", "Attempt to create an instance of our plugin using the", "is: # # pound/hashtag allow for line comments # #", "servers.append(plugin) return (servers, configs) def pop(self, index=-1): \"\"\" Removes an", "a simple text file only containing a list of URLs.", "= SCHEMA_MAP[results['schema']](**results) except Exception as e: # the arguments are", "Returns the total number of servers loaded \"\"\" if not", "allow_cross_includes rules if (SCHEMA_MAP[schema].allow_cross_includes == ContentIncludeMode.STRICT and schema not in", "separate # them. <Tag(s)>=<URL> # Or you can use this", "the Software. # # THE SOFTWARE IS PROVIDED \"AS IS\",", "' 'based on line {}.'.format(line)) break elif result.group('text'): config_format =", "yaml.load(content, Loader=yaml.SafeLoader) except (AttributeError, yaml.parser.ParserError, yaml.error.MarkedYAMLError) as e: # Invalid", "any tokens we know that we can't accept and #", "# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR", "load Apprise configuration from {}'.format( self.url())) # Set the time", "include <ConfigURL> \"\"\" # A list of loaded Notification Services", "the # parsed URL information plugin = plugins.SCHEMA_MAP[_results['schema']](**_results) # Create", "not detect configuration') return (list(), list()) if config_format not in", "'include' # line found in configuration files. allow_cross_includes = ContentIncludeMode.NEVER", "inherit this class. By default we cache our responses so", "end # with an underscore ConfigBase.logger.warning( 'Ignored asset key \"{}\".'.format(k))", "string with additional unescaped options. 
In # this case we", "# six.PY3 _url, tokens = next(iter(url.items())) # Tags you just", "match: ConfigBase.logger.warning( 'Ignoring invalid token ({}) found in YAML '", "configuration object del cfg_plugin else: self.logger.debug( 'Recursion limit reached; ignoring", "tokens = {k: v for k, v in tokens.items() if", "= iter(url.items()) # Track the URL to-load _url = None", "Invalid configuration type specified ConfigBase.logger.error( 'An invalid configuration format ({})", "all copies or substantial portions of the Software. # #", "ConfigBase.logger.warning( 'Unsupported Apprise YAML entry #{}'.format(no + 1)) continue #", "a in _results.items()]))) # Prepare our Asset Object _results['asset'] =", "case we want to iterate over all of our options", "'Ignored entry {} found under urls, entry #{}' .format(key, no", "parse the server URL ConfigBase.logger.warning( 'Unparseable URL {} on line", "to create an instance of our plugin using the #", "our data servers.append(plugin) return (servers, configs) def pop(self, index=-1): \"\"\"", "return self._cached_servers.pop(index) @staticmethod def __extract_special_tokens(schema, tokens): \"\"\" This function takes", "the configurations that inherit this class. By default we cache", "parsed URL information plugin = plugins.SCHEMA_MAP[_results['schema']](**_results) # Create log entry", "tag is a special keyword that is managed by Apprise", "with the parsed URL which some child classes will later", "configuration for example, only a file:// based configuration can include", "use to instantiate our object if # we can. Reset", "dictionary # containing all of the information parsed from our", "we will get our # content again. 
age_in_sec = time.time()", "= VALID_TOKEN.match(key) if not match: ConfigBase.logger.warning( 'Ignoring invalid token ({})", "tokens.copy() for kw, meta in plugins.SCHEMA_MAP[schema]\\ .template_kwargs.items(): # Determine our", "following conditions : # # The above copyright notice and", "This is only set if the user overrides the config", "r.update(entries) # add our results to our global set results.append(r)", "process a set of tokens provided in a YAML file", "is removed. \"\"\" if not isinstance(self._cached_servers, list): # Generate ourselves", "import GET_SCHEMA_RE from ..utils import parse_list from ..utils import parse_bool", "configuration file attempted to include a file:// one it woul", "meta.get('prefix', '+') # Detect any matches matches = \\ {k[1:]:", "key \"{}\".'.format(k)) continue if v is None: # Convert to", "correctly. \"\"\" if not isinstance(self._cached_servers, list): # Generate ourselves a", "recursion # Initialize our insecure_includes flag self.insecure_includes = insecure_includes if", "self._cached_time if age_in_sec <= self.cache: # We have not expired;", "overriding the default config format if 'format' in results['qsd']: results['format']", "None config_format = None # Don't read any more of", "path url = os.path.join(self.config_path, url) url = '{}://{}'.format(schema, URLBase.quote(url)) else:", "(like this file) which will be included # as additional", "configuration specified.') return (list(), list()) # YAML Version version =", "ALWAYS mode. Take a file:// based configuration for example, only", "self._cached_servers # Our Configuration format uses a default if one", "verify it has not expired. 
if self.cache is True: #", "URL self.logger.warning( 'Unparseable include URL {}'.format(url)) continue # Handle cross", "config): # Comment/empty line; do nothing continue if config: ConfigBase.logger.debug('Include", "plugins.url_to_dict(_url) if _results is None: # Setup dictionary _results =", "special tokens (if they're present) if schema in plugins.SCHEMA_MAP: entries", "The below ensures our tags are set correctly if 'tag'", "type The function returns the actual format type if detected,", "import plugins from ..AppriseAsset import AppriseAsset from ..URLBase import URLBase", "_results['tag'] = \\ set(parse_list(_results['tag'])) | global_tags else: # Just use", "requires us to fetch more configuration from another source and", "\"\"\" if not self.expired(): # We already have cached results", "URL for no, url in enumerate(includes): if isinstance(url, six.string_types): #", "starts with a URL, or our tag # definitions (accepting", "SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR", "TEXT configuration format found ' '{} on line {}.'.format(entry, line))", "string format) that contains 'include' entries (even file:// based ones).", "the arguments are invalid or can not be used. 
self.logger.warning(", "find a string that starts with a URL, or our", "TEXT based configuration specified.') return (list(), list()) for line, entry", "# Prevent the loading if insecure base protocols ConfigBase.logger.warning( 'Including", "True @staticmethod def parse_url(url, verify_host=True): \"\"\"Parses the URL and returns", "try: # Load our data (safely) result = yaml.load(content, Loader=yaml.SafeLoader)", "default we cache our responses so that subsiquent calls does", "a URL, or our tag # definitions (accepting commas) followed", "Loader=yaml.SafeLoader) except (AttributeError, yaml.parser.ParserError, yaml.error.MarkedYAMLError) as e: # Invalid content", "return (list(), list()) if not isinstance(result, dict): # Invalid content", "- If we find a string followed by a colon,", "Reset it to None on each iteration results = list()", "of our parsed URL as a base # to work", "should look like valid_line_re = re.compile( r'^\\s*(?P<line>([;#]+(?P<comment>.*))|' r'(\\s*(?P<tags>[^=]+)=|=)?\\s*' r'(?P<url>[a-z0-9]{2,9}://.*)|' r'include\\s+(?P<config>.+))?\\s*$',", "We have not expired; return False return False # If", "copy # of this software and associated documentation files(the \"Software\"),", "if the user overrides the config format on the URL", "- self._cached_time if age_in_sec <= self.cache: # We have not", "based one. because it is set to STRICT mode. 
If", "not caching our response and are required to # re-retrieve", "object is what we use to instantiate our object if", "what these keywords map to their appropriate value they're expected", "of the file to attempt to detect it # stop", "None else self.config_format # Dynamically load our parse_ function based", "# pound/hashtag allow for line comments # # One or", "configuration specified.') return (list(), list()) for line, entry in enumerate(content,", "NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE", "encoding self.encoding = kwargs.get('encoding') if 'format' in kwargs \\ and", "# - A pound/hashtag (#) is alawys a comment character", "a comment entry # Adjust default format to TEXT config_format", "= parse_bool(results['qsd']['cache']) return results @staticmethod def detect_config_format(content, **kwargs): \"\"\" Takes", "configuration type specified ConfigBase.logger.error( 'An invalid configuration format ({}) was", "and returns all of the services that could be parsed", "if version != 1: # Invalid syntax ConfigBase.logger.error( 'Invalid Apprise", "age_in_sec <= self.cache: # We have not expired; return False", "if isinstance(self._cached_servers, list) and self.cache: # We have enough reason", "# add our result set r.update(tokens) # add our results", "more to do; return our empty cache list return self._cached_servers", "(positive) integer try: self.cache = cache if isinstance(cache, bool) else", "results # Allow overriding the default config format if 'format'", "a list of URLs. Return a tuple that looks like", "loaded content for speed self._cached_servers = None # Initialize our", "entries (even file:// based ones). In these circumstances if you", "all of the services that could be parsed and loaded.", "= os.path.join(self.config_path, url) url = '{}://{}'.format(schema, URLBase.quote(url)) else: # Ensure", "specifically formatted for Apprise. 
Return a tuple that looks like", "Apprise YAML version specified {}.'.format(version)) return (list(), list()) # #", "{ # Minimum requirements 'schema': schema, } if isinstance(tokens, (list,", "URLBase.quote(url)) else: # Ensure our schema is always in lower", "to acquire the schema at the very least to allow", "content to be retrieved again. For local file references this", "broken apart into a dictionary. This is very specific and", "SCHEMA_MAP: ConfigBase.logger.warning( 'Unsupported include schema {}.'.format(schema)) continue # Parse our", "reference, we will only advance through it if recursion is", "results['encoding'] = results['qsd'].get('encoding') # Our cache value if 'cache' in", "continue # Build a list of tags to associate with", "to an int identifying the number of seconds the previously", "ones). In these circumstances if you want these 'include' entries", "# Generate ourselves a list of content we can pull", "of the services that could be parsed and loaded. \"\"\"", "more of a safe guard then # anything else. 128KB", "URL fully parsed if successful, otherwise None is returned. \"\"\"", "on line {}.'.format(line)) # Take an early exit return None", "<= self.cache: # We have not expired; return False return", "will only advance through it if recursion is set to", "we can. Reset it to None on each iteration results", ".format(no + 1, entry)) ConfigBase.logger.debug('Loading Exception: %s' % str(e)) continue", "loaded Notification Services servers = list() # A list of", "THE USE OR OTHER DEALINGS IN # THE SOFTWARE. import", "invalid config format ({}) was specified.'.format( self.config_format) self.logger.warning(err) raise TypeError(err)", "the content to be retrieved again. 
For local file references", "# We already have cached results to return; use them", "content) except TypeError: # content was not expected string type", "# Invalid syntax ConfigBase.logger.error( 'Undetectable Apprise configuration found ' 'based", "In these circumstances if you want these 'include' entries to", "mode are treated as being in ALWAYS mode. Take a", "value if 'cache' in results['qsd']: # First try to get", "loads it based on the specified config_format. If a format", "kwargs.get('encoding') if 'format' in kwargs \\ and isinstance(kwargs['format'], six.string_types): #", "on each iteration results = list() if isinstance(url, six.string_types): #", "schema schema = _schema.group('schema').lower() # Store our URL and Schema", "list of additional configuration files referenced. You may optionally associate", "format self.config_format = kwargs.get('format').lower() if self.config_format not in CONFIG_FORMATS: #", "so for url in configs: if self.recursion > 0: #", "Store the enforced config format self.config_format = kwargs.get('format').lower() if self.config_format", "import AppriseAsset from ..URLBase import URLBase from ..common import ConfigFormat", "to assume we're dealing with a file schema = 'file'", "be idenified using comma's (,) to separate # them. <Tag(s)>=<URL>", "string that starts with a URL, or our tag #", "for no, url in enumerate(includes): if isinstance(url, six.string_types): # Support", "function takes a list of tokens and updates them to", "#{}'.format(no + 1)) continue _results = plugins.url_to_dict(_url) if _results is", "could be parsed and loaded. \"\"\" if not self.expired(): #", "of additional configuration files referenced using # the include keyword", "strip out processed tokens tokens = {k: v for k,", "string to # match that. 
setattr(asset, k, parse_bool(v)) elif isinstance(v,", "payload if 'encoding' in results['qsd']: results['encoding'] = results['qsd'].get('encoding') # Our", "function which always returns a tuple # of our servers", "# Log invalid entries so that maintainer of config #", "'config_parse_{}'.format(config_format)) # Execute our config parse function which always returns", "ConfigBase.logger.debug( 'Detected TEXT configuration ' 'based on line {}.'.format(line)) break", "results['format'] not in CONFIG_FORMATS: URLBase.logger.warning( 'Unsupported format specified {}'.format( results['format']))", "basic validation if schema not in SCHEMA_MAP: ConfigBase.logger.warning( 'Unsupported include", "len(results): # Increment our entry count entry += 1 #", "# - If we find a string that starts with", "with a TEXT format. # Define what a valid line", "if self.recursion > 0: # Attempt to acquire the schema", "in tokens.items(): if k.startswith('_') or k.endswith('_'): # Entries are considered", "def __nonzero__(self): \"\"\" Allows the Apprise object to be wrapped", "last element of the list is removed. \"\"\" if not", "1)) continue # Store our schema schema = _schema.group('schema').lower() #", "insecure_include by default are disabled. 
When set to True, all", "nothing continue if config: ConfigBase.logger.debug('Include URL: {}'.format(config)) # Store our", "results @staticmethod def detect_config_format(content, **kwargs): \"\"\" Takes the specified content", "a default if one wasn't one detected # or enfored.", "the specified config content and loads it based on the", "the handling of relative include config_path = os.getcwd() def __init__(self,", "to iterate over all of our options so we #", "negative cache value ({}) was specified.'.format( cache) self.logger.warning(err) raise TypeError(err)", "based configuration specified.') return (list(), list()) for line, entry in", "urls = list() # Iterate over each URL for no,", "= None # Don't read any more of this amount", "entries r.update(entries) # add our results to our global set", "a safe guard then # anything else. 128KB (131072B) max_buffer_size", "to parse the server URL self.logger.warning( 'Unparseable include URL {}'.format(url))", "= dict() # strip out processed tokens tokens = {k:", "optionally associate an asset with the notification. \"\"\" # A", "an Python 3.x based 'if statement'. 
True is returned if", "SCHEMA_MAP # Test whether token is valid or not VALID_TOKEN", "begins on the first non-comment and non blank line #", "associated with the loaded notification servers \"\"\" if not isinstance(self._cached_servers,", "@staticmethod def parse_url(url, verify_host=True): \"\"\"Parses the URL and returns it", "Prepare our Asset Object _results['asset'] = asset try: # Attempt", "k, v in tokens.items(): if k.startswith('_') or k.endswith('_'): # Entries", "(ValueError, TypeError): # No problem, it just isn't an integer;", "format on the URL # this should always initialize itself", "http:// based configuration file attempted to include a file:// one", "Unsupported ConfigBase.logger.warning( 'Unsupported Apprise YAML entry #{}'.format(no + 1)) continue", "ConfigBase.logger.debug('Loading Exception: %s' % str(e)) continue # if we reach", "function which always returns a list return fn(content=content, asset=asset) @staticmethod", "not expected string type ConfigBase.logger.error( 'Invalid Apprise configuration specified.') return", "may optionally associate an asset with the notification. \"\"\" #", "self.recursion > 0: # Attempt to acquire the schema at", "was an error or simply no data content = self.read(**kwargs)", "DEALINGS IN # THE SOFTWARE. import os import re import", "keyword. This keyword requires us to fetch more configuration from", "cache value ({}) was specified.'.format(cache) self.logger.warning(err) raise TypeError(err) return def", "recursion) results['cache'] = False # Recursion can never be parsed", "| global_tags else: # Just use the global settings _results['tag']", "recursion value self.recursion = recursion # Initialize our insecure_includes flag", "tokens def __getitem__(self, index): \"\"\" Returns the indexed server entry", "self.encoding = kwargs.get('encoding') if 'format' in kwargs \\ and isinstance(kwargs['format'],", "BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS", "detected. 
\"\"\" if config_format is None: # Detect the format", "return None # By default set our return value to", "configuration is prohibited. ' 'Ignoring URL {}'.format(schema, url)) continue #", "configuration files referenced. You may also optionally associate an asset", "'Invalid asset value to \"{}\".'.format(k)) continue # # global tag", "del entries['schema'] # support our special tokens (if they're present)", "results to return; use them return self._cached_servers # Our cached", "self._cached_servers # Our cached response object self._cached_servers = list() #", "True. \"\"\" super(ConfigBase, self).__init__(**kwargs) # Tracks the time the content", "file:// based configuration for example, only a file:// based configuration", "ConfigBase.logger.warning( 'Invalid asset key \"{}\".'.format(k)) continue if v is None:", "lumping this in # and associating it with the cache", "a function or non-string set value ConfigBase.logger.warning( 'Invalid asset key", "of charge, to any person obtaining a copy # of", "it = url.iteritems() else: # six.PY3 it = iter(url.items()) #", "# Just use the global settings _results['tag'] = global_tags for", "text file only containing a list of URLs. 
Return a", "and self.cache: # We have enough reason to look further", "list()) for line, entry in enumerate(content, start=1): result = valid_line_re.match(entry)", "there is no # reason we should be reading in", "in CONFIG_FORMATS: URLBase.logger.warning( 'Unsupported format specified {}'.format( results['format'])) del results['format']", "ConfigFormat.TEXT ConfigBase.logger.debug( 'Detected TEXT configuration ' 'based on line {}.'.format(line))", "have a comment entry # Adjust default format to TEXT", "asset=asset) self._cached_servers.extend(servers) # Configuration files were detected; recursively populate them", "self.logger.warning(err) raise TypeError(err) return def servers(self, asset=None, **kwargs): \"\"\" Performs", "= None # Initialize our recursion value self.recursion = recursion", "effectivley look up what these keywords map to their appropriate", "B is to assume we're dealing with a file schema", "@staticmethod def detect_config_format(content, **kwargs): \"\"\" Takes the specified content and", "insecure_includes flag self.insecure_includes = insecure_includes if 'encoding' in kwargs: #", "our global set results.append(_results) else: # Unsupported ConfigBase.logger.warning( 'Unsupported Apprise", "time.time() - self._cached_time if age_in_sec <= self.cache: # We have", "if schema is None: # Plan B is to assume", "valid schema of a supported plugin type - tokens must", "# Some basic validation if schema not in SCHEMA_MAP: ConfigBase.logger.warning(", "if self.config_format is None else self.config_format # Dynamically load our", "referenced. 
You may also optionally associate an asset with the", "need our configuration object del cfg_plugin else: self.logger.debug( 'Recursion limit", "else: # we must set strings with a string ConfigBase.logger.warning(", "in self.schemas() and not self.insecure_includes) or \\ SCHEMA_MAP[schema].allow_cross_includes == \\", "the encoding self.encoding = kwargs.get('encoding') if 'format' in kwargs \\", "of loaded URL ConfigBase.logger.debug('Loaded URL: {}'.format(plugin.url())) except Exception as e:", "it's integer value try: results['cache'] = int(results['qsd']['cache']) except (ValueError, TypeError):", "'Invalid Apprise TEXT based configuration specified.') return (list(), list()) for", "set to True. Returns: A dictionary is returned containing the", "as +,-, and : - schema must be a valid", "Python 2.x based 'if statement'. True is returned if our", "self._cached_time = time.time() return self._cached_servers def read(self): \"\"\" This object", "line comments # # One or more tags can be", "if self._cached_servers: self.logger.info('Loaded {} entries from {}'.format( len(self._cached_servers), self.url())) else:", "some child classes will later use to verify SSL keys", "k, a in _results.items()]))) # Prepare our Asset Object _results['asset']", "this does mean more then one call can be made", "previously retrieved can exist for before it should be considered", "utf-8 -*- # # Copyright (C) 2020 <NAME> <<EMAIL>> #", "stack return self._cached_servers.pop(index) @staticmethod def __extract_special_tokens(schema, tokens): \"\"\" This function", "a list of tokens and updates them to no longer", "self._cached_servers.pop(index) @staticmethod def __extract_special_tokens(schema, tokens): \"\"\" This function takes a", "screen and take action # otherwise. 
return (list(), list()) url,", "is None: ConfigBase.logger.warning( 'Unparseable URL {}, entry #{}'.format( url, no", "# we must set strings with a string ConfigBase.logger.warning( 'Invalid", "loaded our data servers.append(plugin) return (servers, configs) def pop(self, index=-1):", "always returns a tuple # of our servers and our", "list of content we can pull from self.servers() return self._cached_servers[index]", "AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, #", "from memory (in a string format) that contains 'include' entries", "schema must be a valid schema of a supported plugin", "initialize itself as None config_format = None # Don't read", "= plugins.SCHEMA_MAP[results['schema']](**results) # Create log entry of loaded URL ConfigBase.logger.debug('Loaded", "if content should be retrieved. \"\"\" if isinstance(self._cached_servers, list) and", "do nothing continue if config: ConfigBase.logger.debug('Include URL: {}'.format(config)) # Store", "else int(cache) if self.cache < 0: err = 'A negative", "set(parse_list(_results['tag'])) | global_tags else: # Just use the global settings", "references this makes no difference at all. 
But for remote", "# Handle cross inclusion based on allow_cross_includes rules if (SCHEMA_MAP[schema].allow_cross_includes", "'An invalid config format ({}) was specified.'.format( self.config_format) self.logger.warning(err) raise", "(list(), list()) for line, entry in enumerate(content, start=1): result =", "def read(self): \"\"\" This object should be implimented by the", "schema worthy of tracking; store it's # details: _results =", "# parsed URL information plugin = plugins.SCHEMA_MAP[_results['schema']](**_results) # Create log", "+,-, and : - schema must be a valid schema", "this case we want to iterate over all of our", "parse_url() for entries in tokens: # Copy ourselves a template", "dictionary with our new entries r.update(entries) # add our results", "'Could not load Apprise YAML configuration ' 'entry #{}, item", "if isinstance(url, six.string_types): # Support a single inline string or", "object self._cached_servers = list() # read() causes the child class", "can't set a function or non-string set value ConfigBase.logger.warning( 'Invalid", "any special tokens such as +,-, and : - schema", "#{}'.format(no + 1)) continue # Track our entries entry =", "Apprise TEXT configuration format found ' '{} on line {}.'.format(entry,", "a problem; we simply have no urls urls = list()", "# The below ensures our tags are set correctly if", "where the user provided some of the special keywords. We", "otherwise None is returned. 
\"\"\" results = URLBase.parse_url(url, verify_host=verify_host) if", "= \\ set(parse_list(_results['tag'])) | global_tags else: # Just use the", "# Test our schema _schema = GET_SCHEMA_RE.match(key) if _schema is", "this software and associated documentation files(the \"Software\"), to deal #", "of a supported plugin type - tokens must be a", "our url return results # Allow overriding the default config", "includes = result.get('include', None) if isinstance(includes, six.string_types): # Support a", "del _results[key] ConfigBase.logger.trace( 'URL #{}: {} unpacked as:{}{}' .format(no +", "self.cache = cache if isinstance(cache, bool) else int(cache) if self.cache", "'YAML Exception:{}{}'.format(os.linesep, e)) return (list(), list()) if not isinstance(result, dict):", "out any tokens we know that we can't accept and", "from self.servers() # Pop the element off of the stack", "self.cache: # We have not expired; return False return False", "(list, tuple, set)): # populate and/or override any results populated", "the Software, and to permit persons to whom the Software", "= 'An invalid config format ({}) was specified.'.format( self.config_format) self.logger.warning(err)", "URL and returns it broken apart into a dictionary. This", "it as a bool # instead: results['cache'] = parse_bool(results['qsd']['cache']) return", "tuple)): # Not a problem; we simply have no includes", "iter(url.items()) # Track the URL to-load _url = None #", "Support a single inline string or multiple ones separated by", "# config file at least has something to take action", "into memory as there is no # reason we should", "content = re.split(r'\\r*\\n', content) except TypeError: # content was not", "and attempts to detect the format type The function returns", "asset = asset if isinstance(asset, AppriseAsset) else AppriseAsset() tokens =", "our entry count entry += 1 # Grab our first", "reading in more. This is more of a safe guard", "configuration can include another file:// based one. 
because it is", "are invalid or can not be used. self.logger.warning( 'Could not", "schema of a supported plugin type - tokens must be", "2.x based 'if statement'. True is returned if our content", "isinstance(url, dict): # Store the url and ignore arguments associated", "Pop the element off of the stack return self._cached_servers.pop(index) @staticmethod", "of servers loaded \"\"\" if not isinstance(self._cached_servers, list): # Generate", "load our parse_ function based on our config format fn", "to errors if six.PY2: it = url.iteritems() else: # six.PY3", "{}'.format( len(self._cached_servers), self.url())) else: self.logger.warning( 'Failed to load Apprise configuration", "handling of relative include config_path = os.getcwd() def __init__(self, cache=True,", "os import re import six import yaml import time from", "A list of additional configuration files referenced using # the", "set results.append(_results) elif isinstance(url, dict): # We are a url", "ConfigBase.logger.warning( 'Could not load URL {} on line {}.'.format( url,", "token ({}) found in YAML ' 'configuration entry #{}, item", "# config plugin to load the data source and return", "we want to convert the specified string to # match", "return self._cached_servers # Our cached response object self._cached_servers = list()", "(SCHEMA_MAP[schema].allow_cross_includes == ContentIncludeMode.STRICT and schema not in self.schemas() and not", "plugin to load the data source and return unparsed content", "specified content and attempts to detect the format type The", "to parse our url return results # Allow overriding the", "the MIT License. 
# # Permission is hereby granted, free", "URL information cfg_plugin = SCHEMA_MAP[results['schema']](**results) except Exception as e: #", "URL ConfigBase.logger.warning( 'Unparseable URL {} on line {}.'.format(url, line)) continue", "re.compile( r'^\\s*(?P<line>([;#]+(?P<comment>.*))|' r'(?P<text>((?P<tag>[ \\t,a-z0-9_-]+)=)?[a-z0-9]+://.*)|' r'((?P<yaml>[a-z0-9]+):.*))?$', re.I) try: # split our", "tell the end user what entries were ignored # due", "None: # the loop above failed to match anything ConfigBase.logger.warning(", "Regex _url = key if _url is None: # the", "TypeError(err) except (ValueError, TypeError): err = 'An invalid cache value", "Default Encoding to use if not otherwise detected encoding =", "our first item _results = results.pop(0) # tag is a", "First try to get it's integer value try: results['cache'] =", "is to assume we're dealing with a file schema =", "entry #{}, item #{}' .format(key, no + 1, entry)) del", "__len__(self): \"\"\" Returns the total number of servers loaded \"\"\"", "def parse_url(url, verify_host=True): \"\"\"Parses the URL and returns it broken", "wrapped in an Python 2.x based 'if statement'. True is", "file:// based configuration can include another file:// based one. because", "= fn(content=content, asset=asset) self._cached_servers.extend(servers) # Configuration files were detected; recursively", "use the `include` keyword. This keyword requires us to fetch", "line {}.'.format(line)) break elif result.group('text'): config_format = ConfigFormat.TEXT ConfigBase.logger.debug( 'Detected", "1) if version != 1: # Invalid syntax ConfigBase.logger.error( 'Invalid", "_results is None: ConfigBase.logger.warning( 'Unparseable URL {}, entry #{}'.format( url,", "BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY,", "them. 
<Tag(s)>=<URL> # Or you can use this format (no", "our results to our global set results.append(_results) elif isinstance(url, dict):", "be in STRICT mode are treated as being in ALWAYS", "loading if insecure base protocols ConfigBase.logger.warning( 'Including {}:// based configuration", "to take action # with. ConfigBase.logger.warning( 'Invalid URL {}, entry", "Allows the Apprise object to be wrapped in an Python", "'Including {}:// based configuration is prohibited. ' 'Ignoring URL {}'.format(schema,", "index=-1): \"\"\" Removes an indexed Notification Service from the stack", "line {}.'.format(url, line)) continue # Build a list of tags", "type specified ConfigBase.logger.error( 'An invalid configuration format ({}) was specified'.format(", "Software is # furnished to do so, subject to the", "this amount of data into memory as there is no", "A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE #", "use the global settings _results['tag'] = global_tags for key in", "this value must be set to True. \"\"\" super(ConfigBase, self).__init__(**kwargs)", "deep. If set to zero it is off. There is", "from ..utils import parse_bool from ..utils import parse_urls from .", "attempted to include a file:// one it woul fail. However", "load include URL: {}'.format(url)) self.logger.debug('Loading Exception: {}'.format(str(e))) continue # if", "should be considered as expired or False if content should", "which always returns a tuple # of our servers and", "data. This method can be somewhat inefficient if disabled. Only", "the format type The function returns the actual format type", "whom the Software is # furnished to do so, subject", "is licensed under the MIT License. 
# # Permission is", "an early exit return None # Attempt to detect configuration", "list() # Iterate over each URL for no, url in", "file to our list self._cached_servers.extend( cfg_plugin.servers(asset=asset)) # We no longer", "data (safely) result = yaml.load(content, Loader=yaml.SafeLoader) except (AttributeError, yaml.parser.ParserError, yaml.error.MarkedYAMLError)", "Exception as e: # the arguments are invalid or can", ".format(no + 1, url, os.linesep, os.linesep.join( ['{}=\"{}\"'.format(k, a) for k,", "low if you do intend to use it. insecure_include by", "# Plan B is to assume we're dealing with a", "include URL: {}'.format(url)) self.logger.debug('Loading Exception: {}'.format(str(e))) continue # if we", "get our # content again. age_in_sec = time.time() - self._cached_time", "e: # Invalid content ConfigBase.logger.error( 'Invalid Apprise YAML data specified.')", "# We have not expired; return False return False #", "# Detect Format Logic: # - A pound/hashtag (#) is", "# Store our URL and Schema Regex _url = key", "SCHEMA_MAP[schema].allow_cross_includes == \\ ContentIncludeMode.NEVER: # Prevent the loading if insecure", "specified.'.format( self.config_format) self.logger.warning(err) raise TypeError(err) # Set our cache flag;", "if we reach here, we can now add this servers", "notification servers \"\"\" if not isinstance(self._cached_servers, list): # Generate ourselves", "be retrieved again. 
For local file references this makes no", "config format if 'format' in results['qsd']: results['format'] = results['qsd'].get('format') if", "schema.group('schema').lower() # Some basic validation if schema not in SCHEMA_MAP:", "# tag is a special keyword that is managed by", "or end # with an underscore ConfigBase.logger.warning( 'Ignored asset key", "match anything ConfigBase.logger.warning( 'Unsupported URL, entry #{}'.format(no + 1)) continue", "Our cache value if 'cache' in results['qsd']: # First try", "the sub-modules default_config_format = ConfigFormat.TEXT # This is only set", "/ or sell # copies of the Software, and to", "results is None: # Failed to parse the server URL", "reach here, we have a comment entry # Adjust default", "= next(iter(url.items())) # Tags you just can't over-ride if 'schema'", "child class to do whatever it takes for the #", "recursion defines how deep we recursively handle entries that use", "failed to match anything ConfigBase.logger.warning( 'Unsupported URL, entry #{}'.format(no +", "shall be included in # all copies or substantial portions", "the parsed URL which some child classes will later use", "KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO", "to detect it # stop the moment a the type", "\"\"\" # The Default Encoding to use if not otherwise", "from ..utils import parse_urls from . import SCHEMA_MAP # Test", "this class. By default we cache our responses so that", "# matched. # - If we find a string followed", "None on each iteration results = list() if isinstance(url, six.string_types):", "OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES", "format isn't specified, then it is auto detected. \"\"\" if", "content again. 
age_in_sec = time.time() - self._cached_time if age_in_sec <=", "otherwise it returns None \"\"\" # Detect Format Logic: #", "= set() tags = result.get('tag', None) if tags and isinstance(tags,", "conditions : # # The above copyright notice and this", "schema is None: # Plan B is to assume we're", "licensed under the MIT License. # # Permission is hereby", "merge, publish, distribute, sublicense, and / or sell # copies", "verify SSL keys (if SSL transactions take place). Unless under", "not os.path.isabs(url): # We're dealing with a relative path; prepend", "r'(\\s*(?P<tags>[^=]+)=|=)?\\s*' r'(?P<url>[a-z0-9]{2,9}://.*)|' r'include\\s+(?P<config>.+))?\\s*$', re.I) try: # split our content up", "OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN", "enfored. config_format = \\ self.default_config_format \\ if self.config_format is None", "if we reach here, we successfully loaded our data servers.append(plugin)", "results.pop(0) # tag is a special keyword that is managed", "CONFIG_FORMATS: # Invalid configuration type specified ConfigBase.logger.error( 'An invalid configuration", "retrieved again. For local file references this makes no difference", "..common import CONFIG_FORMATS from ..common import ContentIncludeMode from ..utils import", "config_format = None # Don't read any more of this", "mode. Take a file:// based configuration for example, only a", "simply have no urls urls = list() # Iterate over", "{}, entry #{}'.format(url, no + 1)) continue # We found", "0: err = 'A negative cache value ({}) was specified.'.format(", "log entry of loaded URL ConfigBase.logger.debug( 'Loaded URL: {}'.format(plugin.url())) except", "_results = results.pop(0) # tag is a special keyword that", "dealing with a TEXT format. 
# Define what a valid", "detected by the sub-modules default_config_format = ConfigFormat.TEXT # This is", "not isinstance(urls, (list, tuple)): # Not a problem; we simply", "associated documentation files(the \"Software\"), to deal # in the Software", "AppriseAsset from ..URLBase import URLBase from ..common import ConfigFormat from", "# it one level results['recursion'] = self.recursion - 1 #", "should be retrieved. \"\"\" if isinstance(self._cached_servers, list) and self.cache: #", "must be a valid schema of a supported plugin type", "(AttributeError, yaml.parser.ParserError, yaml.error.MarkedYAMLError) as e: # Invalid content ConfigBase.logger.error( 'Invalid", "guard then # anything else. 128KB (131072B) max_buffer_size = 131072", "a single inline string or multiple ones separated by a", "the # parsed URL information cfg_plugin = SCHEMA_MAP[results['schema']](**results) except Exception", "time our content was cached at self._cached_time = time.time() #", "'Invalid asset key \"{}\".'.format(k)) continue if v is None: #", "content, this does mean more then one call can be", "isinstance(v, six.string_types): # Set our asset object with the new", "with the loaded notification servers \"\"\" if not isinstance(self._cached_servers, list):", "'Ignoring invalid token ({}) found in YAML ' 'configuration entry", "not includable using the 'include' # line found in configuration", "the number of seconds the previously retrieved can exist for", "our parsed URL as a base to # work with", "tokens provided in a YAML file where the user provided", "cases where we are not caching our response and are", "config_format=None, **kwargs): \"\"\" Takes the specified config content and loads", "file:// based one. because it is set to STRICT mode.", "# iterate over each line of the file to attempt", "to include a file:// one it woul fail. 
However this", "of our plugin using the # parsed URL information plugin", "tokens (if they're present) if schema in plugins.SCHEMA_MAP: entries =", "The Default Encoding to use if not otherwise detected encoding", "entry of loaded URL ConfigBase.logger.debug( 'Loaded URL: {}'.format(plugin.url())) except Exception", "of a safe guard then # anything else. 128KB (131072B)", "lower case schema = schema.group('schema').lower() # Some basic validation if", "is yet config_format = None # iterate over each line", "options. In # this case we want to iterate over", "Define what a valid line should look like valid_line_re =", "# Update our entries tokens[kw].update(matches) # Return our tokens return", "# Configuration files were detected; recursively populate them # If", "isinstance(url, six.string_types): # Support a single inline string or multiple", "cache if isinstance(cache, bool) else int(cache) if self.cache < 0:", "config parse function which always returns a tuple # of", "# Our cache value if 'cache' in results['qsd']: # First", "config # config file at least has something to take", "ConfigBase(URLBase): \"\"\" This is the base class for all supported", "# Our Configuration format uses a default if one wasn't", "Apprise configuration from {}'.format( self.url())) # Set the time our", "does not cause the content to be retrieved again. For", "used. 
self.logger.warning( 'Could not load include URL: {}'.format(url)) self.logger.debug('Loading Exception:", "if tags and isinstance(tags, (list, tuple, six.string_types)): # Store any", "schema in plugins.SCHEMA_MAP: entries = ConfigBase.__extract_special_tokens( schema, entries) # Extend", "new entries r.update(entries) # add our results to our global", "six.PY3 it = iter(url.items()) # Track the URL to-load _url", "1)) continue _results = plugins.url_to_dict(_url) if _results is None: #", "the schema at the very least to allow # our", "# six.PY3 it = iter(url.items()) # Track the URL to-load", "considered as expired or False if content should be retrieved.", "line should look like valid_line_re = re.compile( r'^\\s*(?P<line>([;#]+(?P<comment>.*))|' r'(\\s*(?P<tags>[^=]+)=|=)?\\s*' r'(?P<url>[a-z0-9]{2,9}://.*)|'", "Invalid content ConfigBase.logger.error( 'Invalid Apprise YAML data specified.') ConfigBase.logger.debug( 'YAML", "= ConfigBase.__extract_special_tokens( schema, entries) # Extend our dictionary with our", "r'include\\s+(?P<config>.+))?\\s*$', re.I) try: # split our content up to read", "retrieve also has an `include` reference, we will only advance", "can include another file:// based one. because it is set", "a file we shouldn't be parsing. It's owner # can", "global_tags = set() tags = result.get('tag', None) if tags and", "format fn = getattr(ConfigBase, 'config_parse_{}'.format(config_format)) # Execute our config parse", "\\ and isinstance(kwargs['format'], six.string_types): # Store the enforced config format", "tags can be idenified using comma's (,) to separate #", "to verify SSL keys (if SSL transactions take place). Unless", "r = _results.copy() # add our result set r.update(tokens) #", "# Convert to an empty string v = '' if", "use the keyword 'include' and identify a # configuration location", "and are required to # re-retrieve our settings. 
self._cached_time =", "- servers contains a list of loaded notification plugins -", "detect it # stop the moment a the type has", "= set(parse_list(tags)) # # include root directive # includes =", "url.keys()) # # urls root directive # urls = result.get('urls',", "removed. \"\"\" if not isinstance(self._cached_servers, list): # Generate ourselves a", "results to our global set results.append(_results) elif isinstance(url, dict): #", "the information parsed from our URL results = SCHEMA_MAP[schema].parse_url(url) if", "dict): # support our special tokens (if they're present) if", "our plugin using the # parsed URL information cfg_plugin =", "+ 1, entry)) ConfigBase.logger.debug('Loading Exception: %s' % str(e)) continue #", "want these 'include' entries to be honored, this value must", "assume we're dealing with a file schema = 'file' if", "1, entry)) del _results[key] ConfigBase.logger.trace( 'URL #{}: {} unpacked as:{}{}'", "ContentIncludeMode from ..utils import GET_SCHEMA_RE from ..utils import parse_list from", "with additional unescaped options if isinstance(entries, dict): if six.PY2: _url,", "# strip out processed tokens tokens = {k: v for", "Copyright (C) 2020 <NAME> <<EMAIL>> # All rights reserved. #", "YAML Version version = result.get('version', 1) if version != 1:", "import re import six import yaml import time from ..", "format fn = getattr(ConfigBase, 'config_parse_{}'.format(config_format)) # Initialize our asset object", "url, line)) ConfigBase.logger.debug('Loading Exception: %s' % str(e)) continue # if", "include keyword configs = list() # Define what a valid", "WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND", "# Return what was loaded return (servers, configs) @staticmethod def", "self.config_format not in CONFIG_FORMATS: # Simple error checking err =", "= plugins.url_to_dict(url) if _results is None: ConfigBase.logger.warning( 'Unparseable URL {},", "whether we will get our # content again. 
age_in_sec =", "file where the user provided some of the special keywords.", "to # work with r = _results.copy() # add our", "to no longer include any special tokens such as +,-,", "= kwargs.get('format').lower() if self.config_format not in CONFIG_FORMATS: # Simple error", "= re.compile( r'^\\s*(?P<line>([;#]+(?P<comment>.*))|' r'(?P<text>((?P<tag>[ \\t,a-z0-9_-]+)=)?[a-z0-9]+://.*)|' r'((?P<yaml>[a-z0-9]+):.*))?$', re.I) try: # split", "getattr(ConfigBase, 'config_parse_{}'.format(config_format)) # Execute our config parse function which always", "entries['schema'] # support our special tokens (if they're present) if", "we can't accept and # warn the user match =", "#{}: {} unpacked as:{}{}' .format(no + 1, url, os.linesep, os.linesep.join(", "the Software is # furnished to do so, subject to", "would be recommended to keep it low if you do", "= result.group('url'), result.group('config') if not (url or config): # Comment/empty", "was last retrieved on. This place a role # for", "Detect the format config_format = ConfigBase.detect_config_format(content) if not config_format: #", "IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED,", "cause the content to be retrieved again. For local file", "Take a file:// based configuration for example, only a file://", "list return fn(content=content, asset=asset) @staticmethod def config_parse_text(content, asset=None): \"\"\" Parse", "limitation the rights # to use, copy, modify, merge, publish,", "and loaded. \"\"\" if not self.expired(): # We already have", "do whatever it takes for the # config plugin to", "ConfigBase.logger.debug('Include URL: {}'.format(config)) # Store our include line configs.append(config.strip()) continue", "or can not be used. ConfigBase.logger.warning( 'Could not load URL", "the payload if 'encoding' in results['qsd']: results['encoding'] = results['qsd'].get('encoding') #", "'Could not load URL {} on line {}.'.format( url, line))", "License. 
# # Permission is hereby granted, free of charge,", "list of content we can pull from self.servers() # Pop", "= key if _url is None: # the loop above", "%s' % str(e)) continue # if we reach here, we", "if any were set results['tag'] = set(parse_list(result.group('tags'))) # Prepare our", "== ContentIncludeMode.STRICT and schema not in self.schemas() and not self.insecure_includes)", "The default expected configuration format unless otherwise # detected by", "specified'.format( config_format)) return (list(), list()) # Dynamically load our parse_", "# # This code is licensed under the MIT License.", "import URLBase from ..common import ConfigFormat from ..common import CONFIG_FORMATS", "URL: {}'.format(config)) # Store our include line configs.append(config.strip()) continue #", "in tokens.items() if k.startswith(prefix)} if not matches: # we're done", "3.x based 'if statement'. True is returned if our content", "{}'.format( results['format'])) del results['format'] # Defines the encoding of the", "our Asset Object _results['asset'] = asset try: # Attempt to", "for k, v in tokens.items() if k.startswith(prefix)} if not matches:", "our response and are required to # re-retrieve our settings.", "__getitem__(self, index): \"\"\" Returns the indexed server entry associated with", "that you leave this default value set to True. Returns:", "} if isinstance(tokens, (list, tuple, set)): # populate and/or override", "# Setup dictionary _results = { # Minimum requirements 'schema':", "This function takes a list of tokens and updates them", "**kwargs): \"\"\" Takes the specified config content and loads it", "results['insecure_includes'] = self.insecure_includes try: # Attempt to create an instance", "# A list of loaded Notification Services servers = list()", "inline string or multiple ones separated by a # comma", "just can't over-ride if 'schema' in entries: del entries['schema'] #", "copies or substantial portions of the Software. 
# # THE", "if the configuration should be considered as expired or False", "= _results.copy() # add our result set r.update(tokens) # add", "self._cached_servers def read(self): \"\"\" This object should be implimented by", "if isinstance(asset, AppriseAsset) else AppriseAsset() tokens = result.get('asset', None) if", "have enough reason to look further into our cached content", "to our global set results.append(r) else: # add our results", "base # to work with r = _results.copy() # We", "to be honored, this value must be set to True.", "if isinstance(includes, six.string_types): # Support a single inline string or", "return self._cached_servers # Our Configuration format uses a default if", "# Unsupported ConfigBase.logger.warning( 'Unsupported Apprise YAML entry #{}'.format(no + 1))", "131072 # By default all configuration is not includable using", "configs) @staticmethod def config_parse_yaml(content, asset=None): \"\"\" Parse the specified content", "dict): if six.PY2: _url, tokens = next(url.iteritems()) else: # six.PY3", "statement'. True is returned if our content was downloaded correctly.", "then one call can be made to retrieve the (same)", "value. It would be recommended to keep it low if", "parsed and loaded. \"\"\" if not self.expired(): # We already", "fn(content=content, asset=asset) @staticmethod def config_parse_text(content, asset=None): \"\"\" Parse the specified", "(list, tuple)): # Not a problem; we simply have no", "dictionary tokens = tokens.copy() for kw, meta in plugins.SCHEMA_MAP[schema]\\ .template_kwargs.items():", "our recursion value self.recursion = recursion # Initialize our insecure_includes", "integer try: self.cache = cache if isinstance(cache, bool) else int(cache)", "six.string_types): # Store the enforced config format self.config_format = kwargs.get('format').lower()", "list of additional configuration files referenced using # the include", "Apprise. Return a tuple that looks like (servers, configs) where:", "notification. 
\"\"\" # A list of loaded Notification Services servers", "if isinstance(url, six.string_types): # We're just a simple URL string...", "'configuration entry #{}, item #{}' .format(key, no + 1, entry))", "_results = { # Minimum requirements 'schema': schema, } if", "sources \"\"\" # The Default Encoding to use if not", "our responses so that subsiquent calls does not cause the", "OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT", "'Could not load include URL: {}'.format(url)) self.logger.debug('Loading Exception: {}'.format(str(e))) continue", "self._cached_servers.extend(servers) # Configuration files were detected; recursively populate them #", "url (str): The URL you want to fully parse. verify_host", "Attempt to detect configuration if result.group('yaml'): config_format = ConfigFormat.YAML ConfigBase.logger.debug(", "it # stop the moment a the type has been", "os.linesep, os.linesep.join( ['{}=\"{}\"'.format(k, a) for k, a in _results.items()]))) #", "URL information plugin = plugins.SCHEMA_MAP[results['schema']](**results) # Create log entry of", "Initialize our asset object asset = asset if isinstance(asset, AppriseAsset)", "servers loaded \"\"\" if not isinstance(self._cached_servers, list): # Generate ourselves", "look like valid_line_re = re.compile( r'^\\s*(?P<line>([;#]+(?P<comment>.*))|' r'(\\s*(?P<tags>[^=]+)=|=)?\\s*' r'(?P<url>[a-z0-9]{2,9}://.*)|' r'include\\s+(?P<config>.+))?\\s*$', re.I)", "elif isinstance(tokens, dict): # support our special tokens (if they're", "+ 1)) continue # We found a valid schema worthy", "each URL for no, url in enumerate(urls): # Our results", "schema is None: # Log invalid entries so that maintainer", "configuration should be considered # missing and/or expired. return True", "entry #{}'.format(no + 1)) continue # Track our entries entry", "global asset object # asset = asset if isinstance(asset, AppriseAsset)", ": - schema must be a valid schema of a", "auto detected. 
\"\"\" if config_format is None: # Detect the", "pull from self.servers() return True if self._cached_servers else False def", "from our URL results = SCHEMA_MAP[schema].parse_url(url) if not results: #", "create an instance of our plugin using the # parsed", "is always in lower case schema = schema.group('schema').lower() # Some", "ourselves a template of our parsed URL as a base", "copy, modify, merge, publish, distribute, sublicense, and / or sell", "pound/hashtag (#) is alawys a comment character so we skip", "return unparsed content # None is returned if there was", "ConfigFormat.YAML ConfigBase.logger.debug( 'Detected YAML configuration ' 'based on line {}.'.format(line))", "was specified'.format( config_format)) return (list(), list()) # Dynamically load our", "url string with additional unescaped options. In # this case", "configuration files referenced. You may optionally associate an asset with", "with r = _results.copy() # add our result set r.update(tokens)", "# furnished to do so, subject to the following conditions", "tokens we know that we can't accept and # warn", "isinstance(tokens, dict): for k, v in tokens.items(): if k.startswith('_') or", "recursively handle entries that use the `include` keyword. This keyword", "fully parsed if successful, otherwise None is returned. \"\"\" results", "a list return fn(content=content, asset=asset) @staticmethod def config_parse_text(content, asset=None): \"\"\"", "= { # Minimum requirements 'schema': schema, } if isinstance(tokens,", "IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,", "# instead: results['cache'] = parse_bool(results['qsd']['cache']) return results @staticmethod def detect_config_format(content,", "content = self.read(**kwargs) if not isinstance(content, six.string_types): # Set the", "Software, and to permit persons to whom the Software is", "not be used. 
self.logger.warning( 'Could not load include URL: {}'.format(url))", "implimented by the child classes \"\"\" return None def expired(self):", "specified {}'.format( results['format'])) del results['format'] # Defines the encoding of", "valid_line_re.match(entry) if not result: # Invalid syntax ConfigBase.logger.error( 'Undetectable Apprise", "list()) # Dynamically load our parse_ function based on our", "k.endswith('_'): # Entries are considered reserved if they start or", "simply have no includes includes = list() # Iterate over", "urls = result.get('urls', None) if not isinstance(urls, (list, tuple)): #", "the list is removed. \"\"\" if not isinstance(self._cached_servers, list): #", "read(self): \"\"\" This object should be implimented by the child", "NONINFRINGEMENT.IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS", "{}:// based configuration is prohibited. ' 'Ignoring URL {}'.format(schema, url))", "responses so that subsiquent calls does not cause the content", "are a url string with additional unescaped options if isinstance(entries,", "str(v) for k, v in tokens.items() if k.startswith(prefix)} if not", "= list() # read() causes the child class to do", "the 'include' # line found in configuration files. allow_cross_includes =", "# - If we find a string followed by a", "global_tags = set(parse_list(tags)) # # include root directive # includes", "keyword 'include' and identify a # configuration location (like this", "to do so for url in configs: if self.recursion >", "specified.') ConfigBase.logger.debug( 'YAML Exception:{}{}'.format(os.linesep, e)) return (list(), list()) if not", "'Invalid Apprise YAML version specified {}.'.format(version)) return (list(), list()) #", "class to do whatever it takes for the # config", ".. 
import plugins from ..AppriseAsset import AppriseAsset from ..URLBase import", "a supported plugin type - tokens must be a dictionary", "we have not expired, return False return False # Verify", "keyword that is managed by Apprise object. # The below", "only set if the user overrides the config format on", "used. ConfigBase.logger.warning( 'Could not load Apprise YAML configuration ' 'entry", "URL: %s' % url) if self._cached_servers: self.logger.info('Loaded {} entries from", "returned. \"\"\" results = URLBase.parse_url(url, verify_host=verify_host) if not results: #", "marked to be in STRICT mode are treated as being", "MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO", "GET_SCHEMA_RE.match(url) if schema is None: # Log invalid entries so", "plugins.SCHEMA_MAP: tokens = ConfigBase.__extract_special_tokens( schema, tokens) # Copy ourselves a", "data into memory as there is no # reason we", "load URL {} on line {}.'.format( url, line)) ConfigBase.logger.debug('Loading Exception:", "error printed to screen and take action # otherwise. 
return", "set r.update(tokens) # add our results to our global set", "'encoding' in kwargs: # Store the encoding self.encoding = kwargs.get('encoding')", "loop above failed to match anything ConfigBase.logger.warning( 'Unsupported URL, entry", "format config_format = ConfigBase.detect_config_format(content) if not config_format: # We couldn't", "off of the stack return self._cached_servers.pop(index) @staticmethod def __extract_special_tokens(schema, tokens):", "ConfigBase.logger.warning( 'Could not load Apprise YAML configuration ' 'entry #{},", "return config_format @staticmethod def config_parse(content, asset=None, config_format=None, **kwargs): \"\"\" Takes", "what entries were ignored # due to errors if six.PY2:", "'schema': schema, } if isinstance(tokens, (list, tuple, set)): # populate", "amount of data into memory as there is no #", "return None def expired(self): \"\"\" Simply returns True if the", "can use this format (no tags associated) <URL> # you", "URL: {}'.format(url)) self.logger.debug('Loading Exception: {}'.format(str(e))) continue # if we reach", "#{}, item #{}' .format(key, no + 1, entry)) del _results[key]", "from ..common import ContentIncludeMode from ..utils import GET_SCHEMA_RE from ..utils", "def servers(self, asset=None, **kwargs): \"\"\" Performs reads loaded configuration and", "One or more tags can be idenified using comma's (,)", "Set the time our content was cached at self._cached_time =", "parsing. 
It's owner # can read the error printed to", "Exception:{}{}'.format(os.linesep, e)) return (list(), list()) if not isinstance(result, dict): #", "the URL to-load _url = None # Track last acquired", "1: # Invalid syntax ConfigBase.logger.error( 'Invalid Apprise YAML version specified", "config_format = ConfigBase.detect_config_format(content) if not config_format: # We couldn't detect", "if _results is None: # Setup dictionary _results = {", "reach here, we can now add this servers found #", "reached; ignoring Include URL: %s' % url) if self._cached_servers: self.logger.info('Loaded", "Copy ourselves a template of our parsed URL as a", "set results.append(r) elif isinstance(tokens, dict): # support our special tokens", "results['format'])) del results['format'] # Defines the encoding of the payload", "list of content we can pull from self.servers() return True", "tokens such as +,-, and : - schema must be", "if isinstance(entries, dict): if six.PY2: _url, tokens = next(url.iteritems()) else:", "we're done with this entry continue if not isinstance(tokens.get(kw, None),", "our existing compilation. If the file we remotely retrieve also", "expired, return False return False # Verify our cache time", "an indexed Notification Service from the stack and returns it.", "we cache our responses so that subsiquent calls does not", "obtaining a copy # of this software and associated documentation", "know we're dealing # with a YAML file. 
# -", "for all supported configuration sources \"\"\" # The Default Encoding", "= ContentIncludeMode.NEVER # the config path manages the handling of", "the URL fully parsed if successful, otherwise None is returned.", "line by line content = re.split(r'\\r*\\n', content) except TypeError: #", "is # furnished to do so, subject to the following", "to whom the Software is # furnished to do so,", "\\ if self.config_format is None else self.config_format # Dynamically load", "results['format'] = results['qsd'].get('format') if results['format'] not in CONFIG_FORMATS: URLBase.logger.warning( 'Unsupported", "# Attempt to create an instance of our plugin using", "'Invalid Apprise YAML data specified.') ConfigBase.logger.debug( 'YAML Exception:{}{}'.format(os.linesep, e)) return", "len(self._cached_servers) def __bool__(self): \"\"\" Allows the Apprise object to be", "THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY", "a list of tags to associate with the newly added", "one. because it is set to STRICT mode. If an", "files referenced. You may optionally associate an asset with the", "time the content was last retrieved on. This place a", "You may optionally associate an asset with the notification. \"\"\"", "recursion is set to 2 deep. If set to zero", "= self.recursion - 1 # Insecure Includes flag can never", "# parsed URL information plugin = plugins.SCHEMA_MAP[results['schema']](**results) # Create log", "tag root directive # global_tags = set() tags = result.get('tag',", "the loop above failed to match anything ConfigBase.logger.warning( 'Unsupported URL,", "or can not be used. ConfigBase.logger.warning( 'Could not load Apprise", "server list \"\"\" if not isinstance(self._cached_servers, list): # Generate ourselves", "The above copyright notice and this permission notice shall be", "cache is required because we're just lumping this in #", "to our list self._cached_servers.extend( cfg_plugin.servers(asset=asset)) # We no longer need", "when loaded. 
include <ConfigURL> \"\"\" # A list of loaded", "cases where a self hosting apprise developer may wish to", "Track last acquired schema schema = None for key, tokens", "tokens = next(url.iteritems()) else: # six.PY3 _url, tokens = next(iter(url.items()))", "global settings _results['tag'] = global_tags for key in list(_results.keys()): #", "no limit to how high you set this value. It", "can alternatively set the cache value to an int identifying", "syntax ConfigBase.logger.error( 'Undetectable Apprise configuration found ' 'based on line", "= url.iteritems() else: # six.PY3 it = iter(url.items()) # Track", "by # parse_url() for entries in tokens: # Copy ourselves", "no, url in enumerate(urls): # Our results object is what", "is no # reason we should be reading in more.", "\"\"\" # Create a copy of our dictionary tokens =", "in enumerate(includes): if isinstance(url, six.string_types): # Support a single inline", "tokens in it: # Test our schema _schema = GET_SCHEMA_RE.match(key)", "takes a list of tokens and updates them to no", "(url or config): # Comment/empty line; do nothing continue if", "the specified content as though it were a simple text", "import six import yaml import time from .. import plugins", "url.iteritems() else: # six.PY3 it = iter(url.items()) # Track the", "our global set results.append(_results) elif isinstance(url, dict): # We are", "if there was an error or simply no data content", "isinstance(tokens.get(kw, None), dict): # Invalid; correct it tokens[kw] = dict()", "mode. If an http:// based configuration file attempted to include", "reads loaded configuration and returns all of the services that", "be recommended to keep it low if you do intend" ]
import sys
import subprocess
from subprocess import Popen, PIPE

# ffmpeg -loglevel values, from least to most verbose.
AV_LOG_QUIET = "quiet"
AV_LOG_PANIC = "panic"
AV_LOG_FATAL = "fatal"
AV_LOG_ERROR = "error"
AV_LOG_WARNING = "warning"
AV_LOG_INFO = "info"
AV_LOG_VERBOSE = "verbose"
AV_LOG_DEBUG = "debug"

# Log level passed to every ffmpeg invocation.
ffmpeg_loglevel = AV_LOG_ERROR

IS_WIN32 = 'win32' in str(sys.platform).lower()

# Extra keyword arguments for Popen; on Windows they hide the console
# window that would otherwise flash up for each ffmpeg run.
SUBPROCESS_ARGS = {}
if IS_WIN32:
    startupinfo = subprocess.STARTUPINFO()
    # BUGFIX: the original or'ed subprocess.CREATE_NEW_CONSOLE into dwFlags.
    # CREATE_NEW_CONSOLE is a *creationflags* constant, not a STARTUPINFO
    # flag; its value (0x10) accidentally set STARTF_USEFILLATTRIBUTE here.
    # Only STARTF_USESHOWWINDOW belongs in dwFlags (it makes wShowWindow
    # below be honoured).
    startupinfo.dwFlags = subprocess.STARTF_USESHOWWINDOW
    startupinfo.wShowWindow = subprocess.SW_HIDE
    SUBPROCESS_ARGS['startupinfo'] = startupinfo


def popen_ffmpeg(inner_args):
    """Run ffmpeg with ``inner_args``, returning ``(stdout, stderr)`` bytes.

    The configured log level and ``-hide_banner`` are appended to the
    command line, ffmpeg's captured stderr is echoed to this process's
    stderr, and both captured streams are returned unmodified.

    :param inner_args: iterable of ffmpeg command-line arguments
        (everything between ``ffmpeg`` and the logging options).
    :return: tuple ``(stdout, stderr)`` as raw bytes.
    """
    cmd = [
        'ffmpeg',
        *inner_args,
        # logging
        '-loglevel', ffmpeg_loglevel,
        '-hide_banner',
    ]
    process = Popen(cmd, stdout=PIPE, stderr=PIPE, **SUBPROCESS_ARGS)
    stdout, stderr = process.communicate()
    print(stderr.decode(), end='', file=sys.stderr)
    return stdout, stderr
from setuptools import setup

# Packaging metadata for the nmn-iwp distribution.
PACKAGE_METADATA = dict(
    name="nmn-iwp",
    version="0.1",
    keywords="",
    packages=["vr", "vr.models"],
)

setup(**PACKAGE_METADATA)
class BSP_Grid(object):
    """Node/triangle store used by the BSP splitting routines.

    Wraps a surface's node array and triangle array, tagging each triangle
    with an id column so triangles keep their identity when they are
    subdivided during BSP splitting.
    """

    def __init__(self, node_array, tris, allocate_step=100000):
        """
        Store the triangles with an enumeration so that even when they are
        subdivided their identity is not lost.

        :param node_array: (n, 3) float array of node coordinates.
        :param tris: (m, 3) int array of node indices, one row per triangle.
        :param allocate_step: number of node slots grown per resize.
        """
        tri_nums = np.arange(len(tris), dtype=np.int32).reshape((len(tris), 1))
        # Six scratch columns (filled in during splitting) plus the id column.
        minus_ones = -np.ones((len(tris), 6), dtype=np.int32)
        self.tris = np.hstack((tris, minus_ones, tri_nums))

        self.allocate_step = allocate_step
        self.node_array = node_array  # Reference to the full list of nodes
        self._resize()
        self.next_free = len(node_array)
        self.split_cache = np.zeros(len(self.tris), dtype=np.int32)

    def _resize(self):
        """
        Increase node array size by the allocate_step amount.
        """
        self.array_size = len(self.node_array) + self.allocate_step
        self.node_array = np.concatenate(
            (self.node_array, np.zeros((self.allocate_step, 3))))

    def add_node(self, node):
        """
        Adds a new node to the end of the node array (expanding if required).
        Returns the index of the newly added node.
        """
        if self.next_free == self.array_size:
            self._resize()
        self.node_array[self.next_free] = node
        self.next_free += 1
        return self.next_free - 1

    def prepare_add(self, num_add_nodes):
        """
        Make sure that ``num_add_nodes`` can be added later without needing
        a resize.  Useful if adding nodes from within cython where resizing
        is tricky.  Returns the first free node index.
        """
        # BUGFIX: the original resized at most once, which is not enough
        # when num_add_nodes exceeds allocate_step; loop until the array
        # really has room for all the requested nodes.
        while self.next_free + num_add_nodes >= self.array_size:
            self._resize()
        return self.next_free
settings[\"voxel_size\"]) z_grid = np.arange(z_min, z_max +", "None: break if y_pos < min_y: continue if y_pos >", "== 0)): ## There is at least part of triangle", "1] veh_surf[\"z\"] = veh_surf['nodes'][:, 2] return veh_surf def find_occupied_voxels(surf, surf_mask,", "\"csys_trans\": tr_mat, \"value\": None} for key, veh_surf in vehicle_comp_coords.items(): #", "scale_factor=voxel_data[\"vox_size\"], mode='cube', opacity=1) xo, yo, zo = np.where(voxel_data[\"value\"] & 8)", "even when they are subdivided their identity is not lost.", "= len(self.node_array) + self.allocate_step self.node_array = np.concatenate((self.node_array, np.zeros((self.allocate_step, 3)))) def", "yo, zo = np.where(voxel_data[\"value\"] == 1) plot_vehicle = mlab.points3d(voxel_data[\"x_grid\"][xo], voxel_data[\"y_grid\"][yo],", "the supplied ``occupied_voxels`` is None a voxel array is created", "supplied if voxel_data[\"value\"] is None: voxel_data[\"value\"] = np.zeros((size_i - 1,", "if not (b_below_z and (len(b_below_z.tris) == 0)): ## There is", "voxel_data[\"z_grid\"][zo], color=(0.6, 0.6, 1.0), scale_mode=\"none\", scale_factor=voxel_data[\"vox_size\"], mode='cube', opacity=1) # No", "min(x_min, np.min(veh_surf[\"x\"])), max(x_max, np.max(veh_surf[\"x\"])) y_min, y_max = min(y_min, np.min(veh_surf[\"y\"])), max(y_max,", "vox_size = voxel_data[\"vox_size\"] ## Find the local extents of this", "opacity=1) # No manikins included, no need to plot them", "later without needing a resize. 
def convert_geom(veh_surf, tr_mat):
    """
    Rotate nodes using provided transformation matrix; convert xyz node
    dict entries to a combined nodes array.  Mutates and returns
    ``veh_surf``.
    """
    # Stack the per-axis coordinate vectors into an (n, 3) node array,
    # then apply the rotational 3x3 part of the transformation matrix.
    stacked = np.vstack((veh_surf["x"], veh_surf["y"], veh_surf["z"])).T
    veh_surf["nodes"] = np.dot(stacked, tr_mat[:3, :3])
    # Write the rotated coordinates back into the per-axis entries.
    for col, axis in enumerate(("x", "y", "z")):
        veh_surf[axis] = veh_surf["nodes"][:, col]
    return veh_surf
def find_occupied_voxels(surf, surf_mask, voxel_data):
    """
    Voxels with any triangle from ``surf`` are considered occupied and
    or'ed with ``surf_mask``.  If ``voxel_data["value"]`` is None a fresh
    voxel array is created; the (possibly updated) ``voxel_data`` dict is
    returned.
    """
    nodes = surf["nodes"]
    tris = surf["tris"]
    x_pts, y_pts, z_pts = (voxel_data[k] for k in ("x_grid", "y_grid", "z_grid"))
    vox_size = voxel_data["vox_size"]

    ## Find the local extents of this part, padded by one voxel
    min_x, max_x = np.min(surf["x"]) - vox_size, np.max(surf["x"]) + vox_size
    min_y, max_y = np.min(surf["y"]) - vox_size, np.max(surf["y"]) + vox_size
    min_z, max_z = np.min(surf["z"]) - vox_size, np.max(surf["z"]) + vox_size

    b_tree = BSP_Grid(nodes, tris)

    # Create BSP tree elements - we're not using a tree, but we are using
    # some of the functions
    b_x_root = BSP_Element(b_tree.tris, b_tree)

    size_i, size_j, size_k = len(x_pts), len(y_pts), len(z_pts)

    ## Create the occupied voxels if none were supplied
    if voxel_data["value"] is None:
        voxel_data["value"] = np.zeros((size_i - 1, size_j - 1, size_k - 1),
                                       dtype=np.uint32)
    occupied_voxels = voxel_data["value"]

    ## The [1:] is because to make n voxels in a given direction we need
    ## n-1 splits
    for i, x_pos in enumerate(x_pts[1:]):
        if x_pos < min_x:
            continue
        if x_pos > max_x:
            break
        b_above_x, b_below_x = b_x_root.split_at(0, x_pos)
        b_y_root = b_below_x
        for j, y_pos in enumerate(y_pts[1:]):
            if b_y_root is None:
                break
            if y_pos < min_y:
                continue
            if y_pos > max_y:
                break
            b_above_y, b_below_y = b_y_root.split_at(1, y_pos)
            b_z_root = b_below_y
            for k, z_pos in enumerate(z_pts[1:]):
                if b_z_root is None:
                    break
                if z_pos < min_z:
                    continue
                if z_pos > max_z:
                    break
                b_above_z, b_below_z = b_z_root.split_at(2, z_pos)
                if not (b_below_z and (len(b_below_z.tris) == 0)):
                    ## There is at least part of a triangle here, so mark
                    ## this voxel as occupied
                    occupied_voxels[i, j, k] |= surf_mask
                b_z_root = b_above_z
            b_y_root = b_above_y
        b_x_root = b_above_x

    return voxel_data
def main(vehicle_comp_coords, tr_mat, voxel_masks, settings):
    """
    Perform voxelization for all vehicle geometries in
    ``vehicle_comp_coords``.  Returns the populated voxel_data dict.
    """
    # Convert coordinates (in place) and find the overall bounding box
    for veh_surf in vehicle_comp_coords.values():
        convert_geom(veh_surf, tr_mat)

    x_grid, y_grid, z_grid = make_grid(vehicle_comp_coords, settings)
    voxel_data = {
        "x_grid": x_grid,
        "y_grid": y_grid,
        "z_grid": z_grid,
        "vox_size": settings["voxel_size"],
        "csys_trans": tr_mat,
        "value": None,
    }

    for key, veh_surf in vehicle_comp_coords.items():
        # Build up the voxel_data
        logging.debug("Sampling component: {}".format(key))
        ## Default mask is 1 for anything not in an identified set
        surf_mask = 1
        for mask, geo_set in voxel_masks.items():
            if veh_surf['part_class'] in geo_set:
                surf_mask |= mask
        voxel_data = find_occupied_voxels(veh_surf, surf_mask, voxel_data)

    return voxel_data
y_min, y_max", "tb_api.load_settings(\"settings.js\") DOORS = {'Hatch_Assembly_Rear_Ramp', 'Hatch_Assembly_Personnel_Door'} HATCHES = {'Hatch_Assembly_Driver_Commander', 'Hatch_Assembly_Cargo'} HULLS", "= np.zeros((size_i - 1, size_j - 1, size_k - 1),", "> max_x: break b_above_x, b_below_x = b_x_root.split_at(0, x_pos) b_y_root =", "normal veh_up = np.array([0., 1., 0.]) rot_around = np.cross(veh_up, np.array([0,", "yo, zo = np.where(voxel_data[\"value\"] & 8) plot_vehicle = mlab.points3d(voxel_data[\"x_grid\"][xo], voxel_data[\"y_grid\"][yo],", "or'ed with ``group_mask``. If the supplied ``occupied_voxels`` is None a", "< min_y: continue if y_pos > max_y: break b_above_y, b_below_y", "VOXEL_LABELS, SETTINGS) vox_veh_folder = r\"voxelated_models/vehicles/{}/{}\".format(SETTINGS[\"run_id\"], SETTINGS[\"voxel_size\"]) vox_veh_file = \"voxels_{}_vox{}_hacked\".format(SETTINGS[\"run_id\"], SETTINGS[\"voxel_size\"])", "for key, veh_surf in veh_surfs.items(): x_min, x_max = min(x_min, np.min(veh_surf[\"x\"])),", "# voxel_data[\"z_grid\"][zo], # color=(0.5, 1.0, 0.8), # scale_mode=\"none\", scale_factor=voxel_data[\"vox_size\"], #", "size_k = len(x_pts), len(y_pts), len(z_pts) ## Create the occupied voxels", "the index of the newly added node. \"\"\" if self.next_free", "list of nodes self._resize() self.next_free = len(node_array) self.split_cache = np.zeros(len(self.tris),", "1), dtype=np.uint32) occupied_voxels = voxel_data[\"value\"] ## The [1:] is because", "voxel_data[\"z_grid\"][zo], # color=(0.5, 1.0, 0.8), # scale_mode=\"none\", scale_factor=voxel_data[\"vox_size\"], # mode='cube',", "is because to make n voxels in a given direction", "if required). 
Returns the index of the newly added node.", "none were supplied if voxel_data[\"value\"] is None: voxel_data[\"value\"] = np.zeros((size_i", "surf_mask, voxel_data) return voxel_data if __name__ == \"__main__\": from rpl.tools.api", "BSP_Grid(nodes, tris) # Create BSP tree elements- we're not using", "+ settings[\"voxel_size\"], settings[\"voxel_size\"]) y_grid = np.arange(y_min, y_max + settings[\"voxel_size\"], settings[\"voxel_size\"])", "settings[\"voxel_size\"] y_min, y_max = y_min - settings[\"voxel_size\"], y_max + settings[\"voxel_size\"]", "node array (expanding if required). Returns the index of the", "Modify node coords so object aligns with cartesian axes of", "from rpl.tools.ray_tracing.bsp_tree_poly import BSP_Element from rpl.tools.geometry import geom_utils import data_io", "[1:] is because to make n voxels in a given", "16: MANIKINS} vehicle_surfs = tb_api.load_geometry(tb_api.get_all_geom_set() - MANIKINS, single_file=False) # Modify", "of the functions b_x_root = BSP_Element(b_tree.tris, b_tree) size_i, size_j, size_k", "at least part of triangle here so mark as occupied", "sure that ``num_add_nodes`` can be added later without needing a", "= np.arange(x_min, x_max + settings[\"voxel_size\"], settings[\"voxel_size\"]) y_grid = np.arange(y_min, y_max", "overall list of vehicle surfaces \"\"\" ## Find overall bounding", "plot them # xo, yo, zo = np.where(voxel_data[\"value\"] & 16)", "BSP_Element(b_tree.tris, b_tree) size_i, size_j, size_k = len(x_pts), len(y_pts), len(z_pts) ##", "scale_factor=voxel_data[\"vox_size\"], mode='cube', opacity=0.05) xo, yo, zo = np.where(voxel_data[\"value\"] & 4)", "(b_below_z and (len(b_below_z.tris) == 0)): ## There is at least", "voxel grid, +z=up # Vector to rotate around is cross", "Perform voxelization for all vehicle geometries in a list of", "is at least part of triangle here so mark as", "nodes self._resize() self.next_free = len(node_array) self.split_cache = np.zeros(len(self.tris), dtype=np.int32) def", 
"= {'Hatch_Assembly_Driver_Commander', 'Hatch_Assembly_Cargo'} HULLS = {\"Hull_Assembly_Parametric\", 'Hull_Assembly_Example_With_Connector'} MANIKINS = {\"Manikin\"}", "0.6, 1.0), scale_mode=\"none\", scale_factor=voxel_data[\"vox_size\"], mode='cube', opacity=1) # No manikins included,", "is None a voxel array is created and returned. \"\"\"", "yo, zo = np.where(voxel_data[\"value\"] & 16) # plot_vehicle = mlab.points3d(voxel_data[\"x_grid\"][xo],", "Create the uniformly spaced grid points x_grid = np.arange(x_min, x_max", "triangle from ``surf`` are considered occupied and or'ed with ``group_mask``.", "voxel_data[\"value\"] is None: voxel_data[\"value\"] = np.zeros((size_i - 1, size_j -", "DOORS = {'Hatch_Assembly_Rear_Ramp', 'Hatch_Assembly_Personnel_Door'} HATCHES = {'Hatch_Assembly_Driver_Commander', 'Hatch_Assembly_Cargo'} HULLS =", "# scale_mode=\"none\", scale_factor=voxel_data[\"vox_size\"], # mode='cube', opacity=1.0) mlab.show() # Save the", "voxel_data[\"y_grid\"][yo], voxel_data[\"z_grid\"][zo], color=(1.0, 0.5, 0.5), scale_mode=\"none\", scale_factor=voxel_data[\"vox_size\"], mode='cube', opacity=1) xo,", "= -np.ones((len(tris), 6), dtype=np.int32) self.tris = np.hstack((tris, minus_ones, tri_nums)) self.allocate_step", "key, veh_surf in veh_surfs.items(): x_min, x_max = min(x_min, np.min(veh_surf[\"x\"])), max(x_max,", "[voxel_data[k] for k in (\"x_grid\", \"y_grid\", \"z_grid\")] vox_size = voxel_data[\"vox_size\"]", "voxels if none were supplied if voxel_data[\"value\"] is None: voxel_data[\"value\"]", "test_bench_api as tb_api SETTINGS = tb_api.load_settings(\"settings.js\") DOORS = {'Hatch_Assembly_Rear_Ramp', 'Hatch_Assembly_Personnel_Door'}", "if y_pos < min_y: continue if y_pos > max_y: break", "coordinates of voxelated grid based on overall list of vehicle", "= mlab.points3d(voxel_data[\"x_grid\"][xo], voxel_data[\"y_grid\"][yo], voxel_data[\"z_grid\"][zo], color=(1, 1, 1), scale_mode=\"none\", scale_factor=voxel_data[\"vox_size\"], 
mode='cube',", "can be added later without needing a resize. Useful if", "y_max = y_min - settings[\"voxel_size\"], y_max + settings[\"voxel_size\"] z_min, z_max", "size_j - 1, size_k - 1), dtype=np.uint32) occupied_voxels = voxel_data[\"value\"]", "veh_surf[\"y\"], veh_surf[\"z\"])).T veh_surf['nodes'] = np.dot(veh_surf['nodes'], tr_mat[:3, :3]) veh_surf[\"x\"] = veh_surf['nodes'][:,", "There is at least part of triangle here so mark", "allocate_step amount. \"\"\" self.array_size = len(self.node_array) + self.allocate_step self.node_array =", "# Modify node coords so object aligns with cartesian axes", "were supplied if voxel_data[\"value\"] is None: voxel_data[\"value\"] = np.zeros((size_i -", "def add_node(self, node): \"\"\" Adds a new node to the", "Find overall bounding box x_min, x_max = 1e30, -1e30 y_min,", "using a tree, but we are using some of the", "z_min - settings[\"voxel_size\"], z_max + settings[\"voxel_size\"] ########################################### # Create the", "occupied and or'ed with ``group_mask``. If the supplied ``occupied_voxels`` is", "rot_around = np.cross(veh_up, np.array([0, 0, 1])) rot_ang = -np.arccos(veh_up[2]) tr_mat", "using some of the functions b_x_root = BSP_Element(b_tree.tris, b_tree) size_i,", "MANIKINS, single_file=False) # Modify node coords so object aligns with", "of nodes self._resize() self.next_free = len(node_array) self.split_cache = np.zeros(len(self.tris), dtype=np.int32)", "MANIKINS = {\"Manikin\"} # Special labels applied to specific types", "self.next_free - 1 def prepare_add(self, num_add_nodes): \"\"\" Make sure that", "= np.min(surf[\"x\"]) - vox_size, np.max(surf[\"x\"]) + vox_size min_y, max_y =", "= main(vehicle_surfs, tr_mat, VOXEL_LABELS, SETTINGS) from mayavi import mlab xo,", "around is cross product of current z axis and sfc", "+ num_add_nodes >= self.array_size: self._resize() return self.next_free def make_grid(veh_surfs, settings):", "parts. Combine on a uniform grid. 
\"\"\" for key, veh_surf", "added node. \"\"\" if self.next_free == self.array_size: self._resize() self.node_array[self.next_free] =", "min_z, max_z = np.min(surf[\"z\"]) - vox_size, np.max(surf[\"z\"]) + vox_size b_tree", "= {\"x_grid\": x_grid, \"y_grid\": y_grid, \"z_grid\": z_grid, \"vox_size\": settings[\"voxel_size\"], \"csys_trans\":", "resize. Useful if adding nodes from within cython where resizing", "y_min, y_max = y_min - settings[\"voxel_size\"], y_max + settings[\"voxel_size\"] z_min,", "x_grid, \"y_grid\": y_grid, \"z_grid\": z_grid, \"vox_size\": settings[\"voxel_size\"], \"csys_trans\": tr_mat, \"value\":", "z_max + settings[\"voxel_size\"], settings[\"voxel_size\"]) return x_grid, y_grid, z_grid def convert_geom(veh_surf,", "settings[\"voxel_size\"]) y_grid = np.arange(y_min, y_max + settings[\"voxel_size\"], settings[\"voxel_size\"]) z_grid =", "we are using some of the functions b_x_root = BSP_Element(b_tree.tris,", "tri_nums = np.arange(len(tris), dtype=np.int32).reshape((len(tris), 1)) minus_ones = -np.ones((len(tris), 6), dtype=np.int32)", "size_k - 1), dtype=np.uint32) occupied_voxels = voxel_data[\"value\"] ## The [1:]", "to the full list of nodes self._resize() self.next_free = len(node_array)", "Useful if adding nodes from within cython where resizing is", "local extents of this part min_x, max_x = np.min(surf[\"x\"]) -", "mlab.points3d(voxel_data[\"x_grid\"][xo], voxel_data[\"y_grid\"][yo], voxel_data[\"z_grid\"][zo], color=(1, 1, 1), scale_mode=\"none\", scale_factor=voxel_data[\"vox_size\"], mode='cube', opacity=0.05)", "scale_factor=voxel_data[\"vox_size\"], # mode='cube', opacity=1.0) mlab.show() # Save the voxelated model", "Make coordinates of voxelated grid based on overall list of", "b_x_root.split_at(0, x_pos) b_y_root = b_below_x for j, y_pos in enumerate(y_pts[1:]):", "opacity=1.0) mlab.show() # Save the voxelated model of the vehicle", "break if z_pos < min_z: continue if z_pos > max_z:", "y_min, y_max = 1e30, -1e30 z_min, 
z_max = 1e30, -1e30", "x_pts, y_pts, z_pts = [voxel_data[k] for k in (\"x_grid\", \"y_grid\",", "1 return self.next_free - 1 def prepare_add(self, num_add_nodes): \"\"\" Make", "y_grid = np.arange(y_min, y_max + settings[\"voxel_size\"], settings[\"voxel_size\"]) z_grid = np.arange(z_min,", "self.allocate_step = allocate_step self.node_array = node_array # Reference to the", "new node to the end of the node array (expanding", "veh_surf['nodes'][:, 1] veh_surf[\"z\"] = veh_surf['nodes'][:, 2] return veh_surf def find_occupied_voxels(surf,", "\"value\": None} for key, veh_surf in vehicle_comp_coords.items(): # Build up", "mode='cube', opacity=1.0) mlab.show() # Save the voxelated model of the", ":3]) veh_surf[\"x\"] = veh_surf['nodes'][:, 0] veh_surf[\"y\"] = veh_surf['nodes'][:, 1] veh_surf[\"z\"]", "array is created and returned. \"\"\" nodes = surf[\"nodes\"] tris", "voxel_data = data_io.load_array(vox_veh_folder, vox_veh_file, True) except: voxel_data = main(vehicle_surfs, tr_mat,", "\"\"\" Perform voxelization for all vehicle geometries in a list", "= b_y_root.split_at(1, y_pos) b_z_root = b_below_y for k, z_pos in", "the newly added node. \"\"\" if self.next_free == self.array_size: self._resize()", "provided transformation matrix; convert xyz node dict to nodes array", "by the allocate_step amount. 
\"\"\" self.array_size = len(self.node_array) + self.allocate_step", "list of vehicle surfaces \"\"\" ## Find overall bounding box", "to plot them # xo, yo, zo = np.where(voxel_data[\"value\"] &", "|= surf_mask b_z_root = b_above_z b_y_root = b_above_y b_x_root =", "for anything not in an identified set surf_mask = 1", "settings[\"voxel_size\"], \"csys_trans\": tr_mat, \"value\": None} for key, veh_surf in vehicle_comp_coords.items():", "included, no need to plot them # xo, yo, zo", "If the supplied ``occupied_voxels`` is None a voxel array is", "settings[\"voxel_size\"], settings[\"voxel_size\"]) y_grid = np.arange(y_min, y_max + settings[\"voxel_size\"], settings[\"voxel_size\"]) z_grid", "SETTINGS[\"voxel_size\"]) try: voxel_data = data_io.load_array(vox_veh_folder, vox_veh_file, True) except: voxel_data =", "self.array_size: self._resize() self.node_array[self.next_free] = node self.next_free += 1 return self.next_free", "{2: HULLS, 4: DOORS, 8: HATCHES, 16: MANIKINS} vehicle_surfs =", "k in (\"x_grid\", \"y_grid\", \"z_grid\")] vox_size = voxel_data[\"vox_size\"] ## Find", "settings[\"voxel_size\"], z_max + settings[\"voxel_size\"] ########################################### # Create the uniformly spaced", "for key, veh_surf in vehicle_comp_coords.items(): # Build up the voxel_data", "= np.min(surf[\"y\"]) - vox_size, np.max(surf[\"y\"]) + vox_size min_z, max_z =", "########################################### # Create the uniformly spaced grid points x_grid =", "find_occupied_voxels(surf, surf_mask, voxel_data): \"\"\" Voxels with any triangle from ``surf``", "np.vstack((veh_surf[\"x\"], veh_surf[\"y\"], veh_surf[\"z\"])).T veh_surf['nodes'] = np.dot(veh_surf['nodes'], tr_mat[:3, :3]) veh_surf[\"x\"] =", "a tree, but we are using some of the functions", "(\"x_grid\", \"y_grid\", \"z_grid\")] vox_size = voxel_data[\"vox_size\"] ## Find the local", "-1e30 y_min, y_max = 1e30, -1e30 z_min, z_max = 1e30,", "y_grid, z_grid def convert_geom(veh_surf, tr_mat): \"\"\" 
Rotate nodes using provided", "mlab.points3d(voxel_data[\"x_grid\"][xo], voxel_data[\"y_grid\"][yo], voxel_data[\"z_grid\"][zo], color=(1.0, 0.5, 0.5), scale_mode=\"none\", scale_factor=voxel_data[\"vox_size\"], mode='cube', opacity=1)", "from within cython where resizing is tricky. \"\"\" if self.next_free", "boolean grid\"\"\" import logging import numpy as np from rpl.tools.ray_tracing.bsp_tree_poly", "if adding nodes from within cython where resizing is tricky.", "allocate_step=100000): \"\"\" Store the triangles with an enumeration so that", "data_io class BSP_Grid(object): def __init__(self, node_array, tris, allocate_step=100000): \"\"\" Store", "convert_geom(veh_surf, tr_mat) x_grid, y_grid, z_grid = make_grid(vehicle_comp_coords, settings) voxel_data =", "part min_x, max_x = np.min(surf[\"x\"]) - vox_size, np.max(surf[\"x\"]) + vox_size", "HATCHES, 16: MANIKINS} vehicle_surfs = tb_api.load_geometry(tb_api.get_all_geom_set() - MANIKINS, single_file=False) #", "so mark as occupied occupied_voxels[i, j, k] |= surf_mask b_z_root", "Returns the index of the newly added node. \"\"\" if", "color=(0.9, 0.9, 0.9), scale_mode=\"none\", scale_factor=voxel_data[\"vox_size\"], mode='cube', opacity=1) xo, yo, zo", "\"\"\" Make sure that ``num_add_nodes`` can be added later without", "np.where(voxel_data[\"value\"] & 8) plot_vehicle = mlab.points3d(voxel_data[\"x_grid\"][xo], voxel_data[\"y_grid\"][yo], voxel_data[\"z_grid\"][zo], color=(0.6, 0.6,", "= node self.next_free += 1 return self.next_free - 1 def", "z_grid = make_grid(vehicle_comp_coords, settings) voxel_data = {\"x_grid\": x_grid, \"y_grid\": y_grid,", "tricky. 
\"\"\" if self.next_free + num_add_nodes >= self.array_size: self._resize() return", "Reference to the full list of nodes self._resize() self.next_free =", "+ vox_size min_z, max_z = np.min(surf[\"z\"]) - vox_size, np.max(surf[\"z\"]) +", "b_below_x for j, y_pos in enumerate(y_pts[1:]): if b_y_root is None:", "dtype=np.int32).reshape((len(tris), 1)) minus_ones = -np.ones((len(tris), 6), dtype=np.int32) self.tris = np.hstack((tris,", "+ settings[\"voxel_size\"], settings[\"voxel_size\"]) return x_grid, y_grid, z_grid def convert_geom(veh_surf, tr_mat):", "types of voxels VOXEL_LABELS = {2: HULLS, 4: DOORS, 8:", "n-1 splits for i, x_pos in enumerate(x_pts[1:]): if x_pos <", "- settings[\"voxel_size\"], x_max + settings[\"voxel_size\"] y_min, y_max = y_min -", "coordinates and find overall best bounding box veh_surf = convert_geom(veh_surf,", "if b_y_root is None: break if y_pos < min_y: continue", "= b_above_x return voxel_data ############# # Main code def main(vehicle_comp_coords,", "Rotate nodes using provided transformation matrix; convert xyz node dict", "y_pos in enumerate(y_pts[1:]): if b_y_root is None: break if y_pos", "grid. 
\"\"\" for key, veh_surf in vehicle_comp_coords.items(): # Convert coordinates", "is cross product of current z axis and sfc normal", "Make sure that ``num_add_nodes`` can be added later without needing", "b_tree = BSP_Grid(nodes, tris) # Create BSP tree elements- we're", "an enumeration so that even when they are subdivided their", "{'Hatch_Assembly_Rear_Ramp', 'Hatch_Assembly_Personnel_Door'} HATCHES = {'Hatch_Assembly_Driver_Commander', 'Hatch_Assembly_Cargo'} HULLS = {\"Hull_Assembly_Parametric\", 'Hull_Assembly_Example_With_Connector'}", "box x_min, x_max = 1e30, -1e30 y_min, y_max = 1e30,", "so object aligns with cartesian axes of occ voxel grid,", "points x_grid = np.arange(x_min, x_max + settings[\"voxel_size\"], settings[\"voxel_size\"]) y_grid =", "Adds a new node to the end of the node", "mlab xo, yo, zo = np.where(voxel_data[\"value\"] == 1) plot_vehicle =", "find overall best bounding box veh_surf = convert_geom(veh_surf, tr_mat) x_grid,", "= np.hstack((tris, minus_ones, tri_nums)) self.allocate_step = allocate_step self.node_array = node_array", "convert xyz node dict to nodes array \"\"\" veh_surf[\"nodes\"] =", "in voxel_masks.items(): if veh_surf['part_class'] in geo_set: surf_mask |= mask voxel_data", "= veh_surf['nodes'][:, 0] veh_surf[\"y\"] = veh_surf['nodes'][:, 1] veh_surf[\"z\"] = veh_surf['nodes'][:,", "\"\"\" for key, veh_surf in vehicle_comp_coords.items(): # Convert coordinates and", "but we are using some of the functions b_x_root =", "of parts. Combine on a uniform grid. 
\"\"\" for key,", "+ settings[\"voxel_size\"], settings[\"voxel_size\"]) z_grid = np.arange(z_min, z_max + settings[\"voxel_size\"], settings[\"voxel_size\"])", "np.max(veh_surf[\"y\"])) z_min, z_max = min(z_min, np.min(veh_surf[\"z\"])), max(z_max, np.max(veh_surf[\"z\"])) x_min, x_max", "- settings[\"voxel_size\"], y_max + settings[\"voxel_size\"] z_min, z_max = z_min -", "= z_min - settings[\"voxel_size\"], z_max + settings[\"voxel_size\"] ########################################### # Create", "list of parts. Combine on a uniform grid. \"\"\" for", "vox_size, np.max(surf[\"y\"]) + vox_size min_z, max_z = np.min(surf[\"z\"]) - vox_size,", "np.max(surf[\"y\"]) + vox_size min_z, max_z = np.min(surf[\"z\"]) - vox_size, np.max(surf[\"z\"])", "dtype=np.uint32) occupied_voxels = voxel_data[\"value\"] ## The [1:] is because to", "1e30, -1e30 for key, veh_surf in veh_surfs.items(): x_min, x_max =", "x_pos) b_y_root = b_below_x for j, y_pos in enumerate(y_pts[1:]): if", "x_max + settings[\"voxel_size\"], settings[\"voxel_size\"]) y_grid = np.arange(y_min, y_max + settings[\"voxel_size\"],", "num_add_nodes): \"\"\" Make sure that ``num_add_nodes`` can be added later", "+ vox_size min_y, max_y = np.min(surf[\"y\"]) - vox_size, np.max(surf[\"y\"]) +", "overall best bounding box veh_surf = convert_geom(veh_surf, tr_mat) x_grid, y_grid,", "Special labels applied to specific types of voxels VOXEL_LABELS =", "\"\"\" Rotate nodes using provided transformation matrix; convert xyz node", "\"\"\" Make coordinates of voxelated grid based on overall list", "if z_pos < min_z: continue if z_pos > max_z: break", "'Hatch_Assembly_Personnel_Door'} HATCHES = {'Hatch_Assembly_Driver_Commander', 'Hatch_Assembly_Cargo'} HULLS = {\"Hull_Assembly_Parametric\", 'Hull_Assembly_Example_With_Connector'} MANIKINS", "end of the node array (expanding if required). 
Returns the", "tr_mat = geom_utils.rotation_about_vector(rot_around, rot_ang) # voxel_data = main(vehicle_surfs, tr_mat, VOXEL_LABELS,", "node. \"\"\" if self.next_free == self.array_size: self._resize() self.node_array[self.next_free] = node", "a voxel array is created and returned. \"\"\" nodes =", "triangles with an enumeration so that even when they are", "BSP tree elements- we're not using a tree, but we", "tr_mat, VOXEL_LABELS, SETTINGS) from mayavi import mlab xo, yo, zo", "occupied_voxels = voxel_data[\"value\"] ## The [1:] is because to make", "k, z_pos in enumerate(z_pts[1:]): if b_z_root is None: break if", "y_max = min(y_min, np.min(veh_surf[\"y\"])), max(y_max, np.max(veh_surf[\"y\"])) z_min, z_max = min(z_min,", "= x_min - settings[\"voxel_size\"], x_max + settings[\"voxel_size\"] y_min, y_max =", "opacity=1) xo, yo, zo = np.where(voxel_data[\"value\"] & 2) plot_vehicle =", "Increase node array size by the allocate_step amount. \"\"\" self.array_size", "in vehicle_comp_coords.items(): # Convert coordinates and find overall best bounding", "surf_mask = 1 for mask, geo_set in voxel_masks.items(): if veh_surf['part_class']", "and find overall best bounding box veh_surf = convert_geom(veh_surf, tr_mat)", "\"\"\" Voxels with any triangle from ``surf`` are considered occupied", "3)))) def add_node(self, node): \"\"\" Adds a new node to", ">= self.array_size: self._resize() return self.next_free def make_grid(veh_surfs, settings): \"\"\" Make", "= data_io.load_array(vox_veh_folder, vox_veh_file, True) except: voxel_data = main(vehicle_surfs, tr_mat, VOXEL_LABELS,", "0.9), scale_mode=\"none\", scale_factor=voxel_data[\"vox_size\"], mode='cube', opacity=1) xo, yo, zo = np.where(voxel_data[\"value\"]", "self.allocate_step self.node_array = np.concatenate((self.node_array, np.zeros((self.allocate_step, 3)))) def add_node(self, node): \"\"\"", "settings[\"voxel_size\"], x_max + settings[\"voxel_size\"] y_min, y_max = y_min - settings[\"voxel_size\"],", 
"voxel_data ############# # Main code def main(vehicle_comp_coords, tr_mat, voxel_masks, settings):", "import mlab xo, yo, zo = np.where(voxel_data[\"value\"] == 1) plot_vehicle", "0.8), # scale_mode=\"none\", scale_factor=voxel_data[\"vox_size\"], # mode='cube', opacity=1.0) mlab.show() # Save", "and returned. \"\"\" nodes = surf[\"nodes\"] tris = surf[\"tris\"] x_pts,", "_resize(self): \"\"\" Increase node array size by the allocate_step amount.", "np.max(surf[\"x\"]) + vox_size min_y, max_y = np.min(surf[\"y\"]) - vox_size, np.max(surf[\"y\"])", "as np from rpl.tools.ray_tracing.bsp_tree_poly import BSP_Element from rpl.tools.geometry import geom_utils", "= np.min(surf[\"z\"]) - vox_size, np.max(surf[\"z\"]) + vox_size b_tree = BSP_Grid(nodes,", "HULLS, 4: DOORS, 8: HATCHES, 16: MANIKINS} vehicle_surfs = tb_api.load_geometry(tb_api.get_all_geom_set()", "np.zeros((size_i - 1, size_j - 1, size_k - 1), dtype=np.uint32)", "= np.where(voxel_data[\"value\"] & 16) # plot_vehicle = mlab.points3d(voxel_data[\"x_grid\"][xo], # voxel_data[\"y_grid\"][yo],", "geometries in a list of parts. 
Combine on a uniform", "uniformly spaced grid points x_grid = np.arange(x_min, x_max + settings[\"voxel_size\"],", "vox_veh_folder = r\"voxelated_models/vehicles/{}/{}\".format(SETTINGS[\"run_id\"], SETTINGS[\"voxel_size\"]) vox_veh_file = \"voxels_{}_vox{}_hacked\".format(SETTINGS[\"run_id\"], SETTINGS[\"voxel_size\"]) try: voxel_data", "y_min, y_max = min(y_min, np.min(veh_surf[\"y\"])), max(y_max, np.max(veh_surf[\"y\"])) z_min, z_max =", "vox_size, np.max(surf[\"z\"]) + vox_size b_tree = BSP_Grid(nodes, tris) # Create", "so that even when they are subdivided their identity is", "\"\"\" ## Find overall bounding box x_min, x_max = 1e30,", "vox_veh_file = \"voxels_{}_vox{}_hacked\".format(SETTINGS[\"run_id\"], SETTINGS[\"voxel_size\"]) try: voxel_data = data_io.load_array(vox_veh_folder, vox_veh_file, True)", "settings[\"voxel_size\"] z_min, z_max = z_min - settings[\"voxel_size\"], z_max + settings[\"voxel_size\"]", "and or'ed with ``group_mask``. If the supplied ``occupied_voxels`` is None", "geo_set in voxel_masks.items(): if veh_surf['part_class'] in geo_set: surf_mask |= mask", "b_above_y b_x_root = b_above_x return voxel_data ############# # Main code", "1, 1), scale_mode=\"none\", scale_factor=voxel_data[\"vox_size\"], mode='cube', opacity=0.05) xo, yo, zo =", "xo, yo, zo = np.where(voxel_data[\"value\"] == 1) plot_vehicle = mlab.points3d(voxel_data[\"x_grid\"][xo],", "|= mask voxel_data = find_occupied_voxels(veh_surf, surf_mask, voxel_data) return voxel_data if", "to specific types of voxels VOXEL_LABELS = {2: HULLS, 4:", "z_pos) if not (b_below_z and (len(b_below_z.tris) == 0)): ## There", "np.max(surf[\"z\"]) + vox_size b_tree = BSP_Grid(nodes, tris) # Create BSP", "grid, +z=up # Vector to rotate around is cross product", "using provided transformation matrix; convert xyz node dict to nodes", "index of the newly added node. \"\"\" if self.next_free ==", "cython where resizing is tricky. 
\"\"\" if self.next_free + num_add_nodes", "y_pos) b_z_root = b_below_y for k, z_pos in enumerate(z_pts[1:]): if", "None} for key, veh_surf in vehicle_comp_coords.items(): # Build up the", "np.concatenate((self.node_array, np.zeros((self.allocate_step, 3)))) def add_node(self, node): \"\"\" Adds a new", "node_array # Reference to the full list of nodes self._resize()", "voxelated grid based on overall list of vehicle surfaces \"\"\"", "1e30, -1e30 y_min, y_max = 1e30, -1e30 z_min, z_max =", "= np.where(voxel_data[\"value\"] & 4) plot_vehicle = mlab.points3d(voxel_data[\"x_grid\"][xo], voxel_data[\"y_grid\"][yo], voxel_data[\"z_grid\"][zo], color=(1.0,", "= b_z_root.split_at(2, z_pos) if not (b_below_z and (len(b_below_z.tris) == 0)):", "in geo_set: surf_mask |= mask voxel_data = find_occupied_voxels(veh_surf, surf_mask, voxel_data)", "surf_mask b_z_root = b_above_z b_y_root = b_above_y b_x_root = b_above_x", "2) plot_vehicle = mlab.points3d(voxel_data[\"x_grid\"][xo], voxel_data[\"y_grid\"][yo], voxel_data[\"z_grid\"][zo], color=(1, 1, 1), scale_mode=\"none\",", "node_array, tris, allocate_step=100000): \"\"\" Store the triangles with an enumeration", "node array size by the allocate_step amount. \"\"\" self.array_size =", "for key, veh_surf in vehicle_comp_coords.items(): # Convert coordinates and find", "## Find the local extents of this part min_x, max_x", "add_node(self, node): \"\"\" Adds a new node to the end", "y_pos < min_y: continue if y_pos > max_y: break b_above_y,", "xo, yo, zo = np.where(voxel_data[\"value\"] & 8) plot_vehicle = mlab.points3d(voxel_data[\"x_grid\"][xo],", "= b_above_z b_y_root = b_above_y b_x_root = b_above_x return voxel_data", "continue if z_pos > max_z: break b_above_z, b_below_z = b_z_root.split_at(2,", "max(y_max, np.max(veh_surf[\"y\"])) z_min, z_max = min(z_min, np.min(veh_surf[\"z\"])), max(z_max, np.max(veh_surf[\"z\"])) x_min,", "of the node array (expanding if required). 
Returns the index", "surface using a 3D boolean grid\"\"\" import logging import numpy", "elements- we're not using a tree, but we are using", "considered occupied and or'ed with ``group_mask``. If the supplied ``occupied_voxels``", "break b_above_y, b_below_y = b_y_root.split_at(1, y_pos) b_z_root = b_below_y for", "z_grid = np.arange(z_min, z_max + settings[\"voxel_size\"], settings[\"voxel_size\"]) return x_grid, y_grid,", "np.array([0, 0, 1])) rot_ang = -np.arccos(veh_up[2]) tr_mat = geom_utils.rotation_about_vector(rot_around, rot_ang)", "z_min, z_max = min(z_min, np.min(veh_surf[\"z\"])), max(z_max, np.max(veh_surf[\"z\"])) x_min, x_max =", "= veh_surf['nodes'][:, 1] veh_surf[\"z\"] = veh_surf['nodes'][:, 2] return veh_surf def", "up the voxel_data logging.debug(\"Sampling component: {}\".format(key)) ## Default mask is", "## Create the occupied voxels if none were supplied if", "np.min(veh_surf[\"z\"])), max(z_max, np.max(veh_surf[\"z\"])) x_min, x_max = x_min - settings[\"voxel_size\"], x_max", "when they are subdivided their identity is not lost. 
\"\"\"", "best bounding box veh_surf = convert_geom(veh_surf, tr_mat) x_grid, y_grid, z_grid", "except: voxel_data = main(vehicle_surfs, tr_mat, VOXEL_LABELS, SETTINGS) from mayavi import", "to nodes array \"\"\" veh_surf[\"nodes\"] = np.vstack((veh_surf[\"x\"], veh_surf[\"y\"], veh_surf[\"z\"])).T veh_surf['nodes']", "mask, geo_set in voxel_masks.items(): if veh_surf['part_class'] in geo_set: surf_mask |=", "0.]) rot_around = np.cross(veh_up, np.array([0, 0, 1])) rot_ang = -np.arccos(veh_up[2])", "# voxel_data = main(vehicle_surfs, tr_mat, VOXEL_LABELS, SETTINGS) vox_veh_folder = r\"voxelated_models/vehicles/{}/{}\".format(SETTINGS[\"run_id\"],", "given direction we need n-1 splits for i, x_pos in", "\"\"\" Store the triangles with an enumeration so that even", "scale_mode=\"none\", scale_factor=voxel_data[\"vox_size\"], mode='cube', opacity=1) # No manikins included, no need", "zo = np.where(voxel_data[\"value\"] & 16) # plot_vehicle = mlab.points3d(voxel_data[\"x_grid\"][xo], #", "color=(0.5, 1.0, 0.8), # scale_mode=\"none\", scale_factor=voxel_data[\"vox_size\"], # mode='cube', opacity=1.0) mlab.show()", "vehicle_comp_coords.items(): # Build up the voxel_data logging.debug(\"Sampling component: {}\".format(key)) ##", "occ voxel grid, +z=up # Vector to rotate around is", "tris = surf[\"tris\"] x_pts, y_pts, z_pts = [voxel_data[k] for k", "x_pos in enumerate(x_pts[1:]): if x_pos < min_x: continue if x_pos", "minus_ones = -np.ones((len(tris), 6), dtype=np.int32) self.tris = np.hstack((tris, minus_ones, tri_nums))", "from rpl.tools.api import test_bench_api as tb_api SETTINGS = tb_api.load_settings(\"settings.js\") DOORS", "len(self.node_array) + self.allocate_step self.node_array = np.concatenate((self.node_array, np.zeros((self.allocate_step, 3)))) def add_node(self,", "a resize. 
Useful if adding nodes from within cython where", "settings[\"voxel_size\"]) return x_grid, y_grid, z_grid def convert_geom(veh_surf, tr_mat): \"\"\" Rotate", "scale_factor=voxel_data[\"vox_size\"], mode='cube', opacity=1) # No manikins included, no need to", "-1e30 for key, veh_surf in veh_surfs.items(): x_min, x_max = min(x_min,", "any triangle from ``surf`` are considered occupied and or'ed with", "class BSP_Grid(object): def __init__(self, node_array, tris, allocate_step=100000): \"\"\" Store the", "mask voxel_data = find_occupied_voxels(veh_surf, surf_mask, voxel_data) return voxel_data if __name__", "Save the voxelated model of the vehicle (sans door and", "without needing a resize. Useful if adding nodes from within", "mayavi import mlab xo, yo, zo = np.where(voxel_data[\"value\"] == 1)", "if x_pos > max_x: break b_above_x, b_below_x = b_x_root.split_at(0, x_pos)", "Vector to rotate around is cross product of current z", "0.5), scale_mode=\"none\", scale_factor=voxel_data[\"vox_size\"], mode='cube', opacity=1) xo, yo, zo = np.where(voxel_data[\"value\"]", "Create the occupied voxels if none were supplied if voxel_data[\"value\"]", "of vehicle surfaces \"\"\" ## Find overall bounding box x_min,", "z_pos in enumerate(z_pts[1:]): if b_z_root is None: break if z_pos", "if self.next_free == self.array_size: self._resize() self.node_array[self.next_free] = node self.next_free +=", "1.0, 0.8), # scale_mode=\"none\", scale_factor=voxel_data[\"vox_size\"], # mode='cube', opacity=1.0) mlab.show() #", "applied to specific types of voxels VOXEL_LABELS = {2: HULLS,", "SETTINGS[\"voxel_size\"]) vox_veh_file = \"voxels_{}_vox{}_hacked\".format(SETTINGS[\"run_id\"], SETTINGS[\"voxel_size\"]) try: voxel_data = data_io.load_array(vox_veh_folder, vox_veh_file,", "= BSP_Grid(nodes, tris) # Create BSP tree elements- we're not", "numpy as np from rpl.tools.ray_tracing.bsp_tree_poly import BSP_Element from rpl.tools.geometry import", "tree elements- we're not using a tree, but we 
are", "= 1e30, -1e30 z_min, z_max = 1e30, -1e30 for key,", "y_pts, z_pts = [voxel_data[k] for k in (\"x_grid\", \"y_grid\", \"z_grid\")]", "= main(vehicle_surfs, tr_mat, VOXEL_LABELS, SETTINGS) vox_veh_folder = r\"voxelated_models/vehicles/{}/{}\".format(SETTINGS[\"run_id\"], SETTINGS[\"voxel_size\"]) vox_veh_file", "x_max = 1e30, -1e30 y_min, y_max = 1e30, -1e30 z_min,", "in an identified set surf_mask = 1 for mask, geo_set", "voxel_data[\"value\"] = np.zeros((size_i - 1, size_j - 1, size_k -", "veh_surf[\"z\"] = veh_surf['nodes'][:, 2] return veh_surf def find_occupied_voxels(surf, surf_mask, voxel_data):", "+ settings[\"voxel_size\"] z_min, z_max = z_min - settings[\"voxel_size\"], z_max +", "z_min, z_max = z_min - settings[\"voxel_size\"], z_max + settings[\"voxel_size\"] ###########################################", "make_grid(vehicle_comp_coords, settings) voxel_data = {\"x_grid\": x_grid, \"y_grid\": y_grid, \"z_grid\": z_grid,", "data_io.load_array(vox_veh_folder, vox_veh_file, True) except: voxel_data = main(vehicle_surfs, tr_mat, VOXEL_LABELS, SETTINGS)", "= mlab.points3d(voxel_data[\"x_grid\"][xo], voxel_data[\"y_grid\"][yo], voxel_data[\"z_grid\"][zo], color=(0.6, 0.6, 1.0), scale_mode=\"none\", scale_factor=voxel_data[\"vox_size\"], mode='cube',", "zo = np.where(voxel_data[\"value\"] & 4) plot_vehicle = mlab.points3d(voxel_data[\"x_grid\"][xo], voxel_data[\"y_grid\"][yo], voxel_data[\"z_grid\"][zo],", "8: HATCHES, 16: MANIKINS} vehicle_surfs = tb_api.load_geometry(tb_api.get_all_geom_set() - MANIKINS, single_file=False)", "node): \"\"\" Adds a new node to the end of", "x_min, x_max = 1e30, -1e30 y_min, y_max = 1e30, -1e30", "-1e30 z_min, z_max = 1e30, -1e30 for key, veh_surf in", "def prepare_add(self, num_add_nodes): \"\"\" Make sure that ``num_add_nodes`` can be", "## The [1:] is because to make n voxels in", "to rotate around is cross product of current z axis", "axis and sfc normal veh_up = np.array([0., 1., 0.]) rot_around", "a triangulated 
surface using a 3D boolean grid\"\"\" import logging", "SETTINGS) vox_veh_folder = r\"voxelated_models/vehicles/{}/{}\".format(SETTINGS[\"run_id\"], SETTINGS[\"voxel_size\"]) vox_veh_file = \"voxels_{}_vox{}_hacked\".format(SETTINGS[\"run_id\"], SETTINGS[\"voxel_size\"]) try:", "return self.next_free def make_grid(veh_surfs, settings): \"\"\" Make coordinates of voxelated", "> max_y: break b_above_y, b_below_y = b_y_root.split_at(1, y_pos) b_z_root =", "set surf_mask = 1 for mask, geo_set in voxel_masks.items(): if", "direction we need n-1 splits for i, x_pos in enumerate(x_pts[1:]):", "## Find overall bounding box x_min, x_max = 1e30, -1e30", "x_grid = np.arange(x_min, x_max + settings[\"voxel_size\"], settings[\"voxel_size\"]) y_grid = np.arange(y_min,", "not using a tree, but we are using some of", "y_max = 1e30, -1e30 z_min, z_max = 1e30, -1e30 for", "node coords so object aligns with cartesian axes of occ", "__name__ == \"__main__\": from rpl.tools.api import test_bench_api as tb_api SETTINGS", "np.cross(veh_up, np.array([0, 0, 1])) rot_ang = -np.arccos(veh_up[2]) tr_mat = geom_utils.rotation_about_vector(rot_around,", "veh_surf['part_class'] in geo_set: surf_mask |= mask voxel_data = find_occupied_voxels(veh_surf, surf_mask,", "np.min(veh_surf[\"x\"])), max(x_max, np.max(veh_surf[\"x\"])) y_min, y_max = min(y_min, np.min(veh_surf[\"y\"])), max(y_max, np.max(veh_surf[\"y\"]))", "x_max = x_min - settings[\"voxel_size\"], x_max + settings[\"voxel_size\"] y_min, y_max", "rot_ang) # voxel_data = main(vehicle_surfs, tr_mat, VOXEL_LABELS, SETTINGS) vox_veh_folder =", "# mode='cube', opacity=1.0) mlab.show() # Save the voxelated model of", "rotate around is cross product of current z axis and", "is None: break if y_pos < min_y: continue if y_pos", "self.node_array[self.next_free] = node self.next_free += 1 return self.next_free - 1", "np.where(voxel_data[\"value\"] & 4) plot_vehicle = mlab.points3d(voxel_data[\"x_grid\"][xo], voxel_data[\"y_grid\"][yo], 
voxel_data[\"z_grid\"][zo], color=(1.0, 0.5,", "of voxelated grid based on overall list of vehicle surfaces", "\"\"\" if self.next_free + num_add_nodes >= self.array_size: self._resize() return self.next_free", "product of current z axis and sfc normal veh_up =", "that even when they are subdivided their identity is not", "& 2) plot_vehicle = mlab.points3d(voxel_data[\"x_grid\"][xo], voxel_data[\"y_grid\"][yo], voxel_data[\"z_grid\"][zo], color=(1, 1, 1),", "{'Hatch_Assembly_Driver_Commander', 'Hatch_Assembly_Cargo'} HULLS = {\"Hull_Assembly_Parametric\", 'Hull_Assembly_Example_With_Connector'} MANIKINS = {\"Manikin\"} #", "# color=(0.5, 1.0, 0.8), # scale_mode=\"none\", scale_factor=voxel_data[\"vox_size\"], # mode='cube', opacity=1.0)", "BSP_Grid(object): def __init__(self, node_array, tris, allocate_step=100000): \"\"\" Store the triangles", "not (b_below_z and (len(b_below_z.tris) == 0)): ## There is at", "in enumerate(z_pts[1:]): if b_z_root is None: break if z_pos <", "settings[\"voxel_size\"]) z_grid = np.arange(z_min, z_max + settings[\"voxel_size\"], settings[\"voxel_size\"]) return x_grid,", "+z=up # Vector to rotate around is cross product of", "node dict to nodes array \"\"\" veh_surf[\"nodes\"] = np.vstack((veh_surf[\"x\"], veh_surf[\"y\"],", "16) # plot_vehicle = mlab.points3d(voxel_data[\"x_grid\"][xo], # voxel_data[\"y_grid\"][yo], # voxel_data[\"z_grid\"][zo], #", "surf_mask |= mask voxel_data = find_occupied_voxels(veh_surf, surf_mask, voxel_data) return voxel_data", "voxel_masks.items(): if veh_surf['part_class'] in geo_set: surf_mask |= mask voxel_data =", "vehicle_comp_coords.items(): # Convert coordinates and find overall best bounding box", "- MANIKINS, single_file=False) # Modify node coords so object aligns", "= veh_surf['nodes'][:, 2] return veh_surf def find_occupied_voxels(surf, surf_mask, voxel_data): \"\"\"", "= b_below_x for j, y_pos in enumerate(y_pts[1:]): if b_y_root is", "= min(z_min, np.min(veh_surf[\"z\"])), max(z_max, 
np.max(veh_surf[\"z\"])) x_min, x_max = x_min -", "np.arange(z_min, z_max + settings[\"voxel_size\"], settings[\"voxel_size\"]) return x_grid, y_grid, z_grid def", "0)): ## There is at least part of triangle here", "aligns with cartesian axes of occ voxel grid, +z=up #", "voxel_data[\"y_grid\"][yo], # voxel_data[\"z_grid\"][zo], # color=(0.5, 1.0, 0.8), # scale_mode=\"none\", scale_factor=voxel_data[\"vox_size\"],", "surf[\"nodes\"] tris = surf[\"tris\"] x_pts, y_pts, z_pts = [voxel_data[k] for", "y_max + settings[\"voxel_size\"] z_min, z_max = z_min - settings[\"voxel_size\"], z_max", "resizing is tricky. \"\"\" if self.next_free + num_add_nodes >= self.array_size:", "``surf`` are considered occupied and or'ed with ``group_mask``. If the", "a uniform grid. \"\"\" for key, veh_surf in vehicle_comp_coords.items(): #", "= {'Hatch_Assembly_Rear_Ramp', 'Hatch_Assembly_Personnel_Door'} HATCHES = {'Hatch_Assembly_Driver_Commander', 'Hatch_Assembly_Cargo'} HULLS = {\"Hull_Assembly_Parametric\",", "= tb_api.load_geometry(tb_api.get_all_geom_set() - MANIKINS, single_file=False) # Modify node coords so", "break if y_pos < min_y: continue if y_pos > max_y:", "tree, but we are using some of the functions b_x_root", "mode='cube', opacity=1) xo, yo, zo = np.where(voxel_data[\"value\"] & 8) plot_vehicle", "< min_x: continue if x_pos > max_x: break b_above_x, b_below_x", "# voxel_data[\"y_grid\"][yo], # voxel_data[\"z_grid\"][zo], # color=(0.5, 1.0, 0.8), # scale_mode=\"none\",", "in a given direction we need n-1 splits for i,", "in a list of parts. 
Combine on a uniform grid.", "- vox_size, np.max(surf[\"y\"]) + vox_size min_z, max_z = np.min(surf[\"z\"]) -", "= min(y_min, np.min(veh_surf[\"y\"])), max(y_max, np.max(veh_surf[\"y\"])) z_min, z_max = min(z_min, np.min(veh_surf[\"z\"])),", "veh_surf[\"x\"] = veh_surf['nodes'][:, 0] veh_surf[\"y\"] = veh_surf['nodes'][:, 1] veh_surf[\"z\"] =", "x_max + settings[\"voxel_size\"] y_min, y_max = y_min - settings[\"voxel_size\"], y_max", "min_y, max_y = np.min(surf[\"y\"]) - vox_size, np.max(surf[\"y\"]) + vox_size min_z,", "for j, y_pos in enumerate(y_pts[1:]): if b_y_root is None: break", "on a uniform grid. \"\"\" for key, veh_surf in vehicle_comp_coords.items():", "= {2: HULLS, 4: DOORS, 8: HATCHES, 16: MANIKINS} vehicle_surfs", "and sfc normal veh_up = np.array([0., 1., 0.]) rot_around =", "color=(1, 1, 1), scale_mode=\"none\", scale_factor=voxel_data[\"vox_size\"], mode='cube', opacity=0.05) xo, yo, zo", "b_below_z = b_z_root.split_at(2, z_pos) if not (b_below_z and (len(b_below_z.tris) ==", "\"\"\" nodes = surf[\"nodes\"] tris = surf[\"tris\"] x_pts, y_pts, z_pts", "self.next_free + num_add_nodes >= self.array_size: self._resize() return self.next_free def make_grid(veh_surfs,", "grid points x_grid = np.arange(x_min, x_max + settings[\"voxel_size\"], settings[\"voxel_size\"]) y_grid", "scale_mode=\"none\", scale_factor=voxel_data[\"vox_size\"], mode='cube', opacity=0.05) xo, yo, zo = np.where(voxel_data[\"value\"] &", "array (expanding if required). Returns the index of the newly", "opacity=0.05) xo, yo, zo = np.where(voxel_data[\"value\"] & 4) plot_vehicle =", "to the end of the node array (expanding if required).", "least part of triangle here so mark as occupied occupied_voxels[i,", "if b_z_root is None: break if z_pos < min_z: continue", "splits for i, x_pos in enumerate(x_pts[1:]): if x_pos < min_x:", "a list of parts. Combine on a uniform grid. 
\"\"\"", "logging.debug(\"Sampling component: {}\".format(key)) ## Default mask is 1 for anything", "1, size_j - 1, size_k - 1), dtype=np.uint32) occupied_voxels =", "voxel_data[\"vox_size\"] ## Find the local extents of this part min_x,", "voxels VOXEL_LABELS = {2: HULLS, 4: DOORS, 8: HATCHES, 16:", "{\"x_grid\": x_grid, \"y_grid\": y_grid, \"z_grid\": z_grid, \"vox_size\": settings[\"voxel_size\"], \"csys_trans\": tr_mat,", "vehicle surfaces \"\"\" ## Find overall bounding box x_min, x_max", "geom_utils import data_io class BSP_Grid(object): def __init__(self, node_array, tris, allocate_step=100000):", "if x_pos < min_x: continue if x_pos > max_x: break", "############# # Main code def main(vehicle_comp_coords, tr_mat, voxel_masks, settings): \"\"\"", "yo, zo = np.where(voxel_data[\"value\"] & 2) plot_vehicle = mlab.points3d(voxel_data[\"x_grid\"][xo], voxel_data[\"y_grid\"][yo],", "z_max = z_min - settings[\"voxel_size\"], z_max + settings[\"voxel_size\"] ########################################### #", "be added later without needing a resize. Useful if adding", "tris) # Create BSP tree elements- we're not using a", "the functions b_x_root = BSP_Element(b_tree.tris, b_tree) size_i, size_j, size_k =", "min_x, max_x = np.min(surf[\"x\"]) - vox_size, np.max(surf[\"x\"]) + vox_size min_y,", "Combine on a uniform grid. 
\"\"\" for key, veh_surf in", "= b_x_root.split_at(0, x_pos) b_y_root = b_below_x for j, y_pos in", "zo = np.where(voxel_data[\"value\"] & 2) plot_vehicle = mlab.points3d(voxel_data[\"x_grid\"][xo], voxel_data[\"y_grid\"][yo], voxel_data[\"z_grid\"][zo],", "import logging import numpy as np from rpl.tools.ray_tracing.bsp_tree_poly import BSP_Element", "= np.concatenate((self.node_array, np.zeros((self.allocate_step, 3)))) def add_node(self, node): \"\"\" Adds a", "plot_vehicle = mlab.points3d(voxel_data[\"x_grid\"][xo], voxel_data[\"y_grid\"][yo], voxel_data[\"z_grid\"][zo], color=(0.9, 0.9, 0.9), scale_mode=\"none\", scale_factor=voxel_data[\"vox_size\"],", "import geom_utils import data_io class BSP_Grid(object): def __init__(self, node_array, tris,", "part of triangle here so mark as occupied occupied_voxels[i, j,", "voxelated model of the vehicle (sans door and other excluded", "is created and returned. \"\"\" nodes = surf[\"nodes\"] tris =", "mlab.points3d(voxel_data[\"x_grid\"][xo], voxel_data[\"y_grid\"][yo], voxel_data[\"z_grid\"][zo], color=(0.9, 0.9, 0.9), scale_mode=\"none\", scale_factor=voxel_data[\"vox_size\"], mode='cube', opacity=1)", "mlab.points3d(voxel_data[\"x_grid\"][xo], # voxel_data[\"y_grid\"][yo], # voxel_data[\"z_grid\"][zo], # color=(0.5, 1.0, 0.8), #", "of triangle here so mark as occupied occupied_voxels[i, j, k]", "1])) rot_ang = -np.arccos(veh_up[2]) tr_mat = geom_utils.rotation_about_vector(rot_around, rot_ang) # voxel_data", "mlab.points3d(voxel_data[\"x_grid\"][xo], voxel_data[\"y_grid\"][yo], voxel_data[\"z_grid\"][zo], color=(0.6, 0.6, 1.0), scale_mode=\"none\", scale_factor=voxel_data[\"vox_size\"], mode='cube', opacity=1)", "r\"voxelated_models/vehicles/{}/{}\".format(SETTINGS[\"run_id\"], SETTINGS[\"voxel_size\"]) vox_veh_file = \"voxels_{}_vox{}_hacked\".format(SETTINGS[\"run_id\"], SETTINGS[\"voxel_size\"]) try: voxel_data = data_io.load_array(vox_veh_folder,", "settings): \"\"\" Perform voxelization for all vehicle 
geometries in a", "np.min(veh_surf[\"y\"])), max(y_max, np.max(veh_surf[\"y\"])) z_min, z_max = min(z_min, np.min(veh_surf[\"z\"])), max(z_max, np.max(veh_surf[\"z\"]))", "mode='cube', opacity=1) xo, yo, zo = np.where(voxel_data[\"value\"] & 2) plot_vehicle", "x_min, x_max = min(x_min, np.min(veh_surf[\"x\"])), max(x_max, np.max(veh_surf[\"x\"])) y_min, y_max =", "for all vehicle geometries in a list of parts. Combine", "z_grid def convert_geom(veh_surf, tr_mat): \"\"\" Rotate nodes using provided transformation", "to make n voxels in a given direction we need", "voxel_data[\"z_grid\"][zo], color=(1.0, 0.5, 0.5), scale_mode=\"none\", scale_factor=voxel_data[\"vox_size\"], mode='cube', opacity=1) xo, yo,", "# xo, yo, zo = np.where(voxel_data[\"value\"] & 16) # plot_vehicle", "max_z = np.min(surf[\"z\"]) - vox_size, np.max(surf[\"z\"]) + vox_size b_tree =", "the triangles with an enumeration so that even when they", "nodes array \"\"\" veh_surf[\"nodes\"] = np.vstack((veh_surf[\"x\"], veh_surf[\"y\"], veh_surf[\"z\"])).T veh_surf['nodes'] =", "veh_surf in veh_surfs.items(): x_min, x_max = min(x_min, np.min(veh_surf[\"x\"])), max(x_max, np.max(veh_surf[\"x\"]))", "1), scale_mode=\"none\", scale_factor=voxel_data[\"vox_size\"], mode='cube', opacity=0.05) xo, yo, zo = np.where(voxel_data[\"value\"]", "= 1e30, -1e30 for key, veh_surf in veh_surfs.items(): x_min, x_max", "& 4) plot_vehicle = mlab.points3d(voxel_data[\"x_grid\"][xo], voxel_data[\"y_grid\"][yo], voxel_data[\"z_grid\"][zo], color=(1.0, 0.5, 0.5),", "Convert coordinates and find overall best bounding box veh_surf =", "voxelization for all vehicle geometries in a list of parts.", "np.zeros(len(self.tris), dtype=np.int32) def _resize(self): \"\"\" Increase node array size by", "2] return veh_surf def find_occupied_voxels(surf, surf_mask, voxel_data): \"\"\" Voxels with", "self.next_free += 1 return self.next_free - 1 def prepare_add(self, num_add_nodes):", "break b_above_x, b_below_x = b_x_root.split_at(0, 
x_pos) b_y_root = b_below_x for", "main(vehicle_comp_coords, tr_mat, voxel_masks, settings): \"\"\" Perform voxelization for all vehicle", "that ``num_add_nodes`` can be added later without needing a resize.", "subdivided their identity is not lost. \"\"\" tri_nums = np.arange(len(tris),", "x_grid, y_grid, z_grid def convert_geom(veh_surf, tr_mat): \"\"\" Rotate nodes using", "= np.vstack((veh_surf[\"x\"], veh_surf[\"y\"], veh_surf[\"z\"])).T veh_surf['nodes'] = np.dot(veh_surf['nodes'], tr_mat[:3, :3]) veh_surf[\"x\"]", "self.next_free = len(node_array) self.split_cache = np.zeros(len(self.tris), dtype=np.int32) def _resize(self): \"\"\"", "as tb_api SETTINGS = tb_api.load_settings(\"settings.js\") DOORS = {'Hatch_Assembly_Rear_Ramp', 'Hatch_Assembly_Personnel_Door'} HATCHES", "zo = np.where(voxel_data[\"value\"] & 8) plot_vehicle = mlab.points3d(voxel_data[\"x_grid\"][xo], voxel_data[\"y_grid\"][yo], voxel_data[\"z_grid\"][zo],", "surfaces \"\"\" ## Find overall bounding box x_min, x_max =", "HATCHES = {'Hatch_Assembly_Driver_Commander', 'Hatch_Assembly_Cargo'} HULLS = {\"Hull_Assembly_Parametric\", 'Hull_Assembly_Example_With_Connector'} MANIKINS =", "b_above_x return voxel_data ############# # Main code def main(vehicle_comp_coords, tr_mat,", "= np.where(voxel_data[\"value\"] & 2) plot_vehicle = mlab.points3d(voxel_data[\"x_grid\"][xo], voxel_data[\"y_grid\"][yo], voxel_data[\"z_grid\"][zo], color=(1,", "vox_size min_y, max_y = np.min(surf[\"y\"]) - vox_size, np.max(surf[\"y\"]) + vox_size", "grid\"\"\" import logging import numpy as np from rpl.tools.ray_tracing.bsp_tree_poly import", "import BSP_Element from rpl.tools.geometry import geom_utils import data_io class BSP_Grid(object):", "# Vector to rotate around is cross product of current", "scale_mode=\"none\", scale_factor=voxel_data[\"vox_size\"], # mode='cube', opacity=1.0) mlab.show() # Save the voxelated", "b_z_root is None: break if z_pos < min_z: continue if", "True) except: voxel_data = 
main(vehicle_surfs, tr_mat, VOXEL_LABELS, SETTINGS) from mayavi", "if veh_surf['part_class'] in geo_set: surf_mask |= mask voxel_data = find_occupied_voxels(veh_surf,", "= r\"voxelated_models/vehicles/{}/{}\".format(SETTINGS[\"run_id\"], SETTINGS[\"voxel_size\"]) vox_veh_file = \"voxels_{}_vox{}_hacked\".format(SETTINGS[\"run_id\"], SETTINGS[\"voxel_size\"]) try: voxel_data =", "manikins included, no need to plot them # xo, yo,", "veh_surf in vehicle_comp_coords.items(): # Convert coordinates and find overall best", "# Build up the voxel_data logging.debug(\"Sampling component: {}\".format(key)) ## Default", "color=(1.0, 0.5, 0.5), scale_mode=\"none\", scale_factor=voxel_data[\"vox_size\"], mode='cube', opacity=1) xo, yo, zo", "\"\"\" Represent a triangulated surface using a 3D boolean grid\"\"\"", "-np.ones((len(tris), 6), dtype=np.int32) self.tris = np.hstack((tris, minus_ones, tri_nums)) self.allocate_step =", "- vox_size, np.max(surf[\"z\"]) + vox_size b_tree = BSP_Grid(nodes, tris) #", "voxel_data[\"value\"] ## The [1:] is because to make n voxels", "min_y: continue if y_pos > max_y: break b_above_y, b_below_y =", "vehicle_surfs = tb_api.load_geometry(tb_api.get_all_geom_set() - MANIKINS, single_file=False) # Modify node coords", "of current z axis and sfc normal veh_up = np.array([0.,", "occupied occupied_voxels[i, j, k] |= surf_mask b_z_root = b_above_z b_y_root", "# Convert coordinates and find overall best bounding box veh_surf", "with any triangle from ``surf`` are considered occupied and or'ed", "xyz node dict to nodes array \"\"\" veh_surf[\"nodes\"] = np.vstack((veh_surf[\"x\"],", "== 1) plot_vehicle = mlab.points3d(voxel_data[\"x_grid\"][xo], voxel_data[\"y_grid\"][yo], voxel_data[\"z_grid\"][zo], color=(0.9, 0.9, 0.9),", "yo, zo = np.where(voxel_data[\"value\"] & 4) plot_vehicle = mlab.points3d(voxel_data[\"x_grid\"][xo], voxel_data[\"y_grid\"][yo],", "0, 1])) rot_ang = -np.arccos(veh_up[2]) tr_mat = geom_utils.rotation_about_vector(rot_around, 
rot_ang) #", "= voxel_data[\"vox_size\"] ## Find the local extents of this part", "Store the triangles with an enumeration so that even when", "self._resize() self.node_array[self.next_free] = node self.next_free += 1 return self.next_free -", "find_occupied_voxels(veh_surf, surf_mask, voxel_data) return voxel_data if __name__ == \"__main__\": from", "- 1, size_j - 1, size_k - 1), dtype=np.uint32) occupied_voxels", "3D boolean grid\"\"\" import logging import numpy as np from", "returned. \"\"\" nodes = surf[\"nodes\"] tris = surf[\"tris\"] x_pts, y_pts,", "in enumerate(y_pts[1:]): if b_y_root is None: break if y_pos <", "return voxel_data ############# # Main code def main(vehicle_comp_coords, tr_mat, voxel_masks,", "grid based on overall list of vehicle surfaces \"\"\" ##", "newly added node. \"\"\" if self.next_free == self.array_size: self._resize() self.node_array[self.next_free]", "overall bounding box x_min, x_max = 1e30, -1e30 y_min, y_max", "is 1 for anything not in an identified set surf_mask", "the local extents of this part min_x, max_x = np.min(surf[\"x\"])", "of the newly added node. 
\"\"\" if self.next_free == self.array_size:", "{}\".format(key)) ## Default mask is 1 for anything not in", "No manikins included, no need to plot them # xo,", "= len(node_array) self.split_cache = np.zeros(len(self.tris), dtype=np.int32) def _resize(self): \"\"\" Increase", "veh_surf['nodes'][:, 2] return veh_surf def find_occupied_voxels(surf, surf_mask, voxel_data): \"\"\" Voxels", "DOORS, 8: HATCHES, 16: MANIKINS} vehicle_surfs = tb_api.load_geometry(tb_api.get_all_geom_set() - MANIKINS,", "try: voxel_data = data_io.load_array(vox_veh_folder, vox_veh_file, True) except: voxel_data = main(vehicle_surfs,", "main(vehicle_surfs, tr_mat, VOXEL_LABELS, SETTINGS) from mayavi import mlab xo, yo,", "component: {}\".format(key)) ## Default mask is 1 for anything not", "settings[\"voxel_size\"] ########################################### # Create the uniformly spaced grid points x_grid", "voxel_data = main(vehicle_surfs, tr_mat, VOXEL_LABELS, SETTINGS) from mayavi import mlab", "amount. \"\"\" self.array_size = len(self.node_array) + self.allocate_step self.node_array = np.concatenate((self.node_array,", "z_max + settings[\"voxel_size\"] ########################################### # Create the uniformly spaced grid", "created and returned. \"\"\" nodes = surf[\"nodes\"] tris = surf[\"tris\"]", "for k, z_pos in enumerate(z_pts[1:]): if b_z_root is None: break", "\"z_grid\")] vox_size = voxel_data[\"vox_size\"] ## Find the local extents of", "y_grid, \"z_grid\": z_grid, \"vox_size\": settings[\"voxel_size\"], \"csys_trans\": tr_mat, \"value\": None} for", "``num_add_nodes`` can be added later without needing a resize. 
Useful", "node to the end of the node array (expanding if", "cross product of current z axis and sfc normal veh_up", "max_x = np.min(surf[\"x\"]) - vox_size, np.max(surf[\"x\"]) + vox_size min_y, max_y", "np.arange(len(tris), dtype=np.int32).reshape((len(tris), 1)) minus_ones = -np.ones((len(tris), 6), dtype=np.int32) self.tris =", "max_y: break b_above_y, b_below_y = b_y_root.split_at(1, y_pos) b_z_root = b_below_y", "np.arange(x_min, x_max + settings[\"voxel_size\"], settings[\"voxel_size\"]) y_grid = np.arange(y_min, y_max +", "tb_api SETTINGS = tb_api.load_settings(\"settings.js\") DOORS = {'Hatch_Assembly_Rear_Ramp', 'Hatch_Assembly_Personnel_Door'} HATCHES =", "= np.cross(veh_up, np.array([0, 0, 1])) rot_ang = -np.arccos(veh_up[2]) tr_mat =", "an identified set surf_mask = 1 for mask, geo_set in", "a new node to the end of the node array", "mark as occupied occupied_voxels[i, j, k] |= surf_mask b_z_root =", "0.9, 0.9), scale_mode=\"none\", scale_factor=voxel_data[\"vox_size\"], mode='cube', opacity=1) xo, yo, zo =", "matrix; convert xyz node dict to nodes array \"\"\" veh_surf[\"nodes\"]", "veh_surf['nodes'] = np.dot(veh_surf['nodes'], tr_mat[:3, :3]) veh_surf[\"x\"] = veh_surf['nodes'][:, 0] veh_surf[\"y\"]", "= len(x_pts), len(y_pts), len(z_pts) ## Create the occupied voxels if", "voxel_data = main(vehicle_surfs, tr_mat, VOXEL_LABELS, SETTINGS) vox_veh_folder = r\"voxelated_models/vehicles/{}/{}\".format(SETTINGS[\"run_id\"], SETTINGS[\"voxel_size\"])", "{\"Hull_Assembly_Parametric\", 'Hull_Assembly_Example_With_Connector'} MANIKINS = {\"Manikin\"} # Special labels applied to", "array size by the allocate_step amount. 
\"\"\" self.array_size = len(self.node_array)", "of voxels VOXEL_LABELS = {2: HULLS, 4: DOORS, 8: HATCHES,", "= mlab.points3d(voxel_data[\"x_grid\"][xo], # voxel_data[\"y_grid\"][yo], # voxel_data[\"z_grid\"][zo], # color=(0.5, 1.0, 0.8),", "return self.next_free - 1 def prepare_add(self, num_add_nodes): \"\"\" Make sure", "within cython where resizing is tricky. \"\"\" if self.next_free +", "b_above_z b_y_root = b_above_y b_x_root = b_above_x return voxel_data #############", "\"\"\" if self.next_free == self.array_size: self._resize() self.node_array[self.next_free] = node self.next_free", "= np.arange(len(tris), dtype=np.int32).reshape((len(tris), 1)) minus_ones = -np.ones((len(tris), 6), dtype=np.int32) self.tris", "required). Returns the index of the newly added node. \"\"\"", "= node_array # Reference to the full list of nodes", "of occ voxel grid, +z=up # Vector to rotate around", "plot_vehicle = mlab.points3d(voxel_data[\"x_grid\"][xo], voxel_data[\"y_grid\"][yo], voxel_data[\"z_grid\"][zo], color=(1.0, 0.5, 0.5), scale_mode=\"none\", scale_factor=voxel_data[\"vox_size\"],", "1, size_k - 1), dtype=np.uint32) occupied_voxels = voxel_data[\"value\"] ## The", "b_y_root.split_at(1, y_pos) b_z_root = b_below_y for k, z_pos in enumerate(z_pts[1:]):", "1., 0.]) rot_around = np.cross(veh_up, np.array([0, 0, 1])) rot_ang =", "rot_ang = -np.arccos(veh_up[2]) tr_mat = geom_utils.rotation_about_vector(rot_around, rot_ang) # voxel_data =", "dtype=np.int32) def _resize(self): \"\"\" Increase node array size by the", "supplied ``occupied_voxels`` is None a voxel array is created and", "of the vehicle (sans door and other excluded parts) data_io.save_multi_array(vox_veh_folder,", "identity is not lost. 
\"\"\" tri_nums = np.arange(len(tris), dtype=np.int32).reshape((len(tris), 1))", "allocate_step self.node_array = node_array # Reference to the full list", "mlab.show() # Save the voxelated model of the vehicle (sans", "(len(b_below_z.tris) == 0)): ## There is at least part of", "import data_io class BSP_Grid(object): def __init__(self, node_array, tris, allocate_step=100000): \"\"\"", "min_z: continue if z_pos > max_z: break b_above_z, b_below_z =", "``group_mask``. If the supplied ``occupied_voxels`` is None a voxel array", "vehicle geometries in a list of parts. Combine on a", "== \"__main__\": from rpl.tools.api import test_bench_api as tb_api SETTINGS =", "b_above_y, b_below_y = b_y_root.split_at(1, y_pos) b_z_root = b_below_y for k,", "a 3D boolean grid\"\"\" import logging import numpy as np", "in enumerate(x_pts[1:]): if x_pos < min_x: continue if x_pos >", "'Hatch_Assembly_Cargo'} HULLS = {\"Hull_Assembly_Parametric\", 'Hull_Assembly_Example_With_Connector'} MANIKINS = {\"Manikin\"} # Special", "\"\"\" tri_nums = np.arange(len(tris), dtype=np.int32).reshape((len(tris), 1)) minus_ones = -np.ones((len(tris), 6),", "self.split_cache = np.zeros(len(self.tris), dtype=np.int32) def _resize(self): \"\"\" Increase node array", "if __name__ == \"__main__\": from rpl.tools.api import test_bench_api as tb_api", "= BSP_Element(b_tree.tris, b_tree) size_i, size_j, size_k = len(x_pts), len(y_pts), len(z_pts)", "single_file=False) # Modify node coords so object aligns with cartesian", "transformation matrix; convert xyz node dict to nodes array \"\"\"", "size_i, size_j, size_k = len(x_pts), len(y_pts), len(z_pts) ## Create the", "MANIKINS} vehicle_surfs = tb_api.load_geometry(tb_api.get_all_geom_set() - MANIKINS, single_file=False) # Modify node", "b_z_root = b_above_z b_y_root = b_above_y b_x_root = b_above_x return", "& 8) plot_vehicle = mlab.points3d(voxel_data[\"x_grid\"][xo], voxel_data[\"y_grid\"][yo], voxel_data[\"z_grid\"][zo], color=(0.6, 0.6, 1.0),", "1e30, 
-1e30 z_min, z_max = 1e30, -1e30 for key, veh_surf", "in vehicle_comp_coords.items(): # Build up the voxel_data logging.debug(\"Sampling component: {}\".format(key))", "the vehicle (sans door and other excluded parts) data_io.save_multi_array(vox_veh_folder, vox_veh_file,", "needing a resize. Useful if adding nodes from within cython", "veh_surf['nodes'][:, 0] veh_surf[\"y\"] = veh_surf['nodes'][:, 1] veh_surf[\"z\"] = veh_surf['nodes'][:, 2]", "tr_mat, VOXEL_LABELS, SETTINGS) vox_veh_folder = r\"voxelated_models/vehicles/{}/{}\".format(SETTINGS[\"run_id\"], SETTINGS[\"voxel_size\"]) vox_veh_file = \"voxels_{}_vox{}_hacked\".format(SETTINGS[\"run_id\"],", "voxel_data[\"y_grid\"][yo], voxel_data[\"z_grid\"][zo], color=(1, 1, 1), scale_mode=\"none\", scale_factor=voxel_data[\"vox_size\"], mode='cube', opacity=0.05) xo,", "\"\"\" Increase node array size by the allocate_step amount. \"\"\"", "is None: break if z_pos < min_z: continue if z_pos", "using a 3D boolean grid\"\"\" import logging import numpy as", "opacity=1) xo, yo, zo = np.where(voxel_data[\"value\"] & 8) plot_vehicle =", "= allocate_step self.node_array = node_array # Reference to the full", "uniform grid. \"\"\" for key, veh_surf in vehicle_comp_coords.items(): # Convert", "scale_mode=\"none\", scale_factor=voxel_data[\"vox_size\"], mode='cube', opacity=1) xo, yo, zo = np.where(voxel_data[\"value\"] &", "\"y_grid\": y_grid, \"z_grid\": z_grid, \"vox_size\": settings[\"voxel_size\"], \"csys_trans\": tr_mat, \"value\": None}", "tr_mat): \"\"\" Rotate nodes using provided transformation matrix; convert xyz", "return x_grid, y_grid, z_grid def convert_geom(veh_surf, tr_mat): \"\"\" Rotate nodes", "are considered occupied and or'ed with ``group_mask``. If the supplied", "# Special labels applied to specific types of voxels VOXEL_LABELS", "lost. 
\"\"\" tri_nums = np.arange(len(tris), dtype=np.int32).reshape((len(tris), 1)) minus_ones = -np.ones((len(tris),", "> max_z: break b_above_z, b_below_z = b_z_root.split_at(2, z_pos) if not", "they are subdivided their identity is not lost. \"\"\" tri_nums", "tb_api.load_geometry(tb_api.get_all_geom_set() - MANIKINS, single_file=False) # Modify node coords so object", "all vehicle geometries in a list of parts. Combine on", "= surf[\"nodes\"] tris = surf[\"tris\"] x_pts, y_pts, z_pts = [voxel_data[k]", "voxel_data if __name__ == \"__main__\": from rpl.tools.api import test_bench_api as", "veh_surf[\"nodes\"] = np.vstack((veh_surf[\"x\"], veh_surf[\"y\"], veh_surf[\"z\"])).T veh_surf['nodes'] = np.dot(veh_surf['nodes'], tr_mat[:3, :3])" ]
[ "locked pad. 2. decrypt(cipher_text,locked_pad,passphrase) - Decrypt the cipher text encrypted", "operated=contain(int(target_list[counter])-pad[counter],ascii_value) result.append(operated) return result def locker(pad,passphrase): cutter=round(len(passphrase)/2) splited=[passphrase[:cutter],passphrase[cutter:]] locker=[0 for", "locker=[0 for counter in range(len(pad))] for element in splited: bloated_seed=sha1(element.encode()).hexdigest()", "cipher text encrypted with SBET. It requires cipher text, locked", "in splited: bloated_seed=sha1(element.encode()).hexdigest() random.seed(bloated_seed) locker=[contain(random.randrange(ascii_value)+element,ascii_value) for element in locker] holder=[]", "element in splited: bloated_seed=sha1(element.encode()).hexdigest() random.seed(bloated_seed) locker=[contain(random.randrange(ascii_value)+element,ascii_value) for element in locker]", "with the given passphrase. Returns cipher text and locked pad.", "1. encrypt(user_input,passphrase) - Encrypt the given string with the given", "cipher_text, locked_pad def decrypt(cipher_text,locked_pad,passphrase): ct=cipher_text.split(' ') lp=locked_pad.split(' ') pad=locker(lp,passphrase) pt=ciphering(ct,pad,True)", "holder=[] for counter in range(len(pad)): operated=int(pad[counter])^locker[counter] holder.append(operated) return holder def", "range(len(pad)): operated=int(pad[counter])^locker[counter] holder.append(operated) return holder def encrypt(user_input,passphrase): compressed=zlib.compress(user_input.encode()) ui_listed=list(compressed) pad=trlist(len(ui_listed),ascii_value)", "pad, and passphrase. ''' # CODE ======================================================================== import zlib import", "for element in locker] holder=[] for counter in range(len(pad)): operated=int(pad[counter])^locker[counter]", "text encrypted with SBET. 
It requires cipher text, locked pad,", "random.seed(bloated_seed) locker=[contain(random.randrange(ascii_value)+element,ascii_value) for element in locker] holder=[] for counter in", "in range(len(pad))] for element in splited: bloated_seed=sha1(element.encode()).hexdigest() random.seed(bloated_seed) locker=[contain(random.randrange(ascii_value)+element,ascii_value) for", "- Encrypt the given string with the given passphrase. Returns", "element in locker] holder=[] for counter in range(len(pad)): operated=int(pad[counter])^locker[counter] holder.append(operated)", "requires cipher text, locked pad, and passphrase. ''' # CODE", "2. decrypt(cipher_text,locked_pad,passphrase) - Decrypt the cipher text encrypted with SBET.", "range(len(pad)): if decrypt==False: operated=contain(target_list[counter]+pad[counter],ascii_value) else: operated=contain(int(target_list[counter])-pad[counter],ascii_value) result.append(operated) return result def", "silver_bullet.TRNG import trlist from silver_bullet.contain_value import contain ascii_value=256 def ciphering(target_list,pad,decrypt=False):", "text, locked pad, and passphrase. ''' # CODE ======================================================================== import", "locker] holder=[] for counter in range(len(pad)): operated=int(pad[counter])^locker[counter] holder.append(operated) return holder", "in locker] holder=[] for counter in range(len(pad)): operated=int(pad[counter])^locker[counter] holder.append(operated) return", "cutter=round(len(passphrase)/2) splited=[passphrase[:cutter],passphrase[cutter:]] locker=[0 for counter in range(len(pad))] for element in", "passphrase. 
''' # CODE ======================================================================== import zlib import random from", "for element in splited: bloated_seed=sha1(element.encode()).hexdigest() random.seed(bloated_seed) locker=[contain(random.randrange(ascii_value)+element,ascii_value) for element in", "result def locker(pad,passphrase): cutter=round(len(passphrase)/2) splited=[passphrase[:cutter],passphrase[cutter:]] locker=[0 for counter in range(len(pad))]", "decrypt(cipher_text,locked_pad,passphrase) - Decrypt the cipher text encrypted with SBET. It", "decrypt(cipher_text,locked_pad,passphrase): ct=cipher_text.split(' ') lp=locked_pad.split(' ') pad=locker(lp,passphrase) pt=ciphering(ct,pad,True) byted=bytes(pt) decompressed=zlib.decompress(byted).decode() return", "encrypt(user_input,passphrase): compressed=zlib.compress(user_input.encode()) ui_listed=list(compressed) pad=trlist(len(ui_listed),ascii_value) ct=ciphering(ui_listed,pad) lp=locker(pad,passphrase) cipher_text=' '.join(map(str,ct)) locked_pad=' '.join(map(str,lp))", "''' # CODE ======================================================================== import zlib import random from hashlib", "Encrypt the given string with the given passphrase. Returns cipher", "SBET. It requires cipher text, locked pad, and passphrase. '''", "pad. 2. 
decrypt(cipher_text,locked_pad,passphrase) - Decrypt the cipher text encrypted with", "operated=contain(target_list[counter]+pad[counter],ascii_value) else: operated=contain(int(target_list[counter])-pad[counter],ascii_value) result.append(operated) return result def locker(pad,passphrase): cutter=round(len(passphrase)/2) splited=[passphrase[:cutter],passphrase[cutter:]]", "for counter in range(len(pad)): if decrypt==False: operated=contain(target_list[counter]+pad[counter],ascii_value) else: operated=contain(int(target_list[counter])-pad[counter],ascii_value) result.append(operated)", "locker=[contain(random.randrange(ascii_value)+element,ascii_value) for element in locker] holder=[] for counter in range(len(pad)):", "ct=cipher_text.split(' ') lp=locked_pad.split(' ') pad=locker(lp,passphrase) pt=ciphering(ct,pad,True) byted=bytes(pt) decompressed=zlib.decompress(byted).decode() return decompressed", "'.join(map(str,lp)) return cipher_text, locked_pad def decrypt(cipher_text,locked_pad,passphrase): ct=cipher_text.split(' ') lp=locked_pad.split(' ')", "pad=trlist(len(ui_listed),ascii_value) ct=ciphering(ui_listed,pad) lp=locker(pad,passphrase) cipher_text=' '.join(map(str,ct)) locked_pad=' '.join(map(str,lp)) return cipher_text, locked_pad", "import trlist from silver_bullet.contain_value import contain ascii_value=256 def ciphering(target_list,pad,decrypt=False): result=[]", "from silver_bullet.TRNG import trlist from silver_bullet.contain_value import contain ascii_value=256 def", "'.join(map(str,ct)) locked_pad=' '.join(map(str,lp)) return cipher_text, locked_pad def decrypt(cipher_text,locked_pad,passphrase): ct=cipher_text.split(' ')", "counter in range(len(pad))] for element in splited: bloated_seed=sha1(element.encode()).hexdigest() random.seed(bloated_seed) locker=[contain(random.randrange(ascii_value)+element,ascii_value)", "zlib import random from hashlib import sha1 from silver_bullet.TRNG import", "- Decrypt the cipher text encrypted with SBET. 
It requires", "for counter in range(len(pad))] for element in splited: bloated_seed=sha1(element.encode()).hexdigest() random.seed(bloated_seed)", "ui_listed=list(compressed) pad=trlist(len(ui_listed),ascii_value) ct=ciphering(ui_listed,pad) lp=locker(pad,passphrase) cipher_text=' '.join(map(str,ct)) locked_pad=' '.join(map(str,lp)) return cipher_text,", "the cipher text encrypted with SBET. It requires cipher text,", "result=[] for counter in range(len(pad)): if decrypt==False: operated=contain(target_list[counter]+pad[counter],ascii_value) else: operated=contain(int(target_list[counter])-pad[counter],ascii_value)", "ascii_value=256 def ciphering(target_list,pad,decrypt=False): result=[] for counter in range(len(pad)): if decrypt==False:", "the given string with the given passphrase. Returns cipher text", "splited=[passphrase[:cutter],passphrase[cutter:]] locker=[0 for counter in range(len(pad))] for element in splited:", "and locked pad. 2. decrypt(cipher_text,locked_pad,passphrase) - Decrypt the cipher text", "Returns cipher text and locked pad. 2. decrypt(cipher_text,locked_pad,passphrase) - Decrypt", "operated=int(pad[counter])^locker[counter] holder.append(operated) return holder def encrypt(user_input,passphrase): compressed=zlib.compress(user_input.encode()) ui_listed=list(compressed) pad=trlist(len(ui_listed),ascii_value) ct=ciphering(ui_listed,pad)", "encrypt(user_input,passphrase) - Encrypt the given string with the given passphrase.", "''' >List of functions 1. encrypt(user_input,passphrase) - Encrypt the given", "compressed=zlib.compress(user_input.encode()) ui_listed=list(compressed) pad=trlist(len(ui_listed),ascii_value) ct=ciphering(ui_listed,pad) lp=locker(pad,passphrase) cipher_text=' '.join(map(str,ct)) locked_pad=' '.join(map(str,lp)) return", "# CODE ======================================================================== import zlib import random from hashlib import", "functions 1. 
encrypt(user_input,passphrase) - Encrypt the given string with the", "encrypted with SBET. It requires cipher text, locked pad, and", "def ciphering(target_list,pad,decrypt=False): result=[] for counter in range(len(pad)): if decrypt==False: operated=contain(target_list[counter]+pad[counter],ascii_value)", "given string with the given passphrase. Returns cipher text and", "holder def encrypt(user_input,passphrase): compressed=zlib.compress(user_input.encode()) ui_listed=list(compressed) pad=trlist(len(ui_listed),ascii_value) ct=ciphering(ui_listed,pad) lp=locker(pad,passphrase) cipher_text=' '.join(map(str,ct))", "locker(pad,passphrase): cutter=round(len(passphrase)/2) splited=[passphrase[:cutter],passphrase[cutter:]] locker=[0 for counter in range(len(pad))] for element", "sha1 from silver_bullet.TRNG import trlist from silver_bullet.contain_value import contain ascii_value=256", "return result def locker(pad,passphrase): cutter=round(len(passphrase)/2) splited=[passphrase[:cutter],passphrase[cutter:]] locker=[0 for counter in", "import random from hashlib import sha1 from silver_bullet.TRNG import trlist", "in range(len(pad)): if decrypt==False: operated=contain(target_list[counter]+pad[counter],ascii_value) else: operated=contain(int(target_list[counter])-pad[counter],ascii_value) result.append(operated) return result", "hashlib import sha1 from silver_bullet.TRNG import trlist from silver_bullet.contain_value import", "ct=ciphering(ui_listed,pad) lp=locker(pad,passphrase) cipher_text=' '.join(map(str,ct)) locked_pad=' '.join(map(str,lp)) return cipher_text, locked_pad def", ">List of functions 1. encrypt(user_input,passphrase) - Encrypt the given string", "result.append(operated) return result def locker(pad,passphrase): cutter=round(len(passphrase)/2) splited=[passphrase[:cutter],passphrase[cutter:]] locker=[0 for counter", "cipher text, locked pad, and passphrase. 
''' # CODE ========================================================================", "cipher_text=' '.join(map(str,ct)) locked_pad=' '.join(map(str,lp)) return cipher_text, locked_pad def decrypt(cipher_text,locked_pad,passphrase): ct=cipher_text.split('", "import sha1 from silver_bullet.TRNG import trlist from silver_bullet.contain_value import contain", "import contain ascii_value=256 def ciphering(target_list,pad,decrypt=False): result=[] for counter in range(len(pad)):", "return cipher_text, locked_pad def decrypt(cipher_text,locked_pad,passphrase): ct=cipher_text.split(' ') lp=locked_pad.split(' ') pad=locker(lp,passphrase)", "cipher text and locked pad. 2. decrypt(cipher_text,locked_pad,passphrase) - Decrypt the", "text and locked pad. 2. decrypt(cipher_text,locked_pad,passphrase) - Decrypt the cipher", "bloated_seed=sha1(element.encode()).hexdigest() random.seed(bloated_seed) locker=[contain(random.randrange(ascii_value)+element,ascii_value) for element in locker] holder=[] for counter", "locked_pad def decrypt(cipher_text,locked_pad,passphrase): ct=cipher_text.split(' ') lp=locked_pad.split(' ') pad=locker(lp,passphrase) pt=ciphering(ct,pad,True) byted=bytes(pt)", "in range(len(pad)): operated=int(pad[counter])^locker[counter] holder.append(operated) return holder def encrypt(user_input,passphrase): compressed=zlib.compress(user_input.encode()) ui_listed=list(compressed)", "random from hashlib import sha1 from silver_bullet.TRNG import trlist from", "and passphrase. ''' # CODE ======================================================================== import zlib import random", "for counter in range(len(pad)): operated=int(pad[counter])^locker[counter] holder.append(operated) return holder def encrypt(user_input,passphrase):", "from silver_bullet.contain_value import contain ascii_value=256 def ciphering(target_list,pad,decrypt=False): result=[] for counter", "locked pad, and passphrase. 
''' # CODE ======================================================================== import zlib", "CODE ======================================================================== import zlib import random from hashlib import sha1", "contain ascii_value=256 def ciphering(target_list,pad,decrypt=False): result=[] for counter in range(len(pad)): if", "holder.append(operated) return holder def encrypt(user_input,passphrase): compressed=zlib.compress(user_input.encode()) ui_listed=list(compressed) pad=trlist(len(ui_listed),ascii_value) ct=ciphering(ui_listed,pad) lp=locker(pad,passphrase)", "silver_bullet.contain_value import contain ascii_value=256 def ciphering(target_list,pad,decrypt=False): result=[] for counter in", "counter in range(len(pad)): operated=int(pad[counter])^locker[counter] holder.append(operated) return holder def encrypt(user_input,passphrase): compressed=zlib.compress(user_input.encode())", "def decrypt(cipher_text,locked_pad,passphrase): ct=cipher_text.split(' ') lp=locked_pad.split(' ') pad=locker(lp,passphrase) pt=ciphering(ct,pad,True) byted=bytes(pt) decompressed=zlib.decompress(byted).decode()", "trlist from silver_bullet.contain_value import contain ascii_value=256 def ciphering(target_list,pad,decrypt=False): result=[] for", "locked_pad=' '.join(map(str,lp)) return cipher_text, locked_pad def decrypt(cipher_text,locked_pad,passphrase): ct=cipher_text.split(' ') lp=locked_pad.split('", "passphrase. Returns cipher text and locked pad. 2. decrypt(cipher_text,locked_pad,passphrase) -", "import zlib import random from hashlib import sha1 from silver_bullet.TRNG", "the given passphrase. Returns cipher text and locked pad. 
2.", "else: operated=contain(int(target_list[counter])-pad[counter],ascii_value) result.append(operated) return result def locker(pad,passphrase): cutter=round(len(passphrase)/2) splited=[passphrase[:cutter],passphrase[cutter:]] locker=[0", "decrypt==False: operated=contain(target_list[counter]+pad[counter],ascii_value) else: operated=contain(int(target_list[counter])-pad[counter],ascii_value) result.append(operated) return result def locker(pad,passphrase): cutter=round(len(passphrase)/2)", "range(len(pad))] for element in splited: bloated_seed=sha1(element.encode()).hexdigest() random.seed(bloated_seed) locker=[contain(random.randrange(ascii_value)+element,ascii_value) for element", "of functions 1. encrypt(user_input,passphrase) - Encrypt the given string with", "counter in range(len(pad)): if decrypt==False: operated=contain(target_list[counter]+pad[counter],ascii_value) else: operated=contain(int(target_list[counter])-pad[counter],ascii_value) result.append(operated) return", "with SBET. 
It requires cipher text, locked pad, and passphrase.", "======================================================================== import zlib import random from hashlib import sha1 from", "return holder def encrypt(user_input,passphrase): compressed=zlib.compress(user_input.encode()) ui_listed=list(compressed) pad=trlist(len(ui_listed),ascii_value) ct=ciphering(ui_listed,pad) lp=locker(pad,passphrase) cipher_text='", "splited: bloated_seed=sha1(element.encode()).hexdigest() random.seed(bloated_seed) locker=[contain(random.randrange(ascii_value)+element,ascii_value) for element in locker] holder=[] for", "if decrypt==False: operated=contain(target_list[counter]+pad[counter],ascii_value) else: operated=contain(int(target_list[counter])-pad[counter],ascii_value) result.append(operated) return result def locker(pad,passphrase):", "def encrypt(user_input,passphrase): compressed=zlib.compress(user_input.encode()) ui_listed=list(compressed) pad=trlist(len(ui_listed),ascii_value) ct=ciphering(ui_listed,pad) lp=locker(pad,passphrase) cipher_text=' '.join(map(str,ct)) locked_pad='", "ciphering(target_list,pad,decrypt=False): result=[] for counter in range(len(pad)): if decrypt==False: operated=contain(target_list[counter]+pad[counter],ascii_value) else:", "lp=locker(pad,passphrase) cipher_text=' '.join(map(str,ct)) locked_pad=' '.join(map(str,lp)) return cipher_text, locked_pad def decrypt(cipher_text,locked_pad,passphrase):", "It requires cipher text, locked pad, and passphrase. ''' #", "from hashlib import sha1 from silver_bullet.TRNG import trlist from silver_bullet.contain_value", "string with the given passphrase. Returns cipher text and locked", "def locker(pad,passphrase): cutter=round(len(passphrase)/2) splited=[passphrase[:cutter],passphrase[cutter:]] locker=[0 for counter in range(len(pad))] for", "Decrypt the cipher text encrypted with SBET. It requires cipher", "given passphrase. Returns cipher text and locked pad. 2. decrypt(cipher_text,locked_pad,passphrase)" ]
[ "__init__(self, error_element, error_namespace, error_name=None): self.error_name = error_name self.element = ET.Element(error_element)", "def __unicode__(self): if self.error_name is not None: self.element.append(ET.Element(self.error_name)) return unicode(ET.tostring(self.element))", "= ET.Element(error_element) self.element.set(\"xmlns\", error_namespace) # per default all errors are", "all errors are recoverable self.unrecoverable = False def __unicode__(self): if", "see LICENSE for more details. \"\"\" import xml.etree.ElementTree as ET", "per default all errors are recoverable self.unrecoverable = False def", "\"\"\" pyfire.errors ~~~~~~~~~~~~~~~~~~~~~~ Holds the global used base errors :copyright:", "XMPPProtocolError(Exception): \"\"\"Base class for all errors that can be sent", "\"\"\" def __init__(self, error_element, error_namespace, error_name=None): self.error_name = error_name self.element", "class for all errors that can be sent via XMPP", "-*- \"\"\" pyfire.errors ~~~~~~~~~~~~~~~~~~~~~~ Holds the global used base errors", "via XMPP Protocol to peer \"\"\" def __init__(self, error_element, error_namespace,", "def __init__(self, error_element, error_namespace, error_name=None): self.error_name = error_name self.element =", "errors are recoverable self.unrecoverable = False def __unicode__(self): if self.error_name", "LICENSE for more details. 
\"\"\" import xml.etree.ElementTree as ET class", "# -*- coding: utf-8 -*- \"\"\" pyfire.errors ~~~~~~~~~~~~~~~~~~~~~~ Holds the", "all errors that can be sent via XMPP Protocol to", "recoverable self.unrecoverable = False def __unicode__(self): if self.error_name is not", "peer \"\"\" def __init__(self, error_element, error_namespace, error_name=None): self.error_name = error_name", "# per default all errors are recoverable self.unrecoverable = False", "utf-8 -*- \"\"\" pyfire.errors ~~~~~~~~~~~~~~~~~~~~~~ Holds the global used base", "\"\"\"Base class for all errors that can be sent via", "self.unrecoverable = False def __unicode__(self): if self.error_name is not None:", "= False def __unicode__(self): if self.error_name is not None: self.element.append(ET.Element(self.error_name))", "-*- coding: utf-8 -*- \"\"\" pyfire.errors ~~~~~~~~~~~~~~~~~~~~~~ Holds the global", "more details. \"\"\" import xml.etree.ElementTree as ET class XMPPProtocolError(Exception): \"\"\"Base", "BSD, see LICENSE for more details. 
\"\"\" import xml.etree.ElementTree as", "False def __unicode__(self): if self.error_name is not None: self.element.append(ET.Element(self.error_name)) return", "base errors :copyright: 2011 by the pyfire Team, see AUTHORS", "error_name=None): self.error_name = error_name self.element = ET.Element(error_element) self.element.set(\"xmlns\", error_namespace) #", ":copyright: 2011 by the pyfire Team, see AUTHORS for more", "to peer \"\"\" def __init__(self, error_element, error_namespace, error_name=None): self.error_name =", "Protocol to peer \"\"\" def __init__(self, error_element, error_namespace, error_name=None): self.error_name", "coding: utf-8 -*- \"\"\" pyfire.errors ~~~~~~~~~~~~~~~~~~~~~~ Holds the global used", "ET.Element(error_element) self.element.set(\"xmlns\", error_namespace) # per default all errors are recoverable", "global used base errors :copyright: 2011 by the pyfire Team,", "XMPP Protocol to peer \"\"\" def __init__(self, error_element, error_namespace, error_name=None):", "as ET class XMPPProtocolError(Exception): \"\"\"Base class for all errors that", "2011 by the pyfire Team, see AUTHORS for more details.", "error_element, error_namespace, error_name=None): self.error_name = error_name self.element = ET.Element(error_element) self.element.set(\"xmlns\",", "the pyfire Team, see AUTHORS for more details. 
:license: BSD,", "class XMPPProtocolError(Exception): \"\"\"Base class for all errors that can be", "<reponame>RavidLevi98/pyfire # -*- coding: utf-8 -*- \"\"\" pyfire.errors ~~~~~~~~~~~~~~~~~~~~~~ Holds", "default all errors are recoverable self.unrecoverable = False def __unicode__(self):", "~~~~~~~~~~~~~~~~~~~~~~ Holds the global used base errors :copyright: 2011 by", "used base errors :copyright: 2011 by the pyfire Team, see", "self.element.set(\"xmlns\", error_namespace) # per default all errors are recoverable self.unrecoverable", "= error_name self.element = ET.Element(error_element) self.element.set(\"xmlns\", error_namespace) # per default", "\"\"\" import xml.etree.ElementTree as ET class XMPPProtocolError(Exception): \"\"\"Base class for", "are recoverable self.unrecoverable = False def __unicode__(self): if self.error_name is", "Team, see AUTHORS for more details. :license: BSD, see LICENSE", "for all errors that can be sent via XMPP Protocol", "self.element = ET.Element(error_element) self.element.set(\"xmlns\", error_namespace) # per default all errors", "the global used base errors :copyright: 2011 by the pyfire", "that can be sent via XMPP Protocol to peer \"\"\"", "self.error_name = error_name self.element = ET.Element(error_element) self.element.set(\"xmlns\", error_namespace) # per", "by the pyfire Team, see AUTHORS for more details. :license:", "can be sent via XMPP Protocol to peer \"\"\" def", "for more details. :license: BSD, see LICENSE for more details.", "xml.etree.ElementTree as ET class XMPPProtocolError(Exception): \"\"\"Base class for all errors", "be sent via XMPP Protocol to peer \"\"\" def __init__(self,", "for more details. \"\"\" import xml.etree.ElementTree as ET class XMPPProtocolError(Exception):", "see AUTHORS for more details. 
:license: BSD, see LICENSE for", "import xml.etree.ElementTree as ET class XMPPProtocolError(Exception): \"\"\"Base class for all", "pyfire.errors ~~~~~~~~~~~~~~~~~~~~~~ Holds the global used base errors :copyright: 2011", "error_name self.element = ET.Element(error_element) self.element.set(\"xmlns\", error_namespace) # per default all", "error_namespace, error_name=None): self.error_name = error_name self.element = ET.Element(error_element) self.element.set(\"xmlns\", error_namespace)", "errors that can be sent via XMPP Protocol to peer", "ET class XMPPProtocolError(Exception): \"\"\"Base class for all errors that can", ":license: BSD, see LICENSE for more details. \"\"\" import xml.etree.ElementTree", "AUTHORS for more details. :license: BSD, see LICENSE for more", "details. \"\"\" import xml.etree.ElementTree as ET class XMPPProtocolError(Exception): \"\"\"Base class", "Holds the global used base errors :copyright: 2011 by the", "details. :license: BSD, see LICENSE for more details. \"\"\" import", "error_namespace) # per default all errors are recoverable self.unrecoverable =", "sent via XMPP Protocol to peer \"\"\" def __init__(self, error_element,", "errors :copyright: 2011 by the pyfire Team, see AUTHORS for", "more details. :license: BSD, see LICENSE for more details. \"\"\"", "pyfire Team, see AUTHORS for more details. :license: BSD, see" ]
[ "== 'left': return {'type': 'head-on', 'identifier': 'left', 'direction': direction} elif", "head['x'] == width and direction =='down' or head['x'] == width", "2 else: return len(my_position) def getMyDirection(gamedata): me = gamedata['you'] my_position", "and direction == 'up' or head['x'] == 0 and direction", "head['y'] == height: return {'type': 'corner', 'identifier': 'bottom right', 'direction':", "or head['x'] == 0 and direction == 'down': return {'type':", "return 'left' elif my_position[0]['x'] == my_position[1]['x'] and my_position[0]['y'] < my_position[1]['y']:", "if head['x'] == 0 and head['y'] == 0: return {'type':", "#remove opposite direction if current_direction == 'up': options.remove('down') if current_direction", "== 'down': return {'type': 'parallel', 'identifier': 'left', 'direction': direction} elif", "height: return {'type': 'corner', 'identifier': 'bottom left', 'direction': direction} elif", "Status.getMyLength(gamedata) == 1: return 'none' elif my_position[0]['x'] > my_position[1]['x']: return", "== 0: return {'type': 'corner', 'identifier': 'top right', 'direction': direction}", "= current_direction #in a corner elif Assess.wallProximity(gamedata)['type'] == 'corner': options.remove(current_direction)", "my_position[1]['x']: return 'right' elif my_position[0]['x'] < my_position[1]['x']: return 'left' elif", "height: return {'type': 'corner', 'identifier': 'bottom right', 'direction': direction} #headons", "avoidDeath(): pass def chaseFood(): pass def fleeSnake(): pass def chaseSnake():", "0 and direction == 'up' or head['x'] == 0 and", "{'type': 'parallel', 'identifier': 'right', 'direction': direction} elif head['y'] == height", "else: return 'down' def getHealth(gamedata): pass def getBoardSize(gamedata): board_height =", "current_direction #in a corner elif Assess.wallProximity(gamedata)['type'] == 'corner': options.remove(current_direction) if", "Assess.wallProximity(gamedata)['identifier'][4] == 'r': if 'up' in options: choice = 
'down'", "'direction': direction} elif head['y'] == height and direction == 'left'", "def avoidDeath(): pass def chaseFood(): pass def fleeSnake(): pass def", "== 'right': return {'type': 'parallel', 'identifier': 'bottom', 'direction': direction} else:", "elif head['y'] == 0 and direction == 'right' or head['y']", "me['body'] if Status.getMyLength(gamedata) == 1: return 'none' elif my_position[0]['x'] >", "return {'type': 'parallel', 'identifier': 'top', 'direction': direction} elif head['x'] ==", "pass class Action(object): def avoidDeath(): pass def chaseFood(): pass def", "in options: choice = 'down' else: choice = 'right' elif", "current_direction == 'right': options.remove('left') if current_direction == 'left': options.remove('right') #no", "chooseBestOption(gamedata): options = ['up', 'down', 'right', 'left'] current_direction = Status.getMyDirection(gamedata)", "== 'none': choice = random.choice(options) #remove opposite direction if current_direction", "Status.getBoardSize(gamedata) direction = Status.getMyDirection(gamedata) height = board_size['height'] - 1 width", "width and direction == 'right': return {'type': 'head-on', 'identifier': 'right',", "choice = random.choice(options) #remove opposite direction if current_direction == 'up':", "head['y'] == 0 and direction =='left': return {'type': 'parallel', 'identifier':", "== 'left': options.remove('right') #no danger keep going if Assess.wallProximity(gamedata) ==", "gamedata['you'] my_position = me['body'] if Status.getMyLength(gamedata) == 1: return 'none'", "'down' def getHealth(gamedata): pass def getBoardSize(gamedata): board_height = gamedata['board']['height'] board_width", "current_direction = Status.getMyDirection(gamedata) #first go if current_direction == 'none': choice", "else: choice = 'right' elif Assess.wallProximity(gamedata)['identifier'][0] == 't' and Assess.wallProximity(gamedata)['identifier'][4]", "'direction': direction} #headons elif head['x'] == 0 and direction ==", "head-on or 
corner\"\"\" head = Status.getHeadPosition(gamedata) board_size = Status.getBoardSize(gamedata) direction", "== my_position[2]: return 2 else: return len(my_position) def getMyDirection(gamedata): me", "def wallProximity(gamedata): \"\"\"returns proximity to a wall either parallel to,", "dimensions def getFoodPositions(gamedata): pass def getSnakesPositions(gamedata): pass class Assess(object): def", "return {'type': 'corner', 'identifier': 'top left', 'direction': direction} elif head['x']", "Assess.wallProximity(gamedata)['identifier'][4] == 'l': if 'up' in options: choice = 'down'", "head = my_position[0] return head def getMyLength(gamedata): me = gamedata['you']", "{'type': 'parallel', 'identifier': 'left', 'direction': direction} elif head['y'] == 0", "{'type': 'corner', 'identifier': 'top right', 'direction': direction} elif head['x'] ==", "width and head['y'] == height: return {'type': 'corner', 'identifier': 'bottom", "pass def chaseSnake(): pass class Decision(object): def chooseBestOption(gamedata): options =", "and direction == 'right': return {'type': 'head-on', 'identifier': 'right', 'direction':", "elif head['y'] == height and direction == 'down': return {'type':", "corner\"\"\" head = Status.getHeadPosition(gamedata) board_size = Status.getBoardSize(gamedata) direction = Status.getMyDirection(gamedata)", "return 2 else: return len(my_position) def getMyDirection(gamedata): me = gamedata['you']", "if 'up' in options: choice = 'down' else: choice =", "#parallel elif Assess.wallProximity(gamedata)['type'] == 'parallel': choice = current_direction else: print(\"shit\")", "head['y'] == 0 and direction == 'up': return {'type': 'head-on',", "return {'type': 'head-on', 'identifier': 'right', 'direction': direction} elif head['y'] ==", "{'height': board_height, 'width': board_width} return dimensions def getFoodPositions(gamedata): pass def", "pass def foodNearby(gamedata): pass class Action(object): def avoidDeath(): pass def", "'none': choice = 
random.choice(options) #remove opposite direction if current_direction ==", "= random.choice(options) #parallel elif Assess.wallProximity(gamedata)['type'] == 'parallel': choice = current_direction", "'corner', 'identifier': 'top left', 'direction': direction} elif head['x'] == 0", "{'type': 'corner', 'identifier': 'bottom left', 'direction': direction} elif head['x'] ==", "0 and direction == 'right' or head['y'] == 0 and", "{'type': 'head-on', 'identifier': 'left', 'direction': direction} elif head['y'] == 0", "= Status.getHeadPosition(gamedata) board_size = Status.getBoardSize(gamedata) direction = Status.getMyDirection(gamedata) height =", "options: choice = 'down' else: choice = 'right' elif Assess.wallProximity(gamedata)['identifier'][0]", "direction} elif head['x'] == width and head['y'] == height: return", "direction =='left': return {'type': 'parallel', 'identifier': 'top', 'direction': direction} elif", "left', 'direction': direction} elif head['x'] == width and head['y'] ==", "'top left', 'direction': direction} elif head['x'] == 0 and head['y']", "1: return 'none' elif my_position[0]['x'] > my_position[1]['x']: return 'right' elif", "def fleeSnake(): pass def chaseSnake(): pass class Decision(object): def chooseBestOption(gamedata):", "me = gamedata['you'] my_position = me['body'] if Status.getMyLength(gamedata) == 1:", "corner elif Assess.wallProximity(gamedata)['type'] == 'corner': options.remove(current_direction) if Assess.wallProximity(gamedata)['identifier'][0] == 't'", "danger keep going if Assess.wallProximity(gamedata) == False: choice = current_direction", "height and direction == 'right': return {'type': 'parallel', 'identifier': 'bottom',", "0 and direction == 'up': return {'type': 'head-on', 'identifier': 'top',", "Assess.wallProximity(gamedata)['type'] == 'corner': options.remove(current_direction) if Assess.wallProximity(gamedata)['identifier'][0] == 't' and Assess.wallProximity(gamedata)['identifier'][4]", "smallerSnakeNearby(gamedata): 
pass def biggerSnakeNearby(gamedata): pass def foodNearby(gamedata): pass class Action(object):", "def getBoardSize(gamedata): board_height = gamedata['board']['height'] board_width = gamedata['board']['width'] dimensions =", "== width and head['y'] == 0: return {'type': 'corner', 'identifier':", "direction} #headons elif head['x'] == 0 and direction == 'left':", "'down', 'right', 'left'] current_direction = Status.getMyDirection(gamedata) #first go if current_direction", "== 'up': return {'type': 'head-on', 'identifier': 'top', 'direction': direction} elif", "= 'down' else: choice = 'left' #headon elif Assess.wallProximity(gamedata)['type'] ==", "== 0 and direction == 'right' or head['y'] == 0", "== my_position[1] == my_position[2]: return 1 elif my_position[1] == my_position[2]:", "== my_position[1]['x'] and my_position[0]['y'] < my_position[1]['y']: return 'up' else: return", "head['y'] == height: return {'type': 'corner', 'identifier': 'bottom left', 'direction':", "class Decision(object): def chooseBestOption(gamedata): options = ['up', 'down', 'right', 'left']", "options.remove('up') if current_direction == 'right': options.remove('left') if current_direction == 'left':", "'up' in options: choice = 'down' else: choice = 'right'", "a corner elif Assess.wallProximity(gamedata)['type'] == 'corner': options.remove(current_direction) if Assess.wallProximity(gamedata)['identifier'][0] ==", "return {'type': 'corner', 'identifier': 'bottom left', 'direction': direction} elif head['x']", "#first go if current_direction == 'none': choice = random.choice(options) #remove", "gamedata['board']['height'] board_width = gamedata['board']['width'] dimensions = {'height': board_height, 'width': board_width}", "ownBodyProximity(gamedata): pass def killPossible(gamedata): pass def smallerSnakeNearby(gamedata): pass def biggerSnakeNearby(gamedata):", "Action(object): def avoidDeath(): pass def chaseFood(): pass def fleeSnake(): pass", "direction} elif head['y'] == height and 
direction == 'down': return", "return 'right' elif my_position[0]['x'] < my_position[1]['x']: return 'left' elif my_position[0]['x']", "'left' elif my_position[0]['x'] == my_position[1]['x'] and my_position[0]['y'] < my_position[1]['y']: return", "'right': return {'type': 'head-on', 'identifier': 'right', 'direction': direction} elif head['y']", "to a wall either parallel to, head-on or corner\"\"\" head", "= ['up', 'down', 'right', 'left'] current_direction = Status.getMyDirection(gamedata) #first go", "direction = Status.getMyDirection(gamedata) height = board_size['height'] - 1 width =", "def smallerSnakeNearby(gamedata): pass def biggerSnakeNearby(gamedata): pass def foodNearby(gamedata): pass class", "'direction': direction} elif head['y'] == height and direction == 'down':", "elif my_position[1] == my_position[2]: return 2 else: return len(my_position) def", "a wall either parallel to, head-on or corner\"\"\" head =", "killPossible(gamedata): pass def smallerSnakeNearby(gamedata): pass def biggerSnakeNearby(gamedata): pass def foodNearby(gamedata):", "'right', 'left'] current_direction = Status.getMyDirection(gamedata) #first go if current_direction ==", "elif head['x'] == width and head['y'] == height: return {'type':", "direction == 'down': return {'type': 'parallel', 'identifier': 'left', 'direction': direction}", "'t' and Assess.wallProximity(gamedata)['identifier'][4] == 'l': if 'up' in options: choice", "'width': board_width} return dimensions def getFoodPositions(gamedata): pass def getSnakesPositions(gamedata): pass", "going if Assess.wallProximity(gamedata) == False: choice = current_direction #in a", "width and direction =='down' or head['x'] == width and direction", "elif head['x'] == width and direction == 'right': return {'type':", "keep going if Assess.wallProximity(gamedata) == False: choice = current_direction #in", "> my_position[1]['x']: return 'right' elif my_position[0]['x'] < my_position[1]['x']: return 'left'", "'head-on', 'identifier': 
'left', 'direction': direction} elif head['y'] == 0 and", "height = board_size['height'] - 1 width = board_size['width'] - 1", "0 and head['y'] == 0: return {'type': 'corner', 'identifier': 'top", "'identifier': 'right', 'direction': direction} elif head['y'] == height and direction", "right', 'direction': direction} elif head['x'] == width and head['y'] ==", "and direction == 'right': return {'type': 'parallel', 'identifier': 'bottom', 'direction':", "== width and direction == 'up': return {'type': 'parallel', 'identifier':", "'right' elif my_position[0]['x'] < my_position[1]['x']: return 'left' elif my_position[0]['x'] ==", "def killPossible(gamedata): pass def smallerSnakeNearby(gamedata): pass def biggerSnakeNearby(gamedata): pass def", "'direction': direction} else: return False def ownBodyProximity(gamedata): pass def killPossible(gamedata):", "'identifier': 'bottom left', 'direction': direction} elif head['x'] == width and", "'bottom left', 'direction': direction} elif head['x'] == width and head['y']", "direction} elif head['x'] == width and head['y'] == 0: return", "== 0: return {'type': 'corner', 'identifier': 'top left', 'direction': direction}", "options = ['up', 'down', 'right', 'left'] current_direction = Status.getMyDirection(gamedata) #first", "my_position[2]: return 2 else: return len(my_position) def getMyDirection(gamedata): me =", "'direction': direction} elif head['y'] == 0 and direction == 'right'", "== height: return {'type': 'corner', 'identifier': 'bottom right', 'direction': direction}", "'direction': direction} elif head['x'] == width and head['y'] == 0:", "head def getMyLength(gamedata): me = gamedata['you'] my_position = me['body'] if", "= Status.getMyDirection(gamedata) #first go if current_direction == 'none': choice =", "direction} elif head['x'] == 0 and head['y'] == height: return", "== width and direction =='down' or head['x'] == width and", "import random class Status(object): def getHeadPosition(gamedata): me = gamedata['you'] 
my_position", "return {'type': 'head-on', 'identifier': 'left', 'direction': direction} elif head['y'] ==", "current_direction == 'up': options.remove('down') if current_direction == 'down': options.remove('up') if", "Status(object): def getHeadPosition(gamedata): me = gamedata['you'] my_position = me['body'] head", "my_position[1]['x']: return 'left' elif my_position[0]['x'] == my_position[1]['x'] and my_position[0]['y'] <", "= gamedata['board']['width'] dimensions = {'height': board_height, 'width': board_width} return dimensions", "\"\"\"returns proximity to a wall either parallel to, head-on or", "== 0 and direction == 'left': return {'type': 'head-on', 'identifier':", "and my_position[0]['y'] < my_position[1]['y']: return 'up' else: return 'down' def", "wallProximity(gamedata): \"\"\"returns proximity to a wall either parallel to, head-on", "board_height, 'width': board_width} return dimensions def getFoodPositions(gamedata): pass def getSnakesPositions(gamedata):", "'bottom', 'direction': direction} #parrallels elif head['x'] == 0 and direction", "'down': return {'type': 'parallel', 'identifier': 'left', 'direction': direction} elif head['y']", "chaseSnake(): pass class Decision(object): def chooseBestOption(gamedata): options = ['up', 'down',", "opposite direction if current_direction == 'up': options.remove('down') if current_direction ==", "{'type': 'corner', 'identifier': 'top left', 'direction': direction} elif head['x'] ==", "go if current_direction == 'none': choice = random.choice(options) #remove opposite", "pass class Decision(object): def chooseBestOption(gamedata): options = ['up', 'down', 'right',", "current_direction == 'none': choice = random.choice(options) #remove opposite direction if", "my_position[0] == my_position[1] == my_position[2]: return 1 elif my_position[1] ==", "= me['body'] if my_position[0] == my_position[1] == my_position[2]: return 1", "direction == 'right' or head['y'] == 0 and direction =='left':", "=='left': return {'type': 
'parallel', 'identifier': 'top', 'direction': direction} elif head['x']", "= 'right' elif Assess.wallProximity(gamedata)['identifier'][0] == 't' and Assess.wallProximity(gamedata)['identifier'][4] == 'r':", "direction if current_direction == 'up': options.remove('down') if current_direction == 'down':", "random class Status(object): def getHeadPosition(gamedata): me = gamedata['you'] my_position =", "'direction': direction} #parrallels elif head['x'] == 0 and direction ==", "0 and head['y'] == height: return {'type': 'corner', 'identifier': 'bottom", "= random.choice(options) #remove opposite direction if current_direction == 'up': options.remove('down')", "getSnakesPositions(gamedata): pass class Assess(object): def wallProximity(gamedata): \"\"\"returns proximity to a", "Status.getMyDirection(gamedata) height = board_size['height'] - 1 width = board_size['width'] -", "and direction == 'left' or head['y'] == height and direction", "'left': return {'type': 'head-on', 'identifier': 'left', 'direction': direction} elif head['y']", "'right' elif Assess.wallProximity(gamedata)['identifier'][0] == 't' and Assess.wallProximity(gamedata)['identifier'][4] == 'r': if", "== 'right': options.remove('left') if current_direction == 'left': options.remove('right') #no danger", "head['x'] == width and head['y'] == height: return {'type': 'corner',", "options.remove('down') if current_direction == 'down': options.remove('up') if current_direction == 'right':", "def getMyLength(gamedata): me = gamedata['you'] my_position = me['body'] if my_position[0]", "['up', 'down', 'right', 'left'] current_direction = Status.getMyDirection(gamedata) #first go if", "= gamedata['you'] my_position = me['body'] if Status.getMyLength(gamedata) == 1: return", "or head['y'] == 0 and direction =='left': return {'type': 'parallel',", "getHealth(gamedata): pass def getBoardSize(gamedata): board_height = gamedata['board']['height'] board_width = gamedata['board']['width']", "= my_position[0] return head 
def getMyLength(gamedata): me = gamedata['you'] my_position", "return 'up' else: return 'down' def getHealth(gamedata): pass def getBoardSize(gamedata):", "== 'up': return {'type': 'parallel', 'identifier': 'right', 'direction': direction} elif", "elif Assess.wallProximity(gamedata)['type'] == 'corner': options.remove(current_direction) if Assess.wallProximity(gamedata)['identifier'][0] == 't' and", "elif Assess.wallProximity(gamedata)['identifier'][0] == 't' and Assess.wallProximity(gamedata)['identifier'][4] == 'r': if 'up'", "width and direction == 'up': return {'type': 'parallel', 'identifier': 'right',", "and head['y'] == 0: return {'type': 'corner', 'identifier': 'top right',", "left', 'direction': direction} elif head['x'] == 0 and head['y'] ==", "== 'left' or head['y'] == height and direction == 'right':", "'corner', 'identifier': 'bottom left', 'direction': direction} elif head['x'] == width", "elif my_position[0]['x'] == my_position[1]['x'] and my_position[0]['y'] < my_position[1]['y']: return 'up'", "== 'right': return {'type': 'head-on', 'identifier': 'right', 'direction': direction} elif", "== 'right' or head['y'] == 0 and direction =='left': return", "head['y'] == height and direction == 'right': return {'type': 'parallel',", "elif Assess.wallProximity(gamedata)['type'] == 'head-on': options.remove(current_direction) choice = random.choice(options) #parallel elif", "elif head['x'] == width and direction =='down' or head['x'] ==", "== False: choice = current_direction #in a corner elif Assess.wallProximity(gamedata)['type']", "and Assess.wallProximity(gamedata)['identifier'][4] == 'r': if 'up' in options: choice =", "either parallel to, head-on or corner\"\"\" head = Status.getHeadPosition(gamedata) board_size", "#parrallels elif head['x'] == 0 and direction == 'up' or", "False def ownBodyProximity(gamedata): pass def killPossible(gamedata): pass def smallerSnakeNearby(gamedata): pass", "getHeadPosition(gamedata): me = gamedata['you'] my_position = 
me['body'] head = my_position[0]", "= gamedata['board']['height'] board_width = gamedata['board']['width'] dimensions = {'height': board_height, 'width':", "= board_size['width'] - 1 #corners if head['x'] == 0 and", "direction == 'down': return {'type': 'head-on', 'identifier': 'bottom', 'direction': direction}", "and direction == 'down': return {'type': 'head-on', 'identifier': 'bottom', 'direction':", "Status.getHeadPosition(gamedata) board_size = Status.getBoardSize(gamedata) direction = Status.getMyDirection(gamedata) height = board_size['height']", "elif my_position[0]['x'] < my_position[1]['x']: return 'left' elif my_position[0]['x'] == my_position[1]['x']", "== 't' and Assess.wallProximity(gamedata)['identifier'][4] == 'l': if 'up' in options:", "'parallel', 'identifier': 'left', 'direction': direction} elif head['y'] == 0 and", "== 'parallel': choice = current_direction else: print(\"shit\") print(options) return choice", "board_width} return dimensions def getFoodPositions(gamedata): pass def getSnakesPositions(gamedata): pass class", "options.remove('right') #no danger keep going if Assess.wallProximity(gamedata) == False: choice", "and direction =='left': return {'type': 'parallel', 'identifier': 'top', 'direction': direction}", "'right', 'direction': direction} elif head['y'] == height and direction ==", "return {'type': 'parallel', 'identifier': 'right', 'direction': direction} elif head['y'] ==", "board_height = gamedata['board']['height'] board_width = gamedata['board']['width'] dimensions = {'height': board_height,", "my_position = me['body'] if my_position[0] == my_position[1] == my_position[2]: return", "if current_direction == 'up': options.remove('down') if current_direction == 'down': options.remove('up')", "height and direction == 'left' or head['y'] == height and", "=='down' or head['x'] == width and direction == 'up': return", "getFoodPositions(gamedata): pass def getSnakesPositions(gamedata): pass class Assess(object): def 
wallProximity(gamedata): \"\"\"returns", "direction =='down' or head['x'] == width and direction == 'up':", "'parallel', 'identifier': 'top', 'direction': direction} elif head['x'] == width and", "elif head['x'] == 0 and direction == 'left': return {'type':", "'left' or head['y'] == height and direction == 'right': return", "'identifier': 'bottom right', 'direction': direction} #headons elif head['x'] == 0", "width and head['y'] == 0: return {'type': 'corner', 'identifier': 'top", "direction == 'up' or head['x'] == 0 and direction ==", "head['y'] == 0: return {'type': 'corner', 'identifier': 'top right', 'direction':", "'up': return {'type': 'head-on', 'identifier': 'top', 'direction': direction} elif head['x']", "- 1 #corners if head['x'] == 0 and head['y'] ==", "'head-on': options.remove(current_direction) choice = random.choice(options) #parallel elif Assess.wallProximity(gamedata)['type'] == 'parallel':", "head = Status.getHeadPosition(gamedata) board_size = Status.getBoardSize(gamedata) direction = Status.getMyDirection(gamedata) height", "< my_position[1]['x']: return 'left' elif my_position[0]['x'] == my_position[1]['x'] and my_position[0]['y']", "my_position = me['body'] head = my_position[0] return head def getMyLength(gamedata):", "== 0 and direction == 'down': return {'type': 'parallel', 'identifier':", "'identifier': 'bottom', 'direction': direction} #parrallels elif head['x'] == 0 and", "height and direction == 'down': return {'type': 'head-on', 'identifier': 'bottom',", "direction} else: return False def ownBodyProximity(gamedata): pass def killPossible(gamedata): pass", "'left' #headon elif Assess.wallProximity(gamedata)['type'] == 'head-on': options.remove(current_direction) choice = random.choice(options)", "== 'l': if 'up' in options: choice = 'down' else:", "else: return len(my_position) def getMyDirection(gamedata): me = gamedata['you'] my_position =", "head['x'] == 0 and direction == 'down': return {'type': 'parallel',", "return head def 
getMyLength(gamedata): me = gamedata['you'] my_position = me['body']", "== 'down': return {'type': 'head-on', 'identifier': 'bottom', 'direction': direction} #parrallels", "return {'type': 'head-on', 'identifier': 'bottom', 'direction': direction} #parrallels elif head['x']", "direction} elif head['y'] == height and direction == 'left' or", "chaseFood(): pass def fleeSnake(): pass def chaseSnake(): pass class Decision(object):", "in options: choice = 'down' else: choice = 'left' #headon", "pass def biggerSnakeNearby(gamedata): pass def foodNearby(gamedata): pass class Action(object): def", "return 'down' def getHealth(gamedata): pass def getBoardSize(gamedata): board_height = gamedata['board']['height']", "== 'up' or head['x'] == 0 and direction == 'down':", "board_size['height'] - 1 width = board_size['width'] - 1 #corners if", "'direction': direction} elif head['x'] == width and head['y'] == height:", "direction == 'right': return {'type': 'head-on', 'identifier': 'right', 'direction': direction}", "def getHealth(gamedata): pass def getBoardSize(gamedata): board_height = gamedata['board']['height'] board_width =", "biggerSnakeNearby(gamedata): pass def foodNearby(gamedata): pass class Action(object): def avoidDeath(): pass", "def getSnakesPositions(gamedata): pass class Assess(object): def wallProximity(gamedata): \"\"\"returns proximity to", "return {'type': 'parallel', 'identifier': 'left', 'direction': direction} elif head['y'] ==", "me = gamedata['you'] my_position = me['body'] if my_position[0] == my_position[1]", "width = board_size['width'] - 1 #corners if head['x'] == 0", "1 #corners if head['x'] == 0 and head['y'] == 0:", "foodNearby(gamedata): pass class Action(object): def avoidDeath(): pass def chaseFood(): pass", "= Status.getMyDirection(gamedata) height = board_size['height'] - 1 width = board_size['width']", "my_position[0]['x'] > my_position[1]['x']: return 'right' elif my_position[0]['x'] < my_position[1]['x']: return", "my_position[1]['x'] and 
my_position[0]['y'] < my_position[1]['y']: return 'up' else: return 'down'", "'direction': direction} elif head['y'] == 0 and direction == 'up':", "'direction': direction} elif head['x'] == width and direction =='down' or", "options.remove(current_direction) choice = random.choice(options) #parallel elif Assess.wallProximity(gamedata)['type'] == 'parallel': choice", "choice = 'left' #headon elif Assess.wallProximity(gamedata)['type'] == 'head-on': options.remove(current_direction) choice", "return {'type': 'corner', 'identifier': 'top right', 'direction': direction} elif head['x']", "pass def fleeSnake(): pass def chaseSnake(): pass class Decision(object): def", "random.choice(options) #parallel elif Assess.wallProximity(gamedata)['type'] == 'parallel': choice = current_direction else:", "Assess(object): def wallProximity(gamedata): \"\"\"returns proximity to a wall either parallel", "'identifier': 'bottom', 'direction': direction} else: return False def ownBodyProximity(gamedata): pass", "'identifier': 'top left', 'direction': direction} elif head['x'] == 0 and", "pass class Assess(object): def wallProximity(gamedata): \"\"\"returns proximity to a wall", "Assess.wallProximity(gamedata)['type'] == 'parallel': choice = current_direction else: print(\"shit\") print(options) return", "'down': return {'type': 'head-on', 'identifier': 'bottom', 'direction': direction} #parrallels elif", "head['y'] == height and direction == 'left' or head['y'] ==", "== my_position[2]: return 1 elif my_position[1] == my_position[2]: return 2", "= board_size['height'] - 1 width = board_size['width'] - 1 #corners", "gamedata['you'] my_position = me['body'] if my_position[0] == my_position[1] == my_position[2]:", "if Assess.wallProximity(gamedata) == False: choice = current_direction #in a corner", "{'type': 'head-on', 'identifier': 'top', 'direction': direction} elif head['x'] == width", "0 and direction =='left': return {'type': 'parallel', 'identifier': 'top', 'direction':", "direction} 
elif head['y'] == 0 and direction == 'up': return", "Assess.wallProximity(gamedata)['identifier'][0] == 't' and Assess.wallProximity(gamedata)['identifier'][4] == 'l': if 'up' in", "board_width = gamedata['board']['width'] dimensions = {'height': board_height, 'width': board_width} return", "'direction': direction} elif head['x'] == width and direction == 'right':", "if current_direction == 'left': options.remove('right') #no danger keep going if", "Decision(object): def chooseBestOption(gamedata): options = ['up', 'down', 'right', 'left'] current_direction", "and direction =='down' or head['x'] == width and direction ==", "wall either parallel to, head-on or corner\"\"\" head = Status.getHeadPosition(gamedata)", "== 0 and direction =='left': return {'type': 'parallel', 'identifier': 'top',", "elif head['y'] == height and direction == 'left' or head['y']", "'down': options.remove('up') if current_direction == 'right': options.remove('left') if current_direction ==", "= gamedata['you'] my_position = me['body'] if my_position[0] == my_position[1] ==", "my_position = me['body'] if Status.getMyLength(gamedata) == 1: return 'none' elif", "my_position[1] == my_position[2]: return 2 else: return len(my_position) def getMyDirection(gamedata):", "== 1: return 'none' elif my_position[0]['x'] > my_position[1]['x']: return 'right'", "False: choice = current_direction #in a corner elif Assess.wallProximity(gamedata)['type'] ==", "choice = random.choice(options) #parallel elif Assess.wallProximity(gamedata)['type'] == 'parallel': choice =", "direction == 'left': return {'type': 'head-on', 'identifier': 'left', 'direction': direction}", "pass def killPossible(gamedata): pass def smallerSnakeNearby(gamedata): pass def biggerSnakeNearby(gamedata): pass", "def chooseBestOption(gamedata): options = ['up', 'down', 'right', 'left'] current_direction =", "'r': if 'up' in options: choice = 'down' else: choice", "= 'left' #headon elif Assess.wallProximity(gamedata)['type'] == 'head-on': 
options.remove(current_direction) choice =", "pass def getBoardSize(gamedata): board_height = gamedata['board']['height'] board_width = gamedata['board']['width'] dimensions", "== 'r': if 'up' in options: choice = 'down' else:", "'right' or head['y'] == 0 and direction =='left': return {'type':", "choice = 'down' else: choice = 'right' elif Assess.wallProximity(gamedata)['identifier'][0] ==", "my_position[0]['y'] < my_position[1]['y']: return 'up' else: return 'down' def getHealth(gamedata):", "pass def getSnakesPositions(gamedata): pass class Assess(object): def wallProximity(gamedata): \"\"\"returns proximity", "'left', 'direction': direction} elif head['y'] == 0 and direction ==", "== height and direction == 'down': return {'type': 'head-on', 'identifier':", "len(my_position) def getMyDirection(gamedata): me = gamedata['you'] my_position = me['body'] if", "pass def chaseFood(): pass def fleeSnake(): pass def chaseSnake(): pass", "Assess.wallProximity(gamedata)['type'] == 'head-on': options.remove(current_direction) choice = random.choice(options) #parallel elif Assess.wallProximity(gamedata)['type']", "head['x'] == 0 and direction == 'up' or head['x'] ==", "'up': return {'type': 'parallel', 'identifier': 'right', 'direction': direction} elif head['y']", "def ownBodyProximity(gamedata): pass def killPossible(gamedata): pass def smallerSnakeNearby(gamedata): pass def", "#no danger keep going if Assess.wallProximity(gamedata) == False: choice =", "'identifier': 'left', 'direction': direction} elif head['y'] == 0 and direction", "= me['body'] head = my_position[0] return head def getMyLength(gamedata): me", "return {'type': 'parallel', 'identifier': 'bottom', 'direction': direction} else: return False", "== height: return {'type': 'corner', 'identifier': 'bottom left', 'direction': direction}", "my_position[1]['y']: return 'up' else: return 'down' def getHealth(gamedata): pass def", "'none' elif my_position[0]['x'] > my_position[1]['x']: return 'right' elif 
my_position[0]['x'] <", "choice = current_direction #in a corner elif Assess.wallProximity(gamedata)['type'] == 'corner':", "board_size['width'] - 1 #corners if head['x'] == 0 and head['y']", "return dimensions def getFoodPositions(gamedata): pass def getSnakesPositions(gamedata): pass class Assess(object):", "== height and direction == 'right': return {'type': 'parallel', 'identifier':", "fleeSnake(): pass def chaseSnake(): pass class Decision(object): def chooseBestOption(gamedata): options", "'identifier': 'top', 'direction': direction} elif head['x'] == width and direction", "{'type': 'parallel', 'identifier': 'bottom', 'direction': direction} else: return False def", "- 1 width = board_size['width'] - 1 #corners if head['x']", "head['x'] == 0 and head['y'] == height: return {'type': 'corner',", "head['y'] == 0 and direction == 'right' or head['y'] ==", "'top', 'direction': direction} elif head['x'] == width and direction ==", "'down' else: choice = 'right' elif Assess.wallProximity(gamedata)['identifier'][0] == 't' and", "'bottom right', 'direction': direction} #headons elif head['x'] == 0 and", "'left': options.remove('right') #no danger keep going if Assess.wallProximity(gamedata) == False:", "0: return {'type': 'corner', 'identifier': 'top left', 'direction': direction} elif", "or head['y'] == height and direction == 'right': return {'type':", "parallel to, head-on or corner\"\"\" head = Status.getHeadPosition(gamedata) board_size =", "getMyLength(gamedata): me = gamedata['you'] my_position = me['body'] if my_position[0] ==", "direction} elif head['y'] == 0 and direction == 'right' or", "and direction == 'up': return {'type': 'parallel', 'identifier': 'right', 'direction':", "choice = 'right' elif Assess.wallProximity(gamedata)['identifier'][0] == 't' and Assess.wallProximity(gamedata)['identifier'][4] ==", "right', 'direction': direction} #headons elif head['x'] == 0 and direction", "'direction': direction} elif head['x'] == 0 and head['y'] == height:", 
"== height and direction == 'left' or head['y'] == height", "'corner', 'identifier': 'top right', 'direction': direction} elif head['x'] == width", "elif head['x'] == 0 and head['y'] == height: return {'type':", "if Assess.wallProximity(gamedata)['identifier'][0] == 't' and Assess.wallProximity(gamedata)['identifier'][4] == 'l': if 'up'", "board_size = Status.getBoardSize(gamedata) direction = Status.getMyDirection(gamedata) height = board_size['height'] -", "== 0 and head['y'] == height: return {'type': 'corner', 'identifier':", "'right': options.remove('left') if current_direction == 'left': options.remove('right') #no danger keep", "== 0 and direction == 'up': return {'type': 'head-on', 'identifier':", "1 width = board_size['width'] - 1 #corners if head['x'] ==", "return {'type': 'head-on', 'identifier': 'top', 'direction': direction} elif head['x'] ==", "to, head-on or corner\"\"\" head = Status.getHeadPosition(gamedata) board_size = Status.getBoardSize(gamedata)", "== 0 and direction == 'up' or head['x'] == 0", "elif head['x'] == width and head['y'] == 0: return {'type':", "return False def ownBodyProximity(gamedata): pass def killPossible(gamedata): pass def smallerSnakeNearby(gamedata):", "< my_position[1]['y']: return 'up' else: return 'down' def getHealth(gamedata): pass", "if current_direction == 'right': options.remove('left') if current_direction == 'left': options.remove('right')", "1 elif my_position[1] == my_position[2]: return 2 else: return len(my_position)", "'head-on', 'identifier': 'right', 'direction': direction} elif head['y'] == height and", "direction} elif head['x'] == width and direction == 'right': return", "Assess.wallProximity(gamedata)['identifier'][0] == 't' and Assess.wallProximity(gamedata)['identifier'][4] == 'r': if 'up' in", "elif Assess.wallProximity(gamedata)['type'] == 'parallel': choice = current_direction else: print(\"shit\") print(options)", "and direction == 'down': return {'type': 'parallel', 'identifier': 'left', 
'direction':", "class Assess(object): def wallProximity(gamedata): \"\"\"returns proximity to a wall either", "== width and head['y'] == height: return {'type': 'corner', 'identifier':", "current_direction == 'left': options.remove('right') #no danger keep going if Assess.wallProximity(gamedata)", "and direction == 'left': return {'type': 'head-on', 'identifier': 'left', 'direction':", "== 'head-on': options.remove(current_direction) choice = random.choice(options) #parallel elif Assess.wallProximity(gamedata)['type'] ==", "my_position[0]['x'] < my_position[1]['x']: return 'left' elif my_position[0]['x'] == my_position[1]['x'] and", "'top', 'direction': direction} elif head['x'] == width and direction =='down'", "getMyDirection(gamedata): me = gamedata['you'] my_position = me['body'] if Status.getMyLength(gamedata) ==", "== 'up': options.remove('down') if current_direction == 'down': options.remove('up') if current_direction", "else: choice = 'left' #headon elif Assess.wallProximity(gamedata)['type'] == 'head-on': options.remove(current_direction)", "my_position[0]['x'] == my_position[1]['x'] and my_position[0]['y'] < my_position[1]['y']: return 'up' else:", "{'type': 'corner', 'identifier': 'bottom right', 'direction': direction} #headons elif head['x']", "proximity to a wall either parallel to, head-on or corner\"\"\"", "elif my_position[0]['x'] > my_position[1]['x']: return 'right' elif my_position[0]['x'] < my_position[1]['x']:", "def getFoodPositions(gamedata): pass def getSnakesPositions(gamedata): pass class Assess(object): def wallProximity(gamedata):", "'up': options.remove('down') if current_direction == 'down': options.remove('up') if current_direction ==", "head['x'] == width and direction == 'up': return {'type': 'parallel',", "gamedata['board']['width'] dimensions = {'height': board_height, 'width': board_width} return dimensions def", "'up' in options: choice = 'down' else: choice = 'left'", "gamedata['you'] my_position = me['body'] head = 
my_position[0] return head def", "'identifier': 'top right', 'direction': direction} elif head['x'] == width and", "if current_direction == 'down': options.remove('up') if current_direction == 'right': options.remove('left')", "#headon elif Assess.wallProximity(gamedata)['type'] == 'head-on': options.remove(current_direction) choice = random.choice(options) #parallel", "== width and direction == 'right': return {'type': 'head-on', 'identifier':", "'right': return {'type': 'parallel', 'identifier': 'bottom', 'direction': direction} else: return", "random.choice(options) #remove opposite direction if current_direction == 'up': options.remove('down') if", "return 1 elif my_position[1] == my_position[2]: return 2 else: return", "head['x'] == 0 and direction == 'left': return {'type': 'head-on',", "== 0 and head['y'] == 0: return {'type': 'corner', 'identifier':", "class Action(object): def avoidDeath(): pass def chaseFood(): pass def fleeSnake():", "'head-on', 'identifier': 'top', 'direction': direction} elif head['x'] == width and", "= Status.getBoardSize(gamedata) direction = Status.getMyDirection(gamedata) height = board_size['height'] - 1", "'top right', 'direction': direction} elif head['x'] == width and head['y']", "= 'down' else: choice = 'right' elif Assess.wallProximity(gamedata)['identifier'][0] == 't'", "and head['y'] == height: return {'type': 'corner', 'identifier': 'bottom right',", "0 and direction == 'down': return {'type': 'parallel', 'identifier': 'left',", "choice = 'down' else: choice = 'left' #headon elif Assess.wallProximity(gamedata)['type']", "if current_direction == 'none': choice = random.choice(options) #remove opposite direction", "'up' or head['x'] == 0 and direction == 'down': return", "direction == 'up': return {'type': 'head-on', 'identifier': 'top', 'direction': direction}", "and head['y'] == height: return {'type': 'corner', 'identifier': 'bottom left',", "me = gamedata['you'] my_position = me['body'] head = my_position[0] return", 
"and head['y'] == 0: return {'type': 'corner', 'identifier': 'top left',", "if Status.getMyLength(gamedata) == 1: return 'none' elif my_position[0]['x'] > my_position[1]['x']:", "= me['body'] if Status.getMyLength(gamedata) == 1: return 'none' elif my_position[0]['x']", "or corner\"\"\" head = Status.getHeadPosition(gamedata) board_size = Status.getBoardSize(gamedata) direction =", "0 and direction == 'left': return {'type': 'head-on', 'identifier': 'left',", "Assess.wallProximity(gamedata) == False: choice = current_direction #in a corner elif", "{'type': 'parallel', 'identifier': 'top', 'direction': direction} elif head['x'] == width", "'l': if 'up' in options: choice = 'down' else: choice", "my_position[0] return head def getMyLength(gamedata): me = gamedata['you'] my_position =", "my_position[1] == my_position[2]: return 1 elif my_position[1] == my_position[2]: return", "pass def smallerSnakeNearby(gamedata): pass def biggerSnakeNearby(gamedata): pass def foodNearby(gamedata): pass", "'up' else: return 'down' def getHealth(gamedata): pass def getBoardSize(gamedata): board_height", "#headons elif head['x'] == 0 and direction == 'left': return", "direction == 'up': return {'type': 'parallel', 'identifier': 'right', 'direction': direction}", "= {'height': board_height, 'width': board_width} return dimensions def getFoodPositions(gamedata): pass", "elif head['x'] == 0 and direction == 'up' or head['x']", "my_position[2]: return 1 elif my_position[1] == my_position[2]: return 2 else:", "def chaseSnake(): pass class Decision(object): def chooseBestOption(gamedata): options = ['up',", "def biggerSnakeNearby(gamedata): pass def foodNearby(gamedata): pass class Action(object): def avoidDeath():", "'head-on', 'identifier': 'bottom', 'direction': direction} #parrallels elif head['x'] == 0", "return len(my_position) def getMyDirection(gamedata): me = gamedata['you'] my_position = me['body']", "options.remove('left') if current_direction == 'left': options.remove('right') 
#no danger keep going", "#in a corner elif Assess.wallProximity(gamedata)['type'] == 'corner': options.remove(current_direction) if Assess.wallProximity(gamedata)['identifier'][0]", "'corner': options.remove(current_direction) if Assess.wallProximity(gamedata)['identifier'][0] == 't' and Assess.wallProximity(gamedata)['identifier'][4] == 'l':", "options.remove(current_direction) if Assess.wallProximity(gamedata)['identifier'][0] == 't' and Assess.wallProximity(gamedata)['identifier'][4] == 'l': if", "and Assess.wallProximity(gamedata)['identifier'][4] == 'l': if 'up' in options: choice =", "else: return False def ownBodyProximity(gamedata): pass def killPossible(gamedata): pass def", "def getMyDirection(gamedata): me = gamedata['you'] my_position = me['body'] if Status.getMyLength(gamedata)", "if my_position[0] == my_position[1] == my_position[2]: return 1 elif my_position[1]", "head['x'] == 0 and head['y'] == 0: return {'type': 'corner',", "== 'corner': options.remove(current_direction) if Assess.wallProximity(gamedata)['identifier'][0] == 't' and Assess.wallProximity(gamedata)['identifier'][4] ==", "return {'type': 'corner', 'identifier': 'bottom right', 'direction': direction} #headons elif", "'bottom', 'direction': direction} else: return False def ownBodyProximity(gamedata): pass def", "def foodNearby(gamedata): pass class Action(object): def avoidDeath(): pass def chaseFood():", "'parallel', 'identifier': 'bottom', 'direction': direction} else: return False def ownBodyProximity(gamedata):", "{'type': 'head-on', 'identifier': 'bottom', 'direction': direction} #parrallels elif head['x'] ==", "'left'] current_direction = Status.getMyDirection(gamedata) #first go if current_direction == 'none':", "direction == 'right': return {'type': 'parallel', 'identifier': 'bottom', 'direction': direction}", "direction} elif head['x'] == width and direction =='down' or head['x']", "or head['x'] == width and direction == 'up': return {'type':", "head['x'] == width and 
head['y'] == 0: return {'type': 'corner',", "direction == 'left' or head['y'] == height and direction ==", "options: choice = 'down' else: choice = 'left' #headon elif", "'down' else: choice = 'left' #headon elif Assess.wallProximity(gamedata)['type'] == 'head-on':", "head['y'] == 0: return {'type': 'corner', 'identifier': 'top left', 'direction':", "me['body'] if my_position[0] == my_position[1] == my_position[2]: return 1 elif", "head['x'] == width and direction == 'right': return {'type': 'head-on',", "getBoardSize(gamedata): board_height = gamedata['board']['height'] board_width = gamedata['board']['width'] dimensions = {'height':", "== 'down': options.remove('up') if current_direction == 'right': options.remove('left') if current_direction", "0: return {'type': 'corner', 'identifier': 'top right', 'direction': direction} elif", "class Status(object): def getHeadPosition(gamedata): me = gamedata['you'] my_position = me['body']", "direction} #parrallels elif head['x'] == 0 and direction == 'up'", "def getHeadPosition(gamedata): me = gamedata['you'] my_position = me['body'] head =", "def chaseFood(): pass def fleeSnake(): pass def chaseSnake(): pass class", "return 'none' elif my_position[0]['x'] > my_position[1]['x']: return 'right' elif my_position[0]['x']", "and direction == 'up': return {'type': 'head-on', 'identifier': 'top', 'direction':", "'t' and Assess.wallProximity(gamedata)['identifier'][4] == 'r': if 'up' in options: choice", "Status.getMyDirection(gamedata) #first go if current_direction == 'none': choice = random.choice(options)", "'parallel', 'identifier': 'right', 'direction': direction} elif head['y'] == height and", "and direction == 'right' or head['y'] == 0 and direction", "dimensions = {'height': board_height, 'width': board_width} return dimensions def getFoodPositions(gamedata):", "elif head['y'] == 0 and direction == 'up': return {'type':", "current_direction == 'down': options.remove('up') if current_direction == 'right': 
options.remove('left') if", "== 't' and Assess.wallProximity(gamedata)['identifier'][4] == 'r': if 'up' in options:", "{'type': 'head-on', 'identifier': 'right', 'direction': direction} elif head['y'] == height", "'corner', 'identifier': 'bottom right', 'direction': direction} #headons elif head['x'] ==", "= gamedata['you'] my_position = me['body'] head = my_position[0] return head", "#corners if head['x'] == 0 and head['y'] == 0: return", "me['body'] head = my_position[0] return head def getMyLength(gamedata): me =", "head['y'] == height and direction == 'down': return {'type': 'head-on'," ]
[ "str, has_master: bool, master_only: bool, bullet: bool, mates: bool) ->", "score.mate() if mate is not None: return 1 if mate", "import chess import chess.engine from model import EngineMove, NextMovePair from", "480) except: return True def exclude_rating(line: str, mates: bool) ->", "False try: return int(line[11:15]) < (1501 if mates else 1600)", "side: Color) -> bool: return material_diff(board, side) > 0 def", "math.exp(-0.004 * cp)) - 1 if cp is not None", "def avg_knps(): global nps return round(sum(nps) / len(nps) / 1000)", "def exclude_rating(line: str, mates: bool) -> bool: if not line.startswith(\"[WhiteElo", ">= 160 else: return total < 160 or total >=", "- material_count(board, not side) def is_up_in_material(board: Board, side: Color) ->", "\"\"\" mate = score.mate() if mate is not None: return", "return material_count(board, side) - material_count(board, not side) def is_up_in_material(board: Board,", "Color, Board from chess.pgn import GameNode from chess.engine import SimpleEngine,", "side) > 0 def get_next_move_pair(engine: SimpleEngine, node: GameNode, winner: Color,", "limit) global nps nps.append(info[0][\"nps\"]) nps = nps[-20:] # print(info) best", "limit = limit) global nps nps.append(info[0][\"nps\"]) nps = nps[-20:] #", "not None: return 1 if mate > 0 else -1", "-> NextMovePair: info = engine.analyse(node.board(), multipv = 2, limit =", "-1 cp = score.score() return 2 / (1 + math.exp(-0.004", "bullet: bool, mates: bool) -> bool: if not line.startswith(\"[TimeControl \"):", "{ chess.PAWN: 1, chess.KNIGHT: 3, chess.BISHOP: 3, chess.ROOK: 5, chess.QUEEN:", "bool: if not line.startswith(\"[TimeControl \"): return False if master_only and", "return total < 160 or total >= 480 else: return", "NextMovePair from chess import Color, Board from chess.pgn import GameNode", "= int(seconds) + int(increment) * 40 if master_only or mates:", "return False try: return int(line[11:15]) < (1501 if mates else", "= { chess.PAWN: 1, chess.KNIGHT: 3, 
chess.BISHOP: 3, chess.ROOK: 5,", "-1 to 1 https://graphsketch.com/?eqn1_color=1&eqn1_eqn=100+*+%282+%2F+%281+%2B+exp%28-0.004+*+x%29%29+-+1%29&eqn2_color=2&eqn2_eqn=&eqn3_color=3&eqn3_eqn=&eqn4_color=4&eqn4_eqn=&eqn5_color=5&eqn5_eqn=&eqn6_color=6&eqn6_eqn=&x_min=-1000&x_max=1000&y_min=-100&y_max=100&x_tick=100&y_tick=10&x_label_freq=2&y_label_freq=2&do_grid=0&do_grid=1&bold_labeled_lines=0&bold_labeled_lines=1&line_width=4&image_w=850&image_h=525 \"\"\" mate = score.mate() if mate", "cp is not None else 0 CORRESP_TIME = 999999 def", "material_count(board, side) - material_count(board, not side) def is_up_in_material(board: Board, side:", "best = EngineMove(info[0][\"pv\"][0], info[0][\"score\"].pov(winner)) second = EngineMove(info[1][\"pv\"][0], info[1][\"score\"].pov(winner)) if len(info)", "total >= 160 else: return total < 160 or total", "master_only or mates: if bullet: return total < 30 or", "< 160 or total >= 480 else: return total <", "9 } return sum(len(board.pieces(piece_type, side)) * value for piece_type, value", "nps[-20:] # print(info) best = EngineMove(info[0][\"pv\"][0], info[0][\"score\"].pov(winner)) second = EngineMove(info[1][\"pv\"][0],", "if not line.startswith(\"[WhiteElo \") and not line.startswith(\"[BlackElo \"): return False", "second = EngineMove(info[1][\"pv\"][0], info[1][\"score\"].pov(winner)) if len(info) > 1 else None", "not None else 0 CORRESP_TIME = 999999 def reject_by_time_control(line: str,", "+ int(increment) * 40 if master_only or mates: if bullet:", "bool, mates: bool) -> bool: if not line.startswith(\"[TimeControl \"): return", "= line[1:][:-2].split()[1].replace(\"\\\"\", \"\").split(\"+\") total = int(seconds) + int(increment) * 40", "total < 160 or total >= 480 else: return total", "str, mates: bool) -> bool: if not line.startswith(\"[WhiteElo \") and", "# print(info) best = EngineMove(info[0][\"pv\"][0], info[0][\"score\"].pov(winner)) second = EngineMove(info[1][\"pv\"][0], info[1][\"score\"].pov(winner))", "else 0 
def win_chances(score: Score) -> float: \"\"\" winning chances", "if master_only and not has_master: return True try: seconds, increment", "info = engine.analyse(node.board(), multipv = 2, limit = limit) global", "Score) -> float: \"\"\" winning chances from -1 to 1", "int: return material_count(board, side) - material_count(board, not side) def is_up_in_material(board:", "import math import chess import chess.engine from model import EngineMove,", "/ 1000) if nps else 0 def win_chances(score: Score) ->", "return True try: seconds, increment = line[1:][:-2].split()[1].replace(\"\\\"\", \"\").split(\"+\") total =", "chess.engine import SimpleEngine, Score nps = [] def material_count(board: Board,", "side) def is_up_in_material(board: Board, side: Color) -> bool: return material_diff(board,", "= EngineMove(info[1][\"pv\"][0], info[1][\"score\"].pov(winner)) if len(info) > 1 else None return", "= engine.analyse(node.board(), multipv = 2, limit = limit) global nps", "chess.QUEEN: 9 } return sum(len(board.pieces(piece_type, side)) * value for piece_type,", "return material_diff(board, side) > 0 def get_next_move_pair(engine: SimpleEngine, node: GameNode,", "win_chances(score: Score) -> float: \"\"\" winning chances from -1 to", "value in values.items()) def material_diff(board: Board, side: Color) -> int:", "multipv = 2, limit = limit) global nps nps.append(info[0][\"nps\"]) nps", "Color) -> int: values = { chess.PAWN: 1, chess.KNIGHT: 3,", "GameNode from chess.engine import SimpleEngine, Score nps = [] def", "from model import EngineMove, NextMovePair from chess import Color, Board", "nps return round(sum(nps) / len(nps) / 1000) if nps else", "return 1 if mate > 0 else -1 cp =", "return round(sum(nps) / len(nps) / 1000) if nps else 0", "(1 + math.exp(-0.004 * cp)) - 1 if cp is", "master_only and not has_master: return True try: seconds, increment =", "GameNode, winner: Color, limit: chess.engine.Limit) -> NextMovePair: info = engine.analyse(node.board(),", "* cp)) - 1 
if cp is not None else", "side) - material_count(board, not side) def is_up_in_material(board: Board, side: Color)", "NextMovePair(node, winner, best, second) def avg_knps(): global nps return round(sum(nps)", "round(sum(nps) / len(nps) / 1000) if nps else 0 def", "score.score() return 2 / (1 + math.exp(-0.004 * cp)) -", "def get_next_move_pair(engine: SimpleEngine, node: GameNode, winner: Color, limit: chess.engine.Limit) ->", "return NextMovePair(node, winner, best, second) def avg_knps(): global nps return", "total >= 480 else: return total < (160 if has_master", "except: return True def exclude_rating(line: str, mates: bool) -> bool:", "side: Color) -> int: values = { chess.PAWN: 1, chess.KNIGHT:", "int: values = { chess.PAWN: 1, chess.KNIGHT: 3, chess.BISHOP: 3,", "values = { chess.PAWN: 1, chess.KNIGHT: 3, chess.BISHOP: 3, chess.ROOK:", "is_up_in_material(board: Board, side: Color) -> bool: return material_diff(board, side) >", "limit: chess.engine.Limit) -> NextMovePair: info = engine.analyse(node.board(), multipv = 2,", "3, chess.ROOK: 5, chess.QUEEN: 9 } return sum(len(board.pieces(piece_type, side)) *", "value for piece_type, value in values.items()) def material_diff(board: Board, side:", "mate is not None: return 1 if mate > 0", "or mates: if bullet: return total < 30 or total", "else 480) except: return True def exclude_rating(line: str, mates: bool)", "\"): return False if master_only and not has_master: return True", "get_next_move_pair(engine: SimpleEngine, node: GameNode, winner: Color, limit: chess.engine.Limit) -> NextMovePair:", "has_master: return True try: seconds, increment = line[1:][:-2].split()[1].replace(\"\\\"\", \"\").split(\"+\") total", "to 1 
https://graphsketch.com/?eqn1_color=1&eqn1_eqn=100+*+%282+%2F+%281+%2B+exp%28-0.004+*+x%29%29+-+1%29&eqn2_color=2&eqn2_eqn=&eqn3_color=3&eqn3_eqn=&eqn4_color=4&eqn4_eqn=&eqn5_color=5&eqn5_eqn=&eqn6_color=6&eqn6_eqn=&x_min=-1000&x_max=1000&y_min=-100&y_max=100&x_tick=100&y_tick=10&x_label_freq=2&y_label_freq=2&do_grid=0&do_grid=1&bold_labeled_lines=0&bold_labeled_lines=1&line_width=4&image_w=850&image_h=525 \"\"\" mate = score.mate() if mate is", "if len(info) > 1 else None return NextMovePair(node, winner, best,", "from dataclasses import dataclass import math import chess import chess.engine", "0 else -1 cp = score.score() return 2 / (1", "return 2 / (1 + math.exp(-0.004 * cp)) - 1", "nps = nps[-20:] # print(info) best = EngineMove(info[0][\"pv\"][0], info[0][\"score\"].pov(winner)) second", "bool) -> bool: if not line.startswith(\"[WhiteElo \") and not line.startswith(\"[BlackElo", "None return NextMovePair(node, winner, best, second) def avg_knps(): global nps", "True try: seconds, increment = line[1:][:-2].split()[1].replace(\"\\\"\", \"\").split(\"+\") total = int(seconds)", "avg_knps(): global nps return round(sum(nps) / len(nps) / 1000) if", "line.startswith(\"[WhiteElo \") and not line.startswith(\"[BlackElo \"): return False try: return", "if has_master else 480) except: return True def exclude_rating(line: str,", "best, second) def avg_knps(): global nps return round(sum(nps) / len(nps)", "480 else: return total < (160 if has_master else 480)", "dataclasses import dataclass import math import chess import chess.engine from", "chess.KNIGHT: 3, chess.BISHOP: 3, chess.ROOK: 5, chess.QUEEN: 9 } return", "= [] def material_count(board: Board, side: Color) -> int: values", "def is_up_in_material(board: Board, side: Color) -> bool: return material_diff(board, side)", "5, chess.QUEEN: 9 } return sum(len(board.pieces(piece_type, side)) * value for", "-> float: \"\"\" winning chances from -1 to 1 
https://graphsketch.com/?eqn1_color=1&eqn1_eqn=100+*+%282+%2F+%281+%2B+exp%28-0.004+*+x%29%29+-+1%29&eqn2_color=2&eqn2_eqn=&eqn3_color=3&eqn3_eqn=&eqn4_color=4&eqn4_eqn=&eqn5_color=5&eqn5_eqn=&eqn6_color=6&eqn6_eqn=&x_min=-1000&x_max=1000&y_min=-100&y_max=100&x_tick=100&y_tick=10&x_label_freq=2&y_label_freq=2&do_grid=0&do_grid=1&bold_labeled_lines=0&bold_labeled_lines=1&line_width=4&image_w=850&image_h=525", "if mate > 0 else -1 cp = score.score() return", "= score.score() return 2 / (1 + math.exp(-0.004 * cp))", "not line.startswith(\"[TimeControl \"): return False if master_only and not has_master:", "total < (160 if has_master else 480) except: return True", "Board, side: Color) -> int: return material_count(board, side) - material_count(board,", "line.startswith(\"[BlackElo \"): return False try: return int(line[11:15]) < (1501 if", "mate > 0 else -1 cp = score.score() return 2", "values.items()) def material_diff(board: Board, side: Color) -> int: return material_count(board,", "-> int: return material_count(board, side) - material_count(board, not side) def", "None: return 1 if mate > 0 else -1 cp", "< 30 or total >= 160 else: return total <", "mate = score.mate() if mate is not None: return 1", "in values.items()) def material_diff(board: Board, side: Color) -> int: return", "chess.engine.Limit) -> NextMovePair: info = engine.analyse(node.board(), multipv = 2, limit", "winner: Color, limit: chess.engine.Limit) -> NextMovePair: info = engine.analyse(node.board(), multipv", "import chess.engine from model import EngineMove, NextMovePair from chess import", "0 def win_chances(score: Score) -> float: \"\"\" winning chances from", "not has_master: return True try: seconds, increment = line[1:][:-2].split()[1].replace(\"\\\"\", \"\").split(\"+\")", "side: Color) -> int: return material_count(board, side) - material_count(board, not", "Board, side: Color) -> int: values = { chess.PAWN: 1,", "math import chess import chess.engine from model import EngineMove, 
NextMovePair", "/ len(nps) / 1000) if nps else 0 def win_chances(score:", ">= 480 else: return total < (160 if has_master else", "len(nps) / 1000) if nps else 0 def win_chances(score: Score)", "for piece_type, value in values.items()) def material_diff(board: Board, side: Color)", "global nps return round(sum(nps) / len(nps) / 1000) if nps", "True def exclude_rating(line: str, mates: bool) -> bool: if not", "not line.startswith(\"[BlackElo \"): return False try: return int(line[11:15]) < (1501", "material_count(board: Board, side: Color) -> int: values = { chess.PAWN:", "\"): return False try: return int(line[11:15]) < (1501 if mates", "int(line[11:15]) < (1501 if mates else 1600) except: return True", "return True def exclude_rating(line: str, mates: bool) -> bool: if", "sum(len(board.pieces(piece_type, side)) * value for piece_type, value in values.items()) def", "Board, side: Color) -> bool: return material_diff(board, side) > 0", "-> int: values = { chess.PAWN: 1, chess.KNIGHT: 3, chess.BISHOP:", "1, chess.KNIGHT: 3, chess.BISHOP: 3, chess.ROOK: 5, chess.QUEEN: 9 }", "return False if master_only and not has_master: return True try:", "mates: bool) -> bool: if not line.startswith(\"[WhiteElo \") and not", "master_only: bool, bullet: bool, mates: bool) -> bool: if not", "float: \"\"\" winning chances from -1 to 1 https://graphsketch.com/?eqn1_color=1&eqn1_eqn=100+*+%282+%2F+%281+%2B+exp%28-0.004+*+x%29%29+-+1%29&eqn2_color=2&eqn2_eqn=&eqn3_color=3&eqn3_eqn=&eqn4_color=4&eqn4_eqn=&eqn5_color=5&eqn5_eqn=&eqn6_color=6&eqn6_eqn=&x_min=-1000&x_max=1000&y_min=-100&y_max=100&x_tick=100&y_tick=10&x_label_freq=2&y_label_freq=2&do_grid=0&do_grid=1&bold_labeled_lines=0&bold_labeled_lines=1&line_width=4&image_w=850&image_h=525 \"\"\"", "int(seconds) + int(increment) * 40 if master_only or mates: if", "mates: if bullet: return total < 30 or total >=", "return int(line[11:15]) < (1501 if mates else 1600) except: return", "if not line.startswith(\"[TimeControl \"): return False 
if master_only and not", "if bullet: return total < 30 or total >= 160", "material_count(board, not side) def is_up_in_material(board: Board, side: Color) -> bool:", "or total >= 480 else: return total < (160 if", "import SimpleEngine, Score nps = [] def material_count(board: Board, side:", "import GameNode from chess.engine import SimpleEngine, Score nps = []", "chess.pgn import GameNode from chess.engine import SimpleEngine, Score nps =", "nps.append(info[0][\"nps\"]) nps = nps[-20:] # print(info) best = EngineMove(info[0][\"pv\"][0], info[0][\"score\"].pov(winner))", "\"\"\" winning chances from -1 to 1 https://graphsketch.com/?eqn1_color=1&eqn1_eqn=100+*+%282+%2F+%281+%2B+exp%28-0.004+*+x%29%29+-+1%29&eqn2_color=2&eqn2_eqn=&eqn3_color=3&eqn3_eqn=&eqn4_color=4&eqn4_eqn=&eqn5_color=5&eqn5_eqn=&eqn6_color=6&eqn6_eqn=&x_min=-1000&x_max=1000&y_min=-100&y_max=100&x_tick=100&y_tick=10&x_label_freq=2&y_label_freq=2&do_grid=0&do_grid=1&bold_labeled_lines=0&bold_labeled_lines=1&line_width=4&image_w=850&image_h=525 \"\"\" mate", "cp = score.score() return 2 / (1 + math.exp(-0.004 *", "1 if cp is not None else 0 CORRESP_TIME =", "bool, bullet: bool, mates: bool) -> bool: if not line.startswith(\"[TimeControl", "try: seconds, increment = line[1:][:-2].split()[1].replace(\"\\\"\", \"\").split(\"+\") total = int(seconds) +", "chess.BISHOP: 3, chess.ROOK: 5, chess.QUEEN: 9 } return sum(len(board.pieces(piece_type, side))", "total = int(seconds) + int(increment) * 40 if master_only or", "False if master_only and not has_master: return True try: seconds,", "bullet: return total < 30 or total >= 160 else:", "import Color, Board from chess.pgn import GameNode from chess.engine import", "from chess.pgn import GameNode from chess.engine import SimpleEngine, Score nps", "model import EngineMove, NextMovePair from chess import Color, Board from", "len(info) > 1 else None return NextMovePair(node, winner, best, second)", "Score nps = [] def material_count(board: Board, side: Color) 
->", "import EngineMove, NextMovePair from chess import Color, Board from chess.pgn", "node: GameNode, winner: Color, limit: chess.engine.Limit) -> NextMovePair: info =", "chess import chess.engine from model import EngineMove, NextMovePair from chess", "else None return NextMovePair(node, winner, best, second) def avg_knps(): global", "winner, best, second) def avg_knps(): global nps return round(sum(nps) /", "-> bool: return material_diff(board, side) > 0 def get_next_move_pair(engine: SimpleEngine,", "1 if mate > 0 else -1 cp = score.score()", "piece_type, value in values.items()) def material_diff(board: Board, side: Color) ->", "1 https://graphsketch.com/?eqn1_color=1&eqn1_eqn=100+*+%282+%2F+%281+%2B+exp%28-0.004+*+x%29%29+-+1%29&eqn2_color=2&eqn2_eqn=&eqn3_color=3&eqn3_eqn=&eqn4_color=4&eqn4_eqn=&eqn5_color=5&eqn5_eqn=&eqn6_color=6&eqn6_eqn=&x_min=-1000&x_max=1000&y_min=-100&y_max=100&x_tick=100&y_tick=10&x_label_freq=2&y_label_freq=2&do_grid=0&do_grid=1&bold_labeled_lines=0&bold_labeled_lines=1&line_width=4&image_w=850&image_h=525 \"\"\" mate = score.mate() if mate is not", "> 0 else -1 cp = score.score() return 2 /", "def reject_by_time_control(line: str, has_master: bool, master_only: bool, bullet: bool, mates:", "total < 30 or total >= 160 else: return total", "not line.startswith(\"[WhiteElo \") and not line.startswith(\"[BlackElo \"): return False try:", "1000) if nps else 0 def win_chances(score: Score) -> float:", "= score.mate() if mate is not None: return 1 if", "< (160 if has_master else 480) except: return True def", "\") and not line.startswith(\"[BlackElo \"): return False try: return int(line[11:15])", "2 / (1 + math.exp(-0.004 * cp)) - 1 if", "-> bool: if not line.startswith(\"[WhiteElo \") and not line.startswith(\"[BlackElo \"):", "material_diff(board, side) > 0 def get_next_move_pair(engine: SimpleEngine, node: GameNode, winner:", "has_master else 480) except: return True def exclude_rating(line: str, mates:", "chess.ROOK: 5, chess.QUEEN: 9 
} return sum(len(board.pieces(piece_type, side)) * value", "second) def avg_knps(): global nps return round(sum(nps) / len(nps) /", "and not line.startswith(\"[BlackElo \"): return False try: return int(line[11:15]) <", "https://graphsketch.com/?eqn1_color=1&eqn1_eqn=100+*+%282+%2F+%281+%2B+exp%28-0.004+*+x%29%29+-+1%29&eqn2_color=2&eqn2_eqn=&eqn3_color=3&eqn3_eqn=&eqn4_color=4&eqn4_eqn=&eqn5_color=5&eqn5_eqn=&eqn6_color=6&eqn6_eqn=&x_min=-1000&x_max=1000&y_min=-100&y_max=100&x_tick=100&y_tick=10&x_label_freq=2&y_label_freq=2&do_grid=0&do_grid=1&bold_labeled_lines=0&bold_labeled_lines=1&line_width=4&image_w=850&image_h=525 \"\"\" mate = score.mate() if mate is not None:", "= 999999 def reject_by_time_control(line: str, has_master: bool, master_only: bool, bullet:", "try: return int(line[11:15]) < (1501 if mates else 1600) except:", "[] def material_count(board: Board, side: Color) -> int: values =", "from -1 to 1 https://graphsketch.com/?eqn1_color=1&eqn1_eqn=100+*+%282+%2F+%281+%2B+exp%28-0.004+*+x%29%29+-+1%29&eqn2_color=2&eqn2_eqn=&eqn3_color=3&eqn3_eqn=&eqn4_color=4&eqn4_eqn=&eqn5_color=5&eqn5_eqn=&eqn6_color=6&eqn6_eqn=&x_min=-1000&x_max=1000&y_min=-100&y_max=100&x_tick=100&y_tick=10&x_label_freq=2&y_label_freq=2&do_grid=0&do_grid=1&bold_labeled_lines=0&bold_labeled_lines=1&line_width=4&image_w=850&image_h=525 \"\"\" mate = score.mate() if", "40 if master_only or mates: if bullet: return total <", "info[1][\"score\"].pov(winner)) if len(info) > 1 else None return NextMovePair(node, winner,", "> 1 else None return NextMovePair(node, winner, best, second) def", "2, limit = limit) global nps nps.append(info[0][\"nps\"]) nps = nps[-20:]", "0 CORRESP_TIME = 999999 def reject_by_time_control(line: str, has_master: bool, master_only:", "winning chances from -1 to 1 
https://graphsketch.com/?eqn1_color=1&eqn1_eqn=100+*+%282+%2F+%281+%2B+exp%28-0.004+*+x%29%29+-+1%29&eqn2_color=2&eqn2_eqn=&eqn3_color=3&eqn3_eqn=&eqn4_color=4&eqn4_eqn=&eqn5_color=5&eqn5_eqn=&eqn6_color=6&eqn6_eqn=&x_min=-1000&x_max=1000&y_min=-100&y_max=100&x_tick=100&y_tick=10&x_label_freq=2&y_label_freq=2&do_grid=0&do_grid=1&bold_labeled_lines=0&bold_labeled_lines=1&line_width=4&image_w=850&image_h=525 \"\"\" mate =", "bool) -> bool: if not line.startswith(\"[TimeControl \"): return False if", "cp)) - 1 if cp is not None else 0", "chances from -1 to 1 https://graphsketch.com/?eqn1_color=1&eqn1_eqn=100+*+%282+%2F+%281+%2B+exp%28-0.004+*+x%29%29+-+1%29&eqn2_color=2&eqn2_eqn=&eqn3_color=3&eqn3_eqn=&eqn4_color=4&eqn4_eqn=&eqn5_color=5&eqn5_eqn=&eqn6_color=6&eqn6_eqn=&x_min=-1000&x_max=1000&y_min=-100&y_max=100&x_tick=100&y_tick=10&x_label_freq=2&y_label_freq=2&do_grid=0&do_grid=1&bold_labeled_lines=0&bold_labeled_lines=1&line_width=4&image_w=850&image_h=525 \"\"\" mate = score.mate()", "return sum(len(board.pieces(piece_type, side)) * value for piece_type, value in values.items())", "else -1 cp = score.score() return 2 / (1 +", "CORRESP_TIME = 999999 def reject_by_time_control(line: str, has_master: bool, master_only: bool,", "has_master: bool, master_only: bool, bullet: bool, mates: bool) -> bool:", "global nps nps.append(info[0][\"nps\"]) nps = nps[-20:] # print(info) best =", "= nps[-20:] # print(info) best = EngineMove(info[0][\"pv\"][0], info[0][\"score\"].pov(winner)) second =", "bool: if not line.startswith(\"[WhiteElo \") and not line.startswith(\"[BlackElo \"): return", "None else 0 CORRESP_TIME = 999999 def reject_by_time_control(line: str, has_master:", "EngineMove(info[0][\"pv\"][0], info[0][\"score\"].pov(winner)) second = EngineMove(info[1][\"pv\"][0], info[1][\"score\"].pov(winner)) if len(info) > 1", "= 2, limit = limit) global nps nps.append(info[0][\"nps\"]) nps =", "if nps else 0 def win_chances(score: Score) -> float: \"\"\"", "+ math.exp(-0.004 
* cp)) - 1 if cp is not", "return total < (160 if has_master else 480) except: return", "material_diff(board: Board, side: Color) -> int: return material_count(board, side) -", "and not has_master: return True try: seconds, increment = line[1:][:-2].split()[1].replace(\"\\\"\",", "SimpleEngine, Score nps = [] def material_count(board: Board, side: Color)", "Color, limit: chess.engine.Limit) -> NextMovePair: info = engine.analyse(node.board(), multipv =", "1 else None return NextMovePair(node, winner, best, second) def avg_knps():", "if master_only or mates: if bullet: return total < 30", "if mate is not None: return 1 if mate >", "999999 def reject_by_time_control(line: str, has_master: bool, master_only: bool, bullet: bool,", "else: return total < 160 or total >= 480 else:", "(160 if has_master else 480) except: return True def exclude_rating(line:", "chess import Color, Board from chess.pgn import GameNode from chess.engine", "line.startswith(\"[TimeControl \"): return False if master_only and not has_master: return", "from chess.engine import SimpleEngine, Score nps = [] def material_count(board:", "engine.analyse(node.board(), multipv = 2, limit = limit) global nps nps.append(info[0][\"nps\"])", "else: return total < (160 if has_master else 480) except:", "- 1 if cp is not None else 0 CORRESP_TIME", "nps = [] def material_count(board: Board, side: Color) -> int:", "* 40 if master_only or mates: if bullet: return total", "reject_by_time_control(line: str, has_master: bool, master_only: bool, bullet: bool, mates: bool)", "160 or total >= 480 else: return total < (160", "160 else: return total < 160 or total >= 480", "chess.engine from model import EngineMove, NextMovePair from chess import Color,", "seconds, increment = line[1:][:-2].split()[1].replace(\"\\\"\", \"\").split(\"+\") total = int(seconds) + int(increment)", "SimpleEngine, node: GameNode, winner: Color, limit: chess.engine.Limit) -> NextMovePair: info", "import dataclass import math import chess 
import chess.engine from model", "else 0 CORRESP_TIME = 999999 def reject_by_time_control(line: str, has_master: bool,", "print(info) best = EngineMove(info[0][\"pv\"][0], info[0][\"score\"].pov(winner)) second = EngineMove(info[1][\"pv\"][0], info[1][\"score\"].pov(winner)) if", "\"\").split(\"+\") total = int(seconds) + int(increment) * 40 if master_only", "30 or total >= 160 else: return total < 160", "exclude_rating(line: str, mates: bool) -> bool: if not line.startswith(\"[WhiteElo \")", "> 0 def get_next_move_pair(engine: SimpleEngine, node: GameNode, winner: Color, limit:", "side)) * value for piece_type, value in values.items()) def material_diff(board:", "if cp is not None else 0 CORRESP_TIME = 999999", "or total >= 160 else: return total < 160 or", "Board from chess.pgn import GameNode from chess.engine import SimpleEngine, Score", "chess.PAWN: 1, chess.KNIGHT: 3, chess.BISHOP: 3, chess.ROOK: 5, chess.QUEEN: 9", "not side) def is_up_in_material(board: Board, side: Color) -> bool: return", "3, chess.BISHOP: 3, chess.ROOK: 5, chess.QUEEN: 9 } return sum(len(board.pieces(piece_type,", "* value for piece_type, value in values.items()) def material_diff(board: Board,", "is not None else 0 CORRESP_TIME = 999999 def reject_by_time_control(line:", "info[0][\"score\"].pov(winner)) second = EngineMove(info[1][\"pv\"][0], info[1][\"score\"].pov(winner)) if len(info) > 1 else", "is not None: return 1 if mate > 0 else", "= limit) global nps nps.append(info[0][\"nps\"]) nps = nps[-20:] # print(info)", "mates: bool) -> bool: if not line.startswith(\"[TimeControl \"): return False", "increment = line[1:][:-2].split()[1].replace(\"\\\"\", \"\").split(\"+\") total = int(seconds) + int(increment) *", "} return sum(len(board.pieces(piece_type, side)) * value for piece_type, value in", "Color) -> bool: return material_diff(board, side) > 0 def get_next_move_pair(engine:", "def material_diff(board: Board, side: Color) -> int: return material_count(board, side)", "dataclass 
import math import chess import chess.engine from model import", "EngineMove, NextMovePair from chess import Color, Board from chess.pgn import", "0 def get_next_move_pair(engine: SimpleEngine, node: GameNode, winner: Color, limit: chess.engine.Limit)", "nps nps.append(info[0][\"nps\"]) nps = nps[-20:] # print(info) best = EngineMove(info[0][\"pv\"][0],", "/ (1 + math.exp(-0.004 * cp)) - 1 if cp", "int(increment) * 40 if master_only or mates: if bullet: return", "line[1:][:-2].split()[1].replace(\"\\\"\", \"\").split(\"+\") total = int(seconds) + int(increment) * 40 if", "bool, master_only: bool, bullet: bool, mates: bool) -> bool: if", "def win_chances(score: Score) -> float: \"\"\" winning chances from -1", "nps else 0 def win_chances(score: Score) -> float: \"\"\" winning", "NextMovePair: info = engine.analyse(node.board(), multipv = 2, limit = limit)", "-> bool: if not line.startswith(\"[TimeControl \"): return False if master_only", "= EngineMove(info[0][\"pv\"][0], info[0][\"score\"].pov(winner)) second = EngineMove(info[1][\"pv\"][0], info[1][\"score\"].pov(winner)) if len(info) >", "return total < 30 or total >= 160 else: return", "def material_count(board: Board, side: Color) -> int: values = {", "Color) -> int: return material_count(board, side) - material_count(board, not side)", "from chess import Color, Board from chess.pgn import GameNode from", "EngineMove(info[1][\"pv\"][0], info[1][\"score\"].pov(winner)) if len(info) > 1 else None return NextMovePair(node,", "bool: return material_diff(board, side) > 0 def get_next_move_pair(engine: SimpleEngine, node:" ]
[ "from datetime import datetime def main(): while True: a =", "= datetime.now() if n.hour == 6 and (n.minute-(n.minute%5)) == 15:", "time, morning from datetime import datetime def main(): while True:", "time.mktime(datetime.now().timetuple()) n = datetime.now() if n.hour == 6 and (n.minute-(n.minute%5))", "True: a = time.mktime(datetime.now().timetuple()) n = datetime.now() if n.hour ==", "morning from datetime import datetime def main(): while True: a", "datetime.now() if n.hour == 6 and (n.minute-(n.minute%5)) == 15: return", "== 6 and (n.minute-(n.minute%5)) == 15: return morning.main() time.sleep(300 -", "datetime def main(): while True: a = time.mktime(datetime.now().timetuple()) n =", "def main(): while True: a = time.mktime(datetime.now().timetuple()) n = datetime.now()", "= time.mktime(datetime.now().timetuple()) n = datetime.now() if n.hour == 6 and", "6 and (n.minute-(n.minute%5)) == 15: return morning.main() time.sleep(300 - (time.mktime(datetime.now().timetuple())-a))", "main(): while True: a = time.mktime(datetime.now().timetuple()) n = datetime.now() if", "a = time.mktime(datetime.now().timetuple()) n = datetime.now() if n.hour == 6", "n.hour == 6 and (n.minute-(n.minute%5)) == 15: return morning.main() time.sleep(300", "n = datetime.now() if n.hour == 6 and (n.minute-(n.minute%5)) ==", "if n.hour == 6 and (n.minute-(n.minute%5)) == 15: return morning.main()", "datetime import datetime def main(): while True: a = time.mktime(datetime.now().timetuple())", "import time, morning from datetime import datetime def main(): while", "while True: a = time.mktime(datetime.now().timetuple()) n = datetime.now() if n.hour", "import datetime def main(): while True: a = time.mktime(datetime.now().timetuple()) n" ]
[ "inside a context manager context_mock = Mock() invoke_context_mock.return_value.__enter__.return_value = context_mock", "@patch(\"samcli.commands.local.start_lambda.cli.InvokeContext\") def test_must_raise_user_exception_on_invalid_sam_template(self, invoke_context_mock): invoke_context_mock.side_effect = InvalidSamDocumentException(\"bad template\") with self.assertRaises(UserException)", "service_mock = Mock() local_lambda_service_mock.return_value = service_mock self.call_cli() invoke_context_mock.assert_called_with(template_file=self.template, function_identifier=None, env_vars_file=self.env_vars,", "samcli.commands.local.start_lambda.cli import do_cli as start_lambda_cli from samcli.commands.local.cli_common.user_exceptions import UserException from", "@patch(\"samcli.commands.local.start_lambda.cli.LocalLambdaService\") def test_cli_must_setup_context_and_start_service(self, local_lambda_service_mock, invoke_context_mock): # Mock the __enter__ method", "from mock import patch, Mock from samcli.commands.local.start_lambda.cli import do_cli as", "class TestCli(TestCase): def setUp(self): self.template = \"template\" self.env_vars = \"env-vars\"", "host=self.host, port=self.port, template=self.template, env_vars=self.env_vars, debug_port=self.debug_port, debug_args=self.debug_args, debugger_path=self.debugger_path, docker_volume_basedir=self.docker_volume_basedir, docker_network=self.docker_network, log_file=self.log_file,", "= service_mock self.call_cli() invoke_context_mock.assert_called_with(template_file=self.template, function_identifier=None, env_vars_file=self.env_vars, docker_volume_basedir=self.docker_volume_basedir, docker_network=self.docker_network, log_file=self.log_file, skip_pull_image=self.skip_pull_image,", "self.template = \"template\" self.env_vars = \"env-vars\" self.debug_port = 123 self.debug_args", "env vars\" self.assertEquals(msg, expected) def call_cli(self): start_lambda_cli(ctx=None, host=self.host, port=self.port, 
template=self.template,", "expected) @patch(\"samcli.commands.local.start_lambda.cli.InvokeContext\") def test_must_raise_user_exception_on_invalid_env_vars(self, invoke_context_mock): invoke_context_mock.side_effect = OverridesNotWellDefinedError(\"bad env vars\")", "host=self.host) service_mock.start.assert_called_with() @patch(\"samcli.commands.local.start_lambda.cli.InvokeContext\") def test_must_raise_user_exception_on_invalid_sam_template(self, invoke_context_mock): invoke_context_mock.side_effect = InvalidSamDocumentException(\"bad template\")", "with self.assertRaises(UserException) as context: self.call_cli() msg = str(context.exception) expected =", "msg = str(context.exception) expected = \"bad env vars\" self.assertEquals(msg, expected)", "= Mock() local_lambda_service_mock.return_value = service_mock self.call_cli() invoke_context_mock.assert_called_with(template_file=self.template, function_identifier=None, env_vars_file=self.env_vars, docker_volume_basedir=self.docker_volume_basedir,", "template\") with self.assertRaises(UserException) as context: self.call_cli() msg = str(context.exception) expected", "self.env_vars = \"env-vars\" self.debug_port = 123 self.debug_args = \"args\" self.debugger_path", "msg = str(context.exception) expected = \"bad template\" self.assertEquals(msg, expected) @patch(\"samcli.commands.local.start_lambda.cli.InvokeContext\")", "debug_args=self.debug_args, debugger_path=self.debugger_path, aws_region=self.region, parameter_overrides=self.parameter_overrides) local_lambda_service_mock.assert_called_with(lambda_invoke_context=context_mock, port=self.port, host=self.host) service_mock.start.assert_called_with() @patch(\"samcli.commands.local.start_lambda.cli.InvokeContext\") def", "template=self.template, env_vars=self.env_vars, debug_port=self.debug_port, debug_args=self.debug_args, debugger_path=self.debugger_path, docker_volume_basedir=self.docker_volume_basedir, docker_network=self.docker_network, log_file=self.log_file, 
skip_pull_image=self.skip_pull_image, profile=self.profile,", "aws_profile=self.profile, debug_port=self.debug_port, debug_args=self.debug_args, debugger_path=self.debugger_path, aws_region=self.region, parameter_overrides=self.parameter_overrides) local_lambda_service_mock.assert_called_with(lambda_invoke_context=context_mock, port=self.port, host=self.host) service_mock.start.assert_called_with()", "import patch, Mock from samcli.commands.local.start_lambda.cli import do_cli as start_lambda_cli from", "True self.profile = \"profile\" self.region = \"region\" self.parameter_overrides = {}", "# Mock the __enter__ method to return a object inside", "invoke_context_mock.return_value.__enter__.return_value = context_mock service_mock = Mock() local_lambda_service_mock.return_value = service_mock self.call_cli()", "context: self.call_cli() msg = str(context.exception) expected = \"bad env vars\"", "local_lambda_service_mock.assert_called_with(lambda_invoke_context=context_mock, port=self.port, host=self.host) service_mock.start.assert_called_with() @patch(\"samcli.commands.local.start_lambda.cli.InvokeContext\") def test_must_raise_user_exception_on_invalid_sam_template(self, invoke_context_mock): invoke_context_mock.side_effect =", "\"basedir\" self.docker_network = \"network\" self.log_file = \"logfile\" self.skip_pull_image = True", "test_must_raise_user_exception_on_invalid_env_vars(self, invoke_context_mock): invoke_context_mock.side_effect = OverridesNotWellDefinedError(\"bad env vars\") with self.assertRaises(UserException) as", "str(context.exception) expected = \"bad env vars\" self.assertEquals(msg, expected) def call_cli(self):", "vars\") with self.assertRaises(UserException) as context: self.call_cli() msg = str(context.exception) expected", "skip_pull_image=self.skip_pull_image, aws_profile=self.profile, debug_port=self.debug_port, debug_args=self.debug_args, debugger_path=self.debugger_path, aws_region=self.region, 
parameter_overrides=self.parameter_overrides) local_lambda_service_mock.assert_called_with(lambda_invoke_context=context_mock, port=self.port, host=self.host)", "context: self.call_cli() msg = str(context.exception) expected = \"bad template\" self.assertEquals(msg,", "call_cli(self): start_lambda_cli(ctx=None, host=self.host, port=self.port, template=self.template, env_vars=self.env_vars, debug_port=self.debug_port, debug_args=self.debug_args, debugger_path=self.debugger_path, docker_volume_basedir=self.docker_volume_basedir,", "\"host\" self.port = 123 @patch(\"samcli.commands.local.start_lambda.cli.InvokeContext\") @patch(\"samcli.commands.local.start_lambda.cli.LocalLambdaService\") def test_cli_must_setup_context_and_start_service(self, local_lambda_service_mock, invoke_context_mock):", "the __enter__ method to return a object inside a context", "setUp(self): self.template = \"template\" self.env_vars = \"env-vars\" self.debug_port = 123", "Mock() local_lambda_service_mock.return_value = service_mock self.call_cli() invoke_context_mock.assert_called_with(template_file=self.template, function_identifier=None, env_vars_file=self.env_vars, docker_volume_basedir=self.docker_volume_basedir, docker_network=self.docker_network,", "= \"template\" self.env_vars = \"env-vars\" self.debug_port = 123 self.debug_args =", "import InvalidSamDocumentException from samcli.commands.local.lib.exceptions import OverridesNotWellDefinedError class TestCli(TestCase): def setUp(self):", "do_cli as start_lambda_cli from samcli.commands.local.cli_common.user_exceptions import UserException from samcli.commands.validate.lib.exceptions import", "template\" self.assertEquals(msg, expected) @patch(\"samcli.commands.local.start_lambda.cli.InvokeContext\") def test_must_raise_user_exception_on_invalid_env_vars(self, invoke_context_mock): invoke_context_mock.side_effect = OverridesNotWellDefinedError(\"bad", "self.log_file = \"logfile\" self.skip_pull_image = True self.profile = \"profile\" 
self.region", "start_lambda_cli(ctx=None, host=self.host, port=self.port, template=self.template, env_vars=self.env_vars, debug_port=self.debug_port, debug_args=self.debug_args, debugger_path=self.debugger_path, docker_volume_basedir=self.docker_volume_basedir, docker_network=self.docker_network,", "= \"region\" self.parameter_overrides = {} self.host = \"host\" self.port =", "123 @patch(\"samcli.commands.local.start_lambda.cli.InvokeContext\") @patch(\"samcli.commands.local.start_lambda.cli.LocalLambdaService\") def test_cli_must_setup_context_and_start_service(self, local_lambda_service_mock, invoke_context_mock): # Mock the", "\"args\" self.debugger_path = \"/test/path\" self.docker_volume_basedir = \"basedir\" self.docker_network = \"network\"", "function_identifier=None, env_vars_file=self.env_vars, docker_volume_basedir=self.docker_volume_basedir, docker_network=self.docker_network, log_file=self.log_file, skip_pull_image=self.skip_pull_image, aws_profile=self.profile, debug_port=self.debug_port, debug_args=self.debug_args, debugger_path=self.debugger_path,", "start_lambda_cli from samcli.commands.local.cli_common.user_exceptions import UserException from samcli.commands.validate.lib.exceptions import InvalidSamDocumentException from", "samcli.commands.validate.lib.exceptions import InvalidSamDocumentException from samcli.commands.local.lib.exceptions import OverridesNotWellDefinedError class TestCli(TestCase): def", "env_vars_file=self.env_vars, docker_volume_basedir=self.docker_volume_basedir, docker_network=self.docker_network, log_file=self.log_file, skip_pull_image=self.skip_pull_image, aws_profile=self.profile, debug_port=self.debug_port, debug_args=self.debug_args, debugger_path=self.debugger_path, aws_region=self.region,", "= True self.profile = \"profile\" self.region = \"region\" self.parameter_overrides =", "context_mock service_mock = Mock() local_lambda_service_mock.return_value = service_mock self.call_cli() 
invoke_context_mock.assert_called_with(template_file=self.template, function_identifier=None,", "from unittest import TestCase from mock import patch, Mock from", "self.parameter_overrides = {} self.host = \"host\" self.port = 123 @patch(\"samcli.commands.local.start_lambda.cli.InvokeContext\")", "= str(context.exception) expected = \"bad env vars\" self.assertEquals(msg, expected) def", "unittest import TestCase from mock import patch, Mock from samcli.commands.local.start_lambda.cli", "def setUp(self): self.template = \"template\" self.env_vars = \"env-vars\" self.debug_port =", "OverridesNotWellDefinedError class TestCli(TestCase): def setUp(self): self.template = \"template\" self.env_vars =", "{} self.host = \"host\" self.port = 123 @patch(\"samcli.commands.local.start_lambda.cli.InvokeContext\") @patch(\"samcli.commands.local.start_lambda.cli.LocalLambdaService\") def", "a object inside a context manager context_mock = Mock() invoke_context_mock.return_value.__enter__.return_value", "context manager context_mock = Mock() invoke_context_mock.return_value.__enter__.return_value = context_mock service_mock =", "docker_network=self.docker_network, log_file=self.log_file, skip_pull_image=self.skip_pull_image, aws_profile=self.profile, debug_port=self.debug_port, debug_args=self.debug_args, debugger_path=self.debugger_path, aws_region=self.region, parameter_overrides=self.parameter_overrides) local_lambda_service_mock.assert_called_with(lambda_invoke_context=context_mock,", "parameter_overrides=self.parameter_overrides) local_lambda_service_mock.assert_called_with(lambda_invoke_context=context_mock, port=self.port, host=self.host) service_mock.start.assert_called_with() @patch(\"samcli.commands.local.start_lambda.cli.InvokeContext\") def test_must_raise_user_exception_on_invalid_sam_template(self, invoke_context_mock): invoke_context_mock.side_effect", "123 self.debug_args = \"args\" self.debugger_path = \"/test/path\" self.docker_volume_basedir = \"basedir\"", 
"Mock the __enter__ method to return a object inside a", "service_mock.start.assert_called_with() @patch(\"samcli.commands.local.start_lambda.cli.InvokeContext\") def test_must_raise_user_exception_on_invalid_sam_template(self, invoke_context_mock): invoke_context_mock.side_effect = InvalidSamDocumentException(\"bad template\") with", "self.debug_args = \"args\" self.debugger_path = \"/test/path\" self.docker_volume_basedir = \"basedir\" self.docker_network", "method to return a object inside a context manager context_mock", "@patch(\"samcli.commands.local.start_lambda.cli.InvokeContext\") @patch(\"samcli.commands.local.start_lambda.cli.LocalLambdaService\") def test_cli_must_setup_context_and_start_service(self, local_lambda_service_mock, invoke_context_mock): # Mock the __enter__", "\"bad template\" self.assertEquals(msg, expected) @patch(\"samcli.commands.local.start_lambda.cli.InvokeContext\") def test_must_raise_user_exception_on_invalid_env_vars(self, invoke_context_mock): invoke_context_mock.side_effect =", "vars\" self.assertEquals(msg, expected) def call_cli(self): start_lambda_cli(ctx=None, host=self.host, port=self.port, template=self.template, env_vars=self.env_vars,", "\"bad env vars\" self.assertEquals(msg, expected) def call_cli(self): start_lambda_cli(ctx=None, host=self.host, port=self.port,", "self.assertEquals(msg, expected) def call_cli(self): start_lambda_cli(ctx=None, host=self.host, port=self.port, template=self.template, env_vars=self.env_vars, debug_port=self.debug_port,", "= \"host\" self.port = 123 @patch(\"samcli.commands.local.start_lambda.cli.InvokeContext\") @patch(\"samcli.commands.local.start_lambda.cli.LocalLambdaService\") def test_cli_must_setup_context_and_start_service(self, local_lambda_service_mock,", "= \"args\" self.debugger_path = \"/test/path\" self.docker_volume_basedir = \"basedir\" self.docker_network =", "expected = \"bad env vars\" self.assertEquals(msg, expected) def call_cli(self): start_lambda_cli(ctx=None,", 
"\"/test/path\" self.docker_volume_basedir = \"basedir\" self.docker_network = \"network\" self.log_file = \"logfile\"", "patch, Mock from samcli.commands.local.start_lambda.cli import do_cli as start_lambda_cli from samcli.commands.local.cli_common.user_exceptions", "import TestCase from mock import patch, Mock from samcli.commands.local.start_lambda.cli import", "from samcli.commands.validate.lib.exceptions import InvalidSamDocumentException from samcli.commands.local.lib.exceptions import OverridesNotWellDefinedError class TestCli(TestCase):", "port=self.port, host=self.host) service_mock.start.assert_called_with() @patch(\"samcli.commands.local.start_lambda.cli.InvokeContext\") def test_must_raise_user_exception_on_invalid_sam_template(self, invoke_context_mock): invoke_context_mock.side_effect = InvalidSamDocumentException(\"bad", "Mock() invoke_context_mock.return_value.__enter__.return_value = context_mock service_mock = Mock() local_lambda_service_mock.return_value = service_mock", "\"network\" self.log_file = \"logfile\" self.skip_pull_image = True self.profile = \"profile\"", "= \"profile\" self.region = \"region\" self.parameter_overrides = {} self.host =", "test_cli_must_setup_context_and_start_service(self, local_lambda_service_mock, invoke_context_mock): # Mock the __enter__ method to return", "to return a object inside a context manager context_mock =", "= \"logfile\" self.skip_pull_image = True self.profile = \"profile\" self.region =", "@patch(\"samcli.commands.local.start_lambda.cli.InvokeContext\") def test_must_raise_user_exception_on_invalid_env_vars(self, invoke_context_mock): invoke_context_mock.side_effect = OverridesNotWellDefinedError(\"bad env vars\") with", "= 123 self.debug_args = \"args\" self.debugger_path = \"/test/path\" self.docker_volume_basedir =", "invoke_context_mock.side_effect = OverridesNotWellDefinedError(\"bad env vars\") with self.assertRaises(UserException) as context: self.call_cli()", "as context: self.call_cli() msg = 
str(context.exception) expected = \"bad env", "as start_lambda_cli from samcli.commands.local.cli_common.user_exceptions import UserException from samcli.commands.validate.lib.exceptions import InvalidSamDocumentException", "UserException from samcli.commands.validate.lib.exceptions import InvalidSamDocumentException from samcli.commands.local.lib.exceptions import OverridesNotWellDefinedError class", "self.docker_network = \"network\" self.log_file = \"logfile\" self.skip_pull_image = True self.profile", "self.assertEquals(msg, expected) @patch(\"samcli.commands.local.start_lambda.cli.InvokeContext\") def test_must_raise_user_exception_on_invalid_env_vars(self, invoke_context_mock): invoke_context_mock.side_effect = OverridesNotWellDefinedError(\"bad env", "= Mock() invoke_context_mock.return_value.__enter__.return_value = context_mock service_mock = Mock() local_lambda_service_mock.return_value =", "self.call_cli() msg = str(context.exception) expected = \"bad template\" self.assertEquals(msg, expected)", "\"env-vars\" self.debug_port = 123 self.debug_args = \"args\" self.debugger_path = \"/test/path\"", "def test_cli_must_setup_context_and_start_service(self, local_lambda_service_mock, invoke_context_mock): # Mock the __enter__ method to", "from samcli.commands.local.cli_common.user_exceptions import UserException from samcli.commands.validate.lib.exceptions import InvalidSamDocumentException from samcli.commands.local.lib.exceptions", "context_mock = Mock() invoke_context_mock.return_value.__enter__.return_value = context_mock service_mock = Mock() local_lambda_service_mock.return_value", "local_lambda_service_mock.return_value = service_mock self.call_cli() invoke_context_mock.assert_called_with(template_file=self.template, function_identifier=None, env_vars_file=self.env_vars, docker_volume_basedir=self.docker_volume_basedir, docker_network=self.docker_network, log_file=self.log_file,", "import UserException from samcli.commands.validate.lib.exceptions import 
InvalidSamDocumentException from samcli.commands.local.lib.exceptions import OverridesNotWellDefinedError", "= \"basedir\" self.docker_network = \"network\" self.log_file = \"logfile\" self.skip_pull_image =", "= \"/test/path\" self.docker_volume_basedir = \"basedir\" self.docker_network = \"network\" self.log_file =", "self.region = \"region\" self.parameter_overrides = {} self.host = \"host\" self.port", "expected = \"bad template\" self.assertEquals(msg, expected) @patch(\"samcli.commands.local.start_lambda.cli.InvokeContext\") def test_must_raise_user_exception_on_invalid_env_vars(self, invoke_context_mock):", "docker_volume_basedir=self.docker_volume_basedir, docker_network=self.docker_network, log_file=self.log_file, skip_pull_image=self.skip_pull_image, aws_profile=self.profile, debug_port=self.debug_port, debug_args=self.debug_args, debugger_path=self.debugger_path, aws_region=self.region, parameter_overrides=self.parameter_overrides)", "\"profile\" self.region = \"region\" self.parameter_overrides = {} self.host = \"host\"", "invoke_context_mock.side_effect = InvalidSamDocumentException(\"bad template\") with self.assertRaises(UserException) as context: self.call_cli() msg", "= \"bad template\" self.assertEquals(msg, expected) @patch(\"samcli.commands.local.start_lambda.cli.InvokeContext\") def test_must_raise_user_exception_on_invalid_env_vars(self, invoke_context_mock): invoke_context_mock.side_effect", "env vars\") with self.assertRaises(UserException) as context: self.call_cli() msg = str(context.exception)", "= \"bad env vars\" self.assertEquals(msg, expected) def call_cli(self): start_lambda_cli(ctx=None, host=self.host,", "import OverridesNotWellDefinedError class TestCli(TestCase): def setUp(self): self.template = \"template\" self.env_vars", "port=self.port, template=self.template, env_vars=self.env_vars, debug_port=self.debug_port, debug_args=self.debug_args, debugger_path=self.debugger_path, docker_volume_basedir=self.docker_volume_basedir, 
docker_network=self.docker_network, log_file=self.log_file, skip_pull_image=self.skip_pull_image,", "self.host = \"host\" self.port = 123 @patch(\"samcli.commands.local.start_lambda.cli.InvokeContext\") @patch(\"samcli.commands.local.start_lambda.cli.LocalLambdaService\") def test_cli_must_setup_context_and_start_service(self,", "as context: self.call_cli() msg = str(context.exception) expected = \"bad template\"", "import do_cli as start_lambda_cli from samcli.commands.local.cli_common.user_exceptions import UserException from samcli.commands.validate.lib.exceptions", "<reponame>ourobouros/aws-sam-cli from unittest import TestCase from mock import patch, Mock", "manager context_mock = Mock() invoke_context_mock.return_value.__enter__.return_value = context_mock service_mock = Mock()", "def test_must_raise_user_exception_on_invalid_sam_template(self, invoke_context_mock): invoke_context_mock.side_effect = InvalidSamDocumentException(\"bad template\") with self.assertRaises(UserException) as", "test_must_raise_user_exception_on_invalid_sam_template(self, invoke_context_mock): invoke_context_mock.side_effect = InvalidSamDocumentException(\"bad template\") with self.assertRaises(UserException) as context:", "return a object inside a context manager context_mock = Mock()", "invoke_context_mock): invoke_context_mock.side_effect = InvalidSamDocumentException(\"bad template\") with self.assertRaises(UserException) as context: self.call_cli()", "= \"network\" self.log_file = \"logfile\" self.skip_pull_image = True self.profile =", "invoke_context_mock): invoke_context_mock.side_effect = OverridesNotWellDefinedError(\"bad env vars\") with self.assertRaises(UserException) as context:", "from samcli.commands.local.lib.exceptions import OverridesNotWellDefinedError class TestCli(TestCase): def setUp(self): self.template =", "__enter__ method to return a object inside a context manager", "InvalidSamDocumentException(\"bad template\") with self.assertRaises(UserException) as 
context: self.call_cli() msg = str(context.exception)", "self.assertRaises(UserException) as context: self.call_cli() msg = str(context.exception) expected = \"bad", "log_file=self.log_file, skip_pull_image=self.skip_pull_image, aws_profile=self.profile, debug_port=self.debug_port, debug_args=self.debug_args, debugger_path=self.debugger_path, aws_region=self.region, parameter_overrides=self.parameter_overrides) local_lambda_service_mock.assert_called_with(lambda_invoke_context=context_mock, port=self.port,", "str(context.exception) expected = \"bad template\" self.assertEquals(msg, expected) @patch(\"samcli.commands.local.start_lambda.cli.InvokeContext\") def test_must_raise_user_exception_on_invalid_env_vars(self,", "env_vars=self.env_vars, debug_port=self.debug_port, debug_args=self.debug_args, debugger_path=self.debugger_path, docker_volume_basedir=self.docker_volume_basedir, docker_network=self.docker_network, log_file=self.log_file, skip_pull_image=self.skip_pull_image, profile=self.profile, region=self.region,", "= {} self.host = \"host\" self.port = 123 @patch(\"samcli.commands.local.start_lambda.cli.InvokeContext\") @patch(\"samcli.commands.local.start_lambda.cli.LocalLambdaService\")", "invoke_context_mock.assert_called_with(template_file=self.template, function_identifier=None, env_vars_file=self.env_vars, docker_volume_basedir=self.docker_volume_basedir, docker_network=self.docker_network, log_file=self.log_file, skip_pull_image=self.skip_pull_image, aws_profile=self.profile, debug_port=self.debug_port, debug_args=self.debug_args,", "from samcli.commands.local.start_lambda.cli import do_cli as start_lambda_cli from samcli.commands.local.cli_common.user_exceptions import UserException", "\"logfile\" self.skip_pull_image = True self.profile = \"profile\" self.region = \"region\"", "OverridesNotWellDefinedError(\"bad env vars\") with self.assertRaises(UserException) as context: self.call_cli() msg =", "= context_mock service_mock = Mock() 
local_lambda_service_mock.return_value = service_mock self.call_cli() invoke_context_mock.assert_called_with(template_file=self.template,", "aws_region=self.region, parameter_overrides=self.parameter_overrides) local_lambda_service_mock.assert_called_with(lambda_invoke_context=context_mock, port=self.port, host=self.host) service_mock.start.assert_called_with() @patch(\"samcli.commands.local.start_lambda.cli.InvokeContext\") def test_must_raise_user_exception_on_invalid_sam_template(self, invoke_context_mock):", "debugger_path=self.debugger_path, aws_region=self.region, parameter_overrides=self.parameter_overrides) local_lambda_service_mock.assert_called_with(lambda_invoke_context=context_mock, port=self.port, host=self.host) service_mock.start.assert_called_with() @patch(\"samcli.commands.local.start_lambda.cli.InvokeContext\") def test_must_raise_user_exception_on_invalid_sam_template(self,", "object inside a context manager context_mock = Mock() invoke_context_mock.return_value.__enter__.return_value =", "= OverridesNotWellDefinedError(\"bad env vars\") with self.assertRaises(UserException) as context: self.call_cli() msg", "service_mock self.call_cli() invoke_context_mock.assert_called_with(template_file=self.template, function_identifier=None, env_vars_file=self.env_vars, docker_volume_basedir=self.docker_volume_basedir, docker_network=self.docker_network, log_file=self.log_file, skip_pull_image=self.skip_pull_image, aws_profile=self.profile,", "def test_must_raise_user_exception_on_invalid_env_vars(self, invoke_context_mock): invoke_context_mock.side_effect = OverridesNotWellDefinedError(\"bad env vars\") with self.assertRaises(UserException)", "= 123 @patch(\"samcli.commands.local.start_lambda.cli.InvokeContext\") @patch(\"samcli.commands.local.start_lambda.cli.LocalLambdaService\") def test_cli_must_setup_context_and_start_service(self, local_lambda_service_mock, invoke_context_mock): # Mock", "def call_cli(self): start_lambda_cli(ctx=None, host=self.host, 
port=self.port, template=self.template, env_vars=self.env_vars, debug_port=self.debug_port, debug_args=self.debug_args, debugger_path=self.debugger_path,", "Mock from samcli.commands.local.start_lambda.cli import do_cli as start_lambda_cli from samcli.commands.local.cli_common.user_exceptions import", "TestCase from mock import patch, Mock from samcli.commands.local.start_lambda.cli import do_cli", "a context manager context_mock = Mock() invoke_context_mock.return_value.__enter__.return_value = context_mock service_mock", "\"template\" self.env_vars = \"env-vars\" self.debug_port = 123 self.debug_args = \"args\"", "= \"env-vars\" self.debug_port = 123 self.debug_args = \"args\" self.debugger_path =", "self.docker_volume_basedir = \"basedir\" self.docker_network = \"network\" self.log_file = \"logfile\" self.skip_pull_image", "invoke_context_mock): # Mock the __enter__ method to return a object", "debug_port=self.debug_port, debug_args=self.debug_args, debugger_path=self.debugger_path, docker_volume_basedir=self.docker_volume_basedir, docker_network=self.docker_network, log_file=self.log_file, skip_pull_image=self.skip_pull_image, profile=self.profile, region=self.region, parameter_overrides=self.parameter_overrides)", "InvalidSamDocumentException from samcli.commands.local.lib.exceptions import OverridesNotWellDefinedError class TestCli(TestCase): def setUp(self): self.template", "self.profile = \"profile\" self.region = \"region\" self.parameter_overrides = {} self.host", "expected) def call_cli(self): start_lambda_cli(ctx=None, host=self.host, port=self.port, template=self.template, env_vars=self.env_vars, debug_port=self.debug_port, debug_args=self.debug_args,", "self.debugger_path = \"/test/path\" self.docker_volume_basedir = \"basedir\" self.docker_network = \"network\" self.log_file", "debug_port=self.debug_port, debug_args=self.debug_args, debugger_path=self.debugger_path, aws_region=self.region, parameter_overrides=self.parameter_overrides) 
local_lambda_service_mock.assert_called_with(lambda_invoke_context=context_mock, port=self.port, host=self.host) service_mock.start.assert_called_with() @patch(\"samcli.commands.local.start_lambda.cli.InvokeContext\")", "self.debug_port = 123 self.debug_args = \"args\" self.debugger_path = \"/test/path\" self.docker_volume_basedir", "self.call_cli() msg = str(context.exception) expected = \"bad env vars\" self.assertEquals(msg,", "= InvalidSamDocumentException(\"bad template\") with self.assertRaises(UserException) as context: self.call_cli() msg =", "samcli.commands.local.cli_common.user_exceptions import UserException from samcli.commands.validate.lib.exceptions import InvalidSamDocumentException from samcli.commands.local.lib.exceptions import", "self.port = 123 @patch(\"samcli.commands.local.start_lambda.cli.InvokeContext\") @patch(\"samcli.commands.local.start_lambda.cli.LocalLambdaService\") def test_cli_must_setup_context_and_start_service(self, local_lambda_service_mock, invoke_context_mock): #", "local_lambda_service_mock, invoke_context_mock): # Mock the __enter__ method to return a", "self.skip_pull_image = True self.profile = \"profile\" self.region = \"region\" self.parameter_overrides", "= str(context.exception) expected = \"bad template\" self.assertEquals(msg, expected) @patch(\"samcli.commands.local.start_lambda.cli.InvokeContext\") def", "TestCli(TestCase): def setUp(self): self.template = \"template\" self.env_vars = \"env-vars\" self.debug_port", "mock import patch, Mock from samcli.commands.local.start_lambda.cli import do_cli as start_lambda_cli", "samcli.commands.local.lib.exceptions import OverridesNotWellDefinedError class TestCli(TestCase): def setUp(self): self.template = \"template\"", "self.call_cli() invoke_context_mock.assert_called_with(template_file=self.template, function_identifier=None, env_vars_file=self.env_vars, docker_volume_basedir=self.docker_volume_basedir, docker_network=self.docker_network, log_file=self.log_file, 
skip_pull_image=self.skip_pull_image, aws_profile=self.profile, debug_port=self.debug_port,", "\"region\" self.parameter_overrides = {} self.host = \"host\" self.port = 123" ]
[ "def test_get_data_correct_df(self): \"\"\"Whether it returns correct df when input is", "indicator type self.assertEquals( Getter(table_name=IndicatorType, df_flag=True, param_list=self.param_list_indicator_type).get_data() .equals( Converter(obj_list=list(IndicatorType.objects.filter(**self.param_list_indicator_type))).to_df() ), True)", "# Returns correct df for indicator type self.assertEquals( Getter(table_name=IndicatorType, df_flag=True,", "strategies.models import IndicatorType class GetDataTestCase(TestCase): def setUp(self) -> None: #", "\"abcd\"}))).to_df() ), True) def test_get_data_invalid_inputs(self): self.assertRaises(TypeError, Getter(table_name=\"IndicatorTyp\", df_flag=True, param_list={\"name\": \"abcd\"}).get_data)", ".getters import Getter from .converter import Converter from strategies.models import", "description='desc') Company.objects.create(name='abce', ticker='ABCE', description='desc') # Dummy indicator data IndicatorType.objects.create(name='abc', description='desc')", "IndicatorType.objects.create(name='abcd', description='desc') IndicatorType.objects.create(name='abce', description='desc') self.param_list_company = {\"name\": \"abc\", \"ticker\": 'ABC',", "setUp(self) -> None: # Dummy company data Company.objects.create(name='abc', ticker='ABC', description='desc')", "'ABC', \"description\": 'desc'} self.param_list_indicator_type = {\"name\": \"abc\", \"description\": 'desc'} def", "self.assertEquals(Getter(table_name=Company, df_flag=True, param_list=self.param_list_company).df_flag, True) def test_input_df_flag(self): \"\"\"Only df_flag input is", "self.assertEquals(Getter(table_name=Company, param_list={\"description\": 'desc'}).get_data(), list(Company.objects.filter(**{\"description\": 'desc'}))) self.assertEquals(Getter(table_name=Company, param_list={\"name\": \"abcd\"}).get_data(), list(Company.objects.filter(**{\"name\": \"abcd\"})))", "company self.assertEquals(Getter(table_name=Company, df_flag=True, 
param_list=self.param_list_company).get_data() .equals( Converter(obj_list=list(Company.objects.filter(**self.param_list_company))).to_df() ), True) self.assertEquals(Getter(table_name=Company, df_flag=True,", "Company.objects.create(name='abc', ticker='ABC', description='desc') Company.objects.create(name='abcd', ticker='ABCD', description='desc') Company.objects.create(name='abce', ticker='ABCE', description='desc') #", "df_flag=True, param_list={\"description\": 'desc'}).get_data() .equals( Converter(obj_list=list(Company.objects.filter(**{\"description\": 'desc'}))).to_df() ), True) self.assertEquals(Getter(table_name=Company, df_flag=True,", "self.assertEquals( Getter(table_name=IndicatorType, df_flag=True, param_list=self.param_list_indicator_type).get_data() .equals( Converter(obj_list=list(IndicatorType.objects.filter(**self.param_list_indicator_type))).to_df() ), True) self.assertEquals(Getter(table_name=IndicatorType, df_flag=True,", "Converter(obj_list=list(IndicatorType.objects.filter(**{\"description\": 'desc'}))).to_df() ), True) self.assertEquals(Getter(table_name=IndicatorType, df_flag=True, param_list={\"name\": \"abcd\"}).get_data() .equals( Converter(obj_list=list(IndicatorType.objects.filter(**{\"name\":", "self.assertEquals(Getter(table_name=IndicatorType, df_flag=True, param_list={\"name\": \"abcd\"}).get_data() .equals( Converter(obj_list=list(IndicatorType.objects.filter(**{\"name\": \"abcd\"}))).to_df() ), True) def", "import Getter from .converter import Converter from strategies.models import Company", "pandas as pd from .getters import Getter from .converter import", "company self.assertEquals(Getter(table_name=Company, df_flag=False, param_list=self.param_list_company).get_data(), list(Company.objects.filter(**self.param_list_company))) self.assertEquals(Getter(table_name=Company, param_list={\"description\": 'desc'}).get_data(), list(Company.objects.filter(**{\"description\": 'desc'})))", ".equals( 
Converter(obj_list=list(Company.objects.filter(**{\"name\": \"abcd\"}))).to_df() ), True) # Returns correct df for", "description='desc') Company.objects.create(name='abcd', ticker='ABCD', description='desc') Company.objects.create(name='abce', ticker='ABCE', description='desc') # Dummy indicator", "\"abc\", \"description\": 'desc'} def test_input_none(self): \"\"\"No inputs are given\"\"\" self.assertEquals(Getter().table_name,", "from strategies.models import Company from strategies.models import IndicatorType class GetDataTestCase(TestCase):", "True) self.assertEquals(Getter(table_name=Company, df_flag=True, param_list={\"description\": 'desc'}).get_data() .equals( Converter(obj_list=list(Company.objects.filter(**{\"description\": 'desc'}))).to_df() ), True)", "input is correct\"\"\" # Returns correct df for company self.assertEquals(Getter(table_name=Company,", "IndicatorType.objects.create(name='abce', description='desc') self.param_list_company = {\"name\": \"abc\", \"ticker\": 'ABC', \"description\": 'desc'}", "Converter(obj_list=list(Company.objects.filter(**{\"name\": \"abcd\"}))).to_df() ), True) # Returns correct df for indicator", "Converter from strategies.models import Company from strategies.models import IndicatorType class", "\"ticker\": 'ABC', \"description\": 'desc'} self.param_list_indicator_type = {\"name\": \"abc\", \"description\": 'desc'}", "import pandas as pd from .getters import Getter from .converter", "df_flag=True, param_list=self.param_list_indicator_type).get_data() .equals( Converter(obj_list=list(IndicatorType.objects.filter(**self.param_list_indicator_type))).to_df() ), True) self.assertEquals(Getter(table_name=IndicatorType, df_flag=True, param_list={\"description\": 'desc'}).get_data()", "self.assertRaises(TypeError, Getter(table_name=\"IndicatorTyp\", df_flag=True, param_list={\"name\": \"abcd\"}).get_data) self.assertRaises(TypeError, Getter(table_name=IndicatorType, param_list={\"nam\": \"abcd\"}).get_data) 
class GetDataTestCase(TestCase):
    """Tests for ``Getter``: constructor defaults/assignment and ``get_data``
    output as either a model-object list or a ``Converter``-built DataFrame."""

    def setUp(self) -> None:
        # Dummy company data
        Company.objects.create(name='abc', ticker='ABC', description='desc')
        Company.objects.create(name='abcd', ticker='ABCD', description='desc')
        Company.objects.create(name='abce', ticker='ABCE', description='desc')
        # Dummy indicator data
        IndicatorType.objects.create(name='abc', description='desc')
        IndicatorType.objects.create(name='abcd', description='desc')
        IndicatorType.objects.create(name='abce', description='desc')
        # Filter kwargs matching exactly one row of each model above.
        self.param_list_company = {"name": "abc", "ticker": 'ABC', "description": 'desc'}
        self.param_list_indicator_type = {"name": "abc", "description": 'desc'}

    def test_input_none(self):
        """No inputs are given"""
        # NOTE: assertEqual replaces the deprecated assertEquals alias
        # (removed in Python 3.12); behavior is identical.
        self.assertEqual(Getter().table_name, None)
        self.assertEqual(Getter().param_list, None)
        self.assertEqual(Getter().df_flag, False)

    def test_input_all(self):
        """All inputs provided"""
        getter = Getter(table_name=Company, df_flag=True,
                        param_list=self.param_list_company)
        self.assertEqual(getter.table_name, Company)
        self.assertEqual(getter.param_list, self.param_list_company)
        self.assertEqual(getter.df_flag, True)

    def test_input_df_flag(self):
        """Only df_flag input is provided"""
        self.assertEqual(Getter(df_flag=True).df_flag, True)
        self.assertEqual(Getter(df_flag=False).df_flag, False)

    def test_get_data_correct_obj_list(self):
        """Whether it returns correct obj list when input is correct"""
        # Returns correct object list for company
        self.assertEqual(
            Getter(table_name=Company, df_flag=False,
                   param_list=self.param_list_company).get_data(),
            list(Company.objects.filter(**self.param_list_company)))
        self.assertEqual(
            Getter(table_name=Company, param_list={"description": 'desc'}).get_data(),
            list(Company.objects.filter(**{"description": 'desc'})))
        self.assertEqual(
            Getter(table_name=Company, param_list={"name": "abcd"}).get_data(),
            list(Company.objects.filter(**{"name": "abcd"})))
        # Returns correct object list for Indicator
        self.assertEqual(
            Getter(table_name=IndicatorType, df_flag=False,
                   param_list=self.param_list_indicator_type).get_data(),
            list(IndicatorType.objects.filter(**self.param_list_indicator_type)))
        self.assertEqual(
            Getter(table_name=IndicatorType, param_list={"description": 'desc'}).get_data(),
            list(IndicatorType.objects.filter(**{"description": 'desc'})))
        self.assertEqual(
            Getter(table_name=IndicatorType, param_list={"name": "abcd"}).get_data(),
            list(IndicatorType.objects.filter(**{"name": "abcd"})))

    def test_get_data_correct_df(self):
        """Whether it returns correct df when input is correct"""
        # DataFrames are compared with DataFrame.equals, so assertTrue is the
        # natural form of the original assertEqual(..., True) checks.
        # Returns correct df for company
        self.assertTrue(
            Getter(table_name=Company, df_flag=True,
                   param_list=self.param_list_company).get_data()
            .equals(Converter(obj_list=list(
                Company.objects.filter(**self.param_list_company))).to_df()))
        self.assertTrue(
            Getter(table_name=Company, df_flag=True,
                   param_list={"description": 'desc'}).get_data()
            .equals(Converter(obj_list=list(
                Company.objects.filter(**{"description": 'desc'}))).to_df()))
        self.assertTrue(
            Getter(table_name=Company, df_flag=True,
                   param_list={"name": "abcd"}).get_data()
            .equals(Converter(obj_list=list(
                Company.objects.filter(**{"name": "abcd"}))).to_df()))
        # Returns correct df for indicator type
        self.assertTrue(
            Getter(table_name=IndicatorType, df_flag=True,
                   param_list=self.param_list_indicator_type).get_data()
            .equals(Converter(obj_list=list(
                IndicatorType.objects.filter(**self.param_list_indicator_type))).to_df()))
        self.assertTrue(
            Getter(table_name=IndicatorType, df_flag=True,
                   param_list={"description": 'desc'}).get_data()
            .equals(Converter(obj_list=list(
                IndicatorType.objects.filter(**{"description": 'desc'}))).to_df()))
        self.assertTrue(
            Getter(table_name=IndicatorType, df_flag=True,
                   param_list={"name": "abcd"}).get_data()
            .equals(Converter(obj_list=list(
                IndicatorType.objects.filter(**{"name": "abcd"}))).to_df()))

    def test_get_data_invalid_inputs(self):
        """get_data raises TypeError for a bad table name or filter keys."""
        self.assertRaises(TypeError,
                          Getter(table_name="IndicatorTyp", df_flag=True,
                                 param_list={"name": "abcd"}).get_data)
        self.assertRaises(TypeError,
                          Getter(table_name=IndicatorType,
                                 param_list={"nam": "abcd"}).get_data)
        # BUGFIX: the original passed the Getter *instance* to assertRaises,
        # so the TypeError came from calling a non-callable object and the
        # test passed vacuously. Pass the bound get_data method, matching the
        # two assertions above, so the invalid "res" key is actually exercised.
        self.assertRaises(TypeError,
                          Getter(table_name=Company,
                                 param_list={"name": "abcd", "res": "abcd"}).get_data)
self.assertEquals(Getter(table_name=IndicatorType, df_flag=True, param_list={\"name\": \"abcd\"}).get_data() .equals( Converter(obj_list=list(IndicatorType.objects.filter(**{\"name\": \"abcd\"}))).to_df() ), True)", "object list for company self.assertEquals(Getter(table_name=Company, df_flag=False, param_list=self.param_list_company).get_data(), list(Company.objects.filter(**self.param_list_company))) self.assertEquals(Getter(table_name=Company, param_list={\"description\":", "list(IndicatorType.objects.filter(**{\"name\": \"abcd\"}))) def test_get_data_correct_df(self): \"\"\"Whether it returns correct df when", "df_flag=True, param_list={\"name\": \"abcd\"}).get_data) self.assertRaises(TypeError, Getter(table_name=IndicatorType, param_list={\"nam\": \"abcd\"}).get_data) self.assertRaises(TypeError, Getter(table_name=Company, param_list={\"name\":" ]
[ "..registry import TRAINERS @TRAINERS.register_module() class MMDetectionTrainer(BaseTrainer): def __init__(self): super(MMDetectionTrainer, self).__init__()", "Corporation Licensed under the Apache License, Version 2.0 (the \"License\");", "for t in cfg.data.train.pipeline if t['type'] == 'Resize'][0][ 'img_scale'] else:", "= cfg.model.bbox_head.anchor_generator.widths n_clust = 0 for w in widths: n_clust", "import MMDETECTION_TOOLS from .base import BaseTrainer from ..registry import TRAINERS", "update_config): if self.__is_clustering_needed(cfg): update_config = self.__cluster(cfg, config_path, update_config) return update_config", "--n_clust ' + str(n_clust) group_as = '' if isinstance(widths[0], (list,", "Unless required by applicable law or agreed to in writing,", "by applicable law or agreed to in writing, software distributed", "in cfg.data.train.dataset.pipeline if t['type'] == 'Resize'][0][ 'img_scale'] img_shape = f'", "hasattr(cfg.model, 'bbox_head') or not cfg.model.bbox_head.type == 'SSDHead': return False if", "or not cfg.model.bbox_head.type == 'SSDHead': return False if not cfg.model.bbox_head.anchor_generator.type", "f'{img_shape}' f'{out}'.split(' '), check=True) with open(tmp_file.name) as src_file: content =", "'), check=True) with open(tmp_file.name) as src_file: content = json.load(src_file) widths,", "heights = content['widths'], content['heights'] if not update_config: update_config = '", "tuple)): group_as = ' --group_as ' + ' '.join([str(len(w)) for", "software distributed under the License is distributed on an \"AS", "distributed under the License is distributed on an \"AS IS\"", "'img_scale'] img_shape = f' --image_size_wh {img_shape[0]} {img_shape[1]}' subprocess.run(f'python {MMDETECTION_TOOLS}/cluster_boxes.py' f'{config}'", "cfg.data.train.pipeline if t['type'] == 'Resize'][0][ 'img_scale'] else: img_shape = [t", "widths = cfg.model.bbox_head.anchor_generator.widths n_clust = 0 for w in widths:", "t['type'] == 'Resize'][0][ 
'img_scale'] img_shape = f' --image_size_wh {img_shape[0]} {img_shape[1]}'", "'' if isinstance(widths[0], (list, tuple)): group_as = ' --group_as '", "@TRAINERS.register_module() class MMDetectionTrainer(BaseTrainer): def __init__(self): super(MMDetectionTrainer, self).__init__() def _get_tools_dir(self): return", "CONDITIONS OF ANY KIND, either express or implied. See the", "--image_size_wh {img_shape[0]} {img_shape[1]}' subprocess.run(f'python {MMDETECTION_TOOLS}/cluster_boxes.py' f'{config}' f'{n_clust}' f'{group_as}' f'{update_config}' f'{img_shape}'", "not update_config: update_config = ' --update_config' update_config += f' model.bbox_head.anchor_generator.widths={str(widths).replace(\"", "'SSDAnchorGeneratorClustered': return False return True @staticmethod def __cluster(cfg, config_path, update_config):", "Version 2.0 (the \"License\"); you may not use this file", "+= f' model.bbox_head.anchor_generator.heights={str(heights).replace(\" \", \"\")}' logging.info('... clustering completed.') return update_config", "writing, software distributed under the License is distributed on an", "= ' --group_as ' + ' '.join([str(len(w)) for w in", "update_config += f' model.bbox_head.anchor_generator.heights={str(heights).replace(\" \", \"\")}' logging.info('... 
clustering completed.') return", "+= len(w) if isinstance(w, (list, tuple)) else 1 n_clust =", "= '' if isinstance(widths[0], (list, tuple)): group_as = ' --group_as", "{tmp_file.name}' if 'pipeline' in cfg.data.train: img_shape = [t for t", "= json.load(src_file) widths, heights = content['widths'], content['heights'] if not update_config:", "widths, heights = content['widths'], content['heights'] if not update_config: update_config =", "not use this file except in compliance with the License.", "2.0 (the \"License\"); you may not use this file except", "Apache License, Version 2.0 (the \"License\"); you may not use", "copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable", "express or implied. See the License for the specific language", "content = json.load(src_file) widths, heights = content['widths'], content['heights'] if not", "= f' --out {tmp_file.name}' if 'pipeline' in cfg.data.train: img_shape =", "_add_extra_args(self, cfg, config_path, update_config): if self.__is_clustering_needed(cfg): update_config = self.__cluster(cfg, config_path,", "{MMDETECTION_TOOLS}/cluster_boxes.py' f'{config}' f'{n_clust}' f'{group_as}' f'{update_config}' f'{img_shape}' f'{out}'.split(' '), check=True) with", "IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "update_config: update_config = ' --update_config' update_config += f' model.bbox_head.anchor_generator.widths={str(widths).replace(\" \",", "in compliance with the License. 
You may obtain a copy", "of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law", "\"\"\" Copyright (c) 2020 Intel Corporation Licensed under the Apache", "2020 Intel Corporation Licensed under the Apache License, Version 2.0", "== 'SSDHead': return False if not cfg.model.bbox_head.anchor_generator.type == 'SSDAnchorGeneratorClustered': return", "you may not use this file except in compliance with", "subprocess import tempfile from ote import MMDETECTION_TOOLS from .base import", "logging.info('Clustering started...') widths = cfg.model.bbox_head.anchor_generator.widths n_clust = 0 for w", "' --n_clust ' + str(n_clust) group_as = '' if isinstance(widths[0],", "<filename>pytorch_toolkit/ote/ote/modules/trainers/mmdetection.py \"\"\" Copyright (c) 2020 Intel Corporation Licensed under the", "is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR", "False if not cfg.model.bbox_head.anchor_generator.type == 'SSDAnchorGeneratorClustered': return False return True", "the License. 
You may obtain a copy of the License", "agreed to in writing, software distributed under the License is", "import BaseTrainer from ..registry import TRAINERS @TRAINERS.register_module() class MMDetectionTrainer(BaseTrainer): def", "\"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "def __is_clustering_needed(cfg): if cfg.total_epochs > 0: return False if not", "' + str(n_clust) group_as = '' if isinstance(widths[0], (list, tuple)):", "logging import subprocess import tempfile from ote import MMDETECTION_TOOLS from", "+= f' model.bbox_head.anchor_generator.widths={str(widths).replace(\" \", \"\")}' update_config += f' model.bbox_head.anchor_generator.heights={str(heights).replace(\" \",", "distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS", "at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to", "return False if not hasattr(cfg.model, 'bbox_head') or not cfg.model.bbox_head.type ==", "use this file except in compliance with the License. You", "open(tmp_file.name) as src_file: content = json.load(src_file) widths, heights = content['widths'],", "if isinstance(widths[0], (list, tuple)): group_as = ' --group_as ' +", "return False return True @staticmethod def __cluster(cfg, config_path, update_config): logging.info('Clustering", "the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or", "with open(tmp_file.name) as src_file: content = json.load(src_file) widths, heights =", "[t for t in cfg.data.train.dataset.pipeline if t['type'] == 'Resize'][0][ 'img_scale']", "ANY KIND, either express or implied. 
See the License for", "started...') widths = cfg.model.bbox_head.anchor_generator.widths n_clust = 0 for w in", "if self.__is_clustering_needed(cfg): update_config = self.__cluster(cfg, config_path, update_config) return update_config @staticmethod", "0: return False if not hasattr(cfg.model, 'bbox_head') or not cfg.model.bbox_head.type", "cfg.model.bbox_head.anchor_generator.type == 'SSDAnchorGeneratorClustered': return False return True @staticmethod def __cluster(cfg,", "http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in", "may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless", "cfg.model.bbox_head.anchor_generator.widths n_clust = 0 for w in widths: n_clust +=", "obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required", "' --config ' + config_path tmp_file = tempfile.NamedTemporaryFile(delete=False) out =", "0 for w in widths: n_clust += len(w) if isinstance(w,", "--config ' + config_path tmp_file = tempfile.NamedTemporaryFile(delete=False) out = f'", "f'{n_clust}' f'{group_as}' f'{update_config}' f'{img_shape}' f'{out}'.split(' '), check=True) with open(tmp_file.name) as", "True @staticmethod def __cluster(cfg, config_path, update_config): logging.info('Clustering started...') widths =", "__init__(self): super(MMDetectionTrainer, self).__init__() def _get_tools_dir(self): return MMDETECTION_TOOLS def _add_extra_args(self, cfg,", "permissions and limitations under the License. \"\"\" import json import", "not cfg.model.bbox_head.type == 'SSDHead': return False if not cfg.model.bbox_head.anchor_generator.type ==", "super(MMDetectionTrainer, self).__init__() def _get_tools_dir(self): return MMDETECTION_TOOLS def _add_extra_args(self, cfg, config_path,", "either express or implied. 
See the License for the specific", "__cluster(cfg, config_path, update_config): logging.info('Clustering started...') widths = cfg.model.bbox_head.anchor_generator.widths n_clust =", "if isinstance(w, (list, tuple)) else 1 n_clust = ' --n_clust", "config_path, update_config): logging.info('Clustering started...') widths = cfg.model.bbox_head.anchor_generator.widths n_clust = 0", "BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "config_path, update_config): if self.__is_clustering_needed(cfg): update_config = self.__cluster(cfg, config_path, update_config) return", "TRAINERS @TRAINERS.register_module() class MMDetectionTrainer(BaseTrainer): def __init__(self): super(MMDetectionTrainer, self).__init__() def _get_tools_dir(self):", "under the License is distributed on an \"AS IS\" BASIS,", "\"License\"); you may not use this file except in compliance", "update_config) return update_config @staticmethod def __is_clustering_needed(cfg): if cfg.total_epochs > 0:", "License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES", "Licensed under the Apache License, Version 2.0 (the \"License\"); you", "n_clust += len(w) if isinstance(w, (list, tuple)) else 1 n_clust", "= self.__cluster(cfg, config_path, update_config) return update_config @staticmethod def __is_clustering_needed(cfg): if", "with the License. 
You may obtain a copy of the", "update_config = self.__cluster(cfg, config_path, update_config) return update_config @staticmethod def __is_clustering_needed(cfg):", "cfg.model.bbox_head.type == 'SSDHead': return False if not cfg.model.bbox_head.anchor_generator.type == 'SSDAnchorGeneratorClustered':", "License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed", "return True @staticmethod def __cluster(cfg, config_path, update_config): logging.info('Clustering started...') widths", "License for the specific language governing permissions and limitations under", "if not hasattr(cfg.model, 'bbox_head') or not cfg.model.bbox_head.type == 'SSDHead': return", "update_config @staticmethod def __is_clustering_needed(cfg): if cfg.total_epochs > 0: return False", "if t['type'] == 'Resize'][0][ 'img_scale'] img_shape = f' --image_size_wh {img_shape[0]}", "f'{config}' f'{n_clust}' f'{group_as}' f'{update_config}' f'{img_shape}' f'{out}'.split(' '), check=True) with open(tmp_file.name)", "import logging import subprocess import tempfile from ote import MMDETECTION_TOOLS", "+ config_path tmp_file = tempfile.NamedTemporaryFile(delete=False) out = f' --out {tmp_file.name}'", "= ' --config ' + config_path tmp_file = tempfile.NamedTemporaryFile(delete=False) out", "tempfile.NamedTemporaryFile(delete=False) out = f' --out {tmp_file.name}' if 'pipeline' in cfg.data.train:", "for w in widths: n_clust += len(w) if isinstance(w, (list,", "f' --image_size_wh {img_shape[0]} {img_shape[1]}' subprocess.run(f'python {MMDETECTION_TOOLS}/cluster_boxes.py' f'{config}' f'{n_clust}' f'{group_as}' f'{update_config}'", "this file except in compliance with the License. 
You may", "' '.join([str(len(w)) for w in widths]) config = ' --config", "'SSDHead': return False if not cfg.model.bbox_head.anchor_generator.type == 'SSDAnchorGeneratorClustered': return False", "from ..registry import TRAINERS @TRAINERS.register_module() class MMDetectionTrainer(BaseTrainer): def __init__(self): super(MMDetectionTrainer,", "' + ' '.join([str(len(w)) for w in widths]) config =", "= [t for t in cfg.data.train.dataset.pipeline if t['type'] == 'Resize'][0][", "{img_shape[1]}' subprocess.run(f'python {MMDETECTION_TOOLS}/cluster_boxes.py' f'{config}' f'{n_clust}' f'{group_as}' f'{update_config}' f'{img_shape}' f'{out}'.split(' '),", "f' model.bbox_head.anchor_generator.widths={str(widths).replace(\" \", \"\")}' update_config += f' model.bbox_head.anchor_generator.heights={str(heights).replace(\" \", \"\")}'", "[t for t in cfg.data.train.pipeline if t['type'] == 'Resize'][0][ 'img_scale']", "specific language governing permissions and limitations under the License. \"\"\"", "+ str(n_clust) group_as = '' if isinstance(widths[0], (list, tuple)): group_as", "(the \"License\"); you may not use this file except in", "update_config += f' model.bbox_head.anchor_generator.widths={str(widths).replace(\" \", \"\")}' update_config += f' model.bbox_head.anchor_generator.heights={str(heights).replace(\"", "return MMDETECTION_TOOLS def _add_extra_args(self, cfg, config_path, update_config): if self.__is_clustering_needed(cfg): update_config", "False return True @staticmethod def __cluster(cfg, config_path, update_config): logging.info('Clustering started...')", "str(n_clust) group_as = '' if isinstance(widths[0], (list, tuple)): group_as =", "config_path tmp_file = tempfile.NamedTemporaryFile(delete=False) out = f' --out {tmp_file.name}' if", "if not cfg.model.bbox_head.anchor_generator.type == 'SSDAnchorGeneratorClustered': return False return True @staticmethod", "= f' --image_size_wh {img_shape[0]} {img_shape[1]}' subprocess.run(f'python 
{MMDETECTION_TOOLS}/cluster_boxes.py' f'{config}' f'{n_clust}' f'{group_as}'", "self.__cluster(cfg, config_path, update_config) return update_config @staticmethod def __is_clustering_needed(cfg): if cfg.total_epochs", "tuple)) else 1 n_clust = ' --n_clust ' + str(n_clust)", "for w in widths]) config = ' --config ' +", "\"\")}' update_config += f' model.bbox_head.anchor_generator.heights={str(heights).replace(\" \", \"\")}' logging.info('... clustering completed.')", "import TRAINERS @TRAINERS.register_module() class MMDetectionTrainer(BaseTrainer): def __init__(self): super(MMDetectionTrainer, self).__init__() def", "'.join([str(len(w)) for w in widths]) config = ' --config '", "import json import logging import subprocess import tempfile from ote", "config = ' --config ' + config_path tmp_file = tempfile.NamedTemporaryFile(delete=False)", "applicable law or agreed to in writing, software distributed under", "{img_shape[0]} {img_shape[1]}' subprocess.run(f'python {MMDETECTION_TOOLS}/cluster_boxes.py' f'{config}' f'{n_clust}' f'{group_as}' f'{update_config}' f'{img_shape}' f'{out}'.split('", "return update_config @staticmethod def __is_clustering_needed(cfg): if cfg.total_epochs > 0: return", "@staticmethod def __is_clustering_needed(cfg): if cfg.total_epochs > 0: return False if", "if cfg.total_epochs > 0: return False if not hasattr(cfg.model, 'bbox_head')", "governing permissions and limitations under the License. 
\"\"\" import json", "img_shape = f' --image_size_wh {img_shape[0]} {img_shape[1]}' subprocess.run(f'python {MMDETECTION_TOOLS}/cluster_boxes.py' f'{config}' f'{n_clust}'", "widths: n_clust += len(w) if isinstance(w, (list, tuple)) else 1", "MMDETECTION_TOOLS def _add_extra_args(self, cfg, config_path, update_config): if self.__is_clustering_needed(cfg): update_config =", "widths]) config = ' --config ' + config_path tmp_file =", "def _add_extra_args(self, cfg, config_path, update_config): if self.__is_clustering_needed(cfg): update_config = self.__cluster(cfg,", "(c) 2020 Intel Corporation Licensed under the Apache License, Version", "the License is distributed on an \"AS IS\" BASIS, WITHOUT", "else: img_shape = [t for t in cfg.data.train.dataset.pipeline if t['type']", "model.bbox_head.anchor_generator.widths={str(widths).replace(\" \", \"\")}' update_config += f' model.bbox_head.anchor_generator.heights={str(heights).replace(\" \", \"\")}' logging.info('...", "isinstance(w, (list, tuple)) else 1 n_clust = ' --n_clust '", "You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0", "the specific language governing permissions and limitations under the License.", "(list, tuple)): group_as = ' --group_as ' + ' '.join([str(len(w))", "'img_scale'] else: img_shape = [t for t in cfg.data.train.dataset.pipeline if", "1 n_clust = ' --n_clust ' + str(n_clust) group_as =", "--update_config' update_config += f' model.bbox_head.anchor_generator.widths={str(widths).replace(\" \", \"\")}' update_config += f'", "not hasattr(cfg.model, 'bbox_head') or not cfg.model.bbox_head.type == 'SSDHead': return False", "under the License. 
\"\"\" import json import logging import subprocess", "check=True) with open(tmp_file.name) as src_file: content = json.load(src_file) widths, heights", "'bbox_head') or not cfg.model.bbox_head.type == 'SSDHead': return False if not", "import tempfile from ote import MMDETECTION_TOOLS from .base import BaseTrainer", "the Apache License, Version 2.0 (the \"License\"); you may not", "file except in compliance with the License. You may obtain", "except in compliance with the License. You may obtain a", "or implied. See the License for the specific language governing", "KIND, either express or implied. See the License for the", "class MMDetectionTrainer(BaseTrainer): def __init__(self): super(MMDetectionTrainer, self).__init__() def _get_tools_dir(self): return MMDETECTION_TOOLS", "_get_tools_dir(self): return MMDETECTION_TOOLS def _add_extra_args(self, cfg, config_path, update_config): if self.__is_clustering_needed(cfg):", "__is_clustering_needed(cfg): if cfg.total_epochs > 0: return False if not hasattr(cfg.model,", "to in writing, software distributed under the License is distributed", "def _get_tools_dir(self): return MMDETECTION_TOOLS def _add_extra_args(self, cfg, config_path, update_config): if", "n_clust = ' --n_clust ' + str(n_clust) group_as = ''", "cfg.data.train.dataset.pipeline if t['type'] == 'Resize'][0][ 'img_scale'] img_shape = f' --image_size_wh", "= ' --update_config' update_config += f' model.bbox_head.anchor_generator.widths={str(widths).replace(\" \", \"\")}' update_config", "\"\"\" import json import logging import subprocess import tempfile from", "update_config): logging.info('Clustering started...') widths = cfg.model.bbox_head.anchor_generator.widths n_clust = 0 for", "content['heights'] if not update_config: update_config = ' --update_config' update_config +=", "or agreed to in writing, software distributed under the License", "tmp_file = tempfile.NamedTemporaryFile(delete=False) out = f' --out {tmp_file.name}' if 'pipeline'", "--group_as ' 
+ ' '.join([str(len(w)) for w in widths]) config", "law or agreed to in writing, software distributed under the", "OR CONDITIONS OF ANY KIND, either express or implied. See", "f'{update_config}' f'{img_shape}' f'{out}'.split(' '), check=True) with open(tmp_file.name) as src_file: content", "ote import MMDETECTION_TOOLS from .base import BaseTrainer from ..registry import", "compliance with the License. You may obtain a copy of", "Copyright (c) 2020 Intel Corporation Licensed under the Apache License,", "language governing permissions and limitations under the License. \"\"\" import", "'Resize'][0][ 'img_scale'] img_shape = f' --image_size_wh {img_shape[0]} {img_shape[1]}' subprocess.run(f'python {MMDETECTION_TOOLS}/cluster_boxes.py'", "OF ANY KIND, either express or implied. See the License", "+ ' '.join([str(len(w)) for w in widths]) config = '", "== 'Resize'][0][ 'img_scale'] img_shape = f' --image_size_wh {img_shape[0]} {img_shape[1]}' subprocess.run(f'python", "under the Apache License, Version 2.0 (the \"License\"); you may", "f'{out}'.split(' '), check=True) with open(tmp_file.name) as src_file: content = json.load(src_file)", "on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF", "in widths]) config = ' --config ' + config_path tmp_file", "t in cfg.data.train.pipeline if t['type'] == 'Resize'][0][ 'img_scale'] else: img_shape", "> 0: return False if not hasattr(cfg.model, 'bbox_head') or not", "(list, tuple)) else 1 n_clust = ' --n_clust ' +", "not cfg.model.bbox_head.anchor_generator.type == 'SSDAnchorGeneratorClustered': return False return True @staticmethod def", "json.load(src_file) widths, heights = content['widths'], content['heights'] if not update_config: update_config", "n_clust = 0 for w in widths: n_clust += len(w)", "if not update_config: update_config = ' --update_config' update_config += f'", "if 'pipeline' in cfg.data.train: img_shape = [t for t in", "@staticmethod def __cluster(cfg, config_path, update_config): 
logging.info('Clustering started...') widths = cfg.model.bbox_head.anchor_generator.widths", "self.__is_clustering_needed(cfg): update_config = self.__cluster(cfg, config_path, update_config) return update_config @staticmethod def", "MMDetectionTrainer(BaseTrainer): def __init__(self): super(MMDetectionTrainer, self).__init__() def _get_tools_dir(self): return MMDETECTION_TOOLS def", "out = f' --out {tmp_file.name}' if 'pipeline' in cfg.data.train: img_shape", "content['widths'], content['heights'] if not update_config: update_config = ' --update_config' update_config", "= 0 for w in widths: n_clust += len(w) if", "License, Version 2.0 (the \"License\"); you may not use this", "t in cfg.data.train.dataset.pipeline if t['type'] == 'Resize'][0][ 'img_scale'] img_shape =", "self).__init__() def _get_tools_dir(self): return MMDETECTION_TOOLS def _add_extra_args(self, cfg, config_path, update_config):", "else 1 n_clust = ' --n_clust ' + str(n_clust) group_as", "\", \"\")}' update_config += f' model.bbox_head.anchor_generator.heights={str(heights).replace(\" \", \"\")}' logging.info('... clustering", "MMDETECTION_TOOLS from .base import BaseTrainer from ..registry import TRAINERS @TRAINERS.register_module()", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "Intel Corporation Licensed under the Apache License, Version 2.0 (the", "for the specific language governing permissions and limitations under the", "cfg.data.train: img_shape = [t for t in cfg.data.train.pipeline if t['type']", "See the License for the specific language governing permissions and", "the License. \"\"\" import json import logging import subprocess import", "== 'Resize'][0][ 'img_scale'] else: img_shape = [t for t in", "a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by", "tempfile from ote import MMDETECTION_TOOLS from .base import BaseTrainer from", "and limitations under the License. 
\"\"\" import json import logging", "json import logging import subprocess import tempfile from ote import", "w in widths: n_clust += len(w) if isinstance(w, (list, tuple))", "= [t for t in cfg.data.train.pipeline if t['type'] == 'Resize'][0][", "in cfg.data.train: img_shape = [t for t in cfg.data.train.pipeline if", "src_file: content = json.load(src_file) widths, heights = content['widths'], content['heights'] if", "' + config_path tmp_file = tempfile.NamedTemporaryFile(delete=False) out = f' --out", "f' --out {tmp_file.name}' if 'pipeline' in cfg.data.train: img_shape = [t", "'pipeline' in cfg.data.train: img_shape = [t for t in cfg.data.train.pipeline", "subprocess.run(f'python {MMDETECTION_TOOLS}/cluster_boxes.py' f'{config}' f'{n_clust}' f'{group_as}' f'{update_config}' f'{img_shape}' f'{out}'.split(' '), check=True)", "f'{group_as}' f'{update_config}' f'{img_shape}' f'{out}'.split(' '), check=True) with open(tmp_file.name) as src_file:", "from .base import BaseTrainer from ..registry import TRAINERS @TRAINERS.register_module() class", "isinstance(widths[0], (list, tuple)): group_as = ' --group_as ' + '", "if t['type'] == 'Resize'][0][ 'img_scale'] else: img_shape = [t for", "False if not hasattr(cfg.model, 'bbox_head') or not cfg.model.bbox_head.type == 'SSDHead':", "update_config = ' --update_config' update_config += f' model.bbox_head.anchor_generator.widths={str(widths).replace(\" \", \"\")}'", "License. 
You may obtain a copy of the License at", "BaseTrainer from ..registry import TRAINERS @TRAINERS.register_module() class MMDetectionTrainer(BaseTrainer): def __init__(self):", "cfg, config_path, update_config): if self.__is_clustering_needed(cfg): update_config = self.__cluster(cfg, config_path, update_config)", "cfg.total_epochs > 0: return False if not hasattr(cfg.model, 'bbox_head') or", "group_as = ' --group_as ' + ' '.join([str(len(w)) for w", "from ote import MMDETECTION_TOOLS from .base import BaseTrainer from ..registry", ".base import BaseTrainer from ..registry import TRAINERS @TRAINERS.register_module() class MMDetectionTrainer(BaseTrainer):", "for t in cfg.data.train.dataset.pipeline if t['type'] == 'Resize'][0][ 'img_scale'] img_shape", "the License for the specific language governing permissions and limitations", "' --group_as ' + ' '.join([str(len(w)) for w in widths])", "may not use this file except in compliance with the", "= content['widths'], content['heights'] if not update_config: update_config = ' --update_config'", "in writing, software distributed under the License is distributed on", "= tempfile.NamedTemporaryFile(delete=False) out = f' --out {tmp_file.name}' if 'pipeline' in", "'Resize'][0][ 'img_scale'] else: img_shape = [t for t in cfg.data.train.dataset.pipeline", "required by applicable law or agreed to in writing, software", "return False if not cfg.model.bbox_head.anchor_generator.type == 'SSDAnchorGeneratorClustered': return False return", "implied. 
See the License for the specific language governing permissions", "--out {tmp_file.name}' if 'pipeline' in cfg.data.train: img_shape = [t for", "in cfg.data.train.pipeline if t['type'] == 'Resize'][0][ 'img_scale'] else: img_shape =", "w in widths]) config = ' --config ' + config_path", "as src_file: content = json.load(src_file) widths, heights = content['widths'], content['heights']", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "= ' --n_clust ' + str(n_clust) group_as = '' if", "an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY", "t['type'] == 'Resize'][0][ 'img_scale'] else: img_shape = [t for t", "License. \"\"\" import json import logging import subprocess import tempfile", "config_path, update_config) return update_config @staticmethod def __is_clustering_needed(cfg): if cfg.total_epochs >", "== 'SSDAnchorGeneratorClustered': return False return True @staticmethod def __cluster(cfg, config_path,", "def __cluster(cfg, config_path, update_config): logging.info('Clustering started...') widths = cfg.model.bbox_head.anchor_generator.widths n_clust", "img_shape = [t for t in cfg.data.train.dataset.pipeline if t['type'] ==", "import subprocess import tempfile from ote import MMDETECTION_TOOLS from .base", "group_as = '' if isinstance(widths[0], (list, tuple)): group_as = '", "def __init__(self): super(MMDetectionTrainer, self).__init__() def _get_tools_dir(self): return MMDETECTION_TOOLS def _add_extra_args(self,", "in widths: n_clust += len(w) if isinstance(w, (list, tuple)) else", "limitations under the License. \"\"\" import json import logging import", "img_shape = [t for t in cfg.data.train.pipeline if t['type'] ==", "len(w) if isinstance(w, (list, tuple)) else 1 n_clust = '", "' --update_config' update_config += f' model.bbox_head.anchor_generator.widths={str(widths).replace(\" \", \"\")}' update_config +=" ]
[ "complixities # for each file files = {} # Build", "containing the number of potentially duplicated # constants by file", "of potentially duplicated # constants by file for result in", "in: ([^:]+).+\\n?', cyclostats) files = {} # Build an array", "occurrence\\(s\\) of \\\"(.+)\\\" found in: ([^:]+).+\\n?', cyclostats) files = {}", "import json import subprocess import re import statistics def get_complexity():", "files def get_duplicate_const_strings(): # Load the const string duplication info", "if f in complexity else 0, 'duplicateConstStrings': duplicate_const_strings[f] if f", "for result in results: if result[0] in files: files[result[0]] =", "complexity = get_complexity() duplicate_const_strings = get_duplicate_const_strings() files = set() files.update(complexity.keys())", "to keep track of the complixities # for each file", "highest of the two # middle entries if needed) for", "for name, values in files.items(): files[name] = statistics.median_high(values) return files", "value (picking the highest of the two # middle entries", "# Build an array containing the number of potentially duplicated", "by file for result in results: if result[0] in files:", "files[result[0]] = 1 return files # Main service body if", "# for each file files = {} # Build an", "Load the const string duplication info cyclostats = subprocess.check_output(['./goconst', './repo/...']).decode(\"utf-8\")", "for each file for result in results: if result[3] in", "other occurrence\\(s\\) of \\\"(.+)\\\" found in: ([^:]+).+\\n?', cyclostats) files =", "files = {} # Build an array containing the number", "cyclostats = subprocess.check_output(['./goconst', './repo/...']).decode(\"utf-8\") results = re.findall('([^:]+).+ other occurrence\\(s\\) of", "middle entries if needed) for each file for name, values", "keep track of the complixities # for each file files", "an array of complexities for each file for result in", "the cyclomatic complexity info cyclostats = 
subprocess.check_output(['./gocyclo', 'repo']).decode(\"utf-8\") results =", "entries if needed) for each file for name, values in", "duplicated # constants by file for result in results: if", "import subprocess import re import statistics def get_complexity(): # Load", "cyclomatic complexity info cyclostats = subprocess.check_output(['./gocyclo', 'repo']).decode(\"utf-8\") results = re.findall('([0-9]+)\\s([^\\s]+)\\s([^\\s]+)\\s([^:]+):([0-9]+):([0-9]+)',", "# Pick out the median value (picking the highest of", "= re.findall('([^:]+).+ other occurrence\\(s\\) of \\\"(.+)\\\" found in: ([^:]+).+\\n?', cyclostats)", "= get_duplicate_const_strings() files = set() files.update(complexity.keys()) files.update(duplicate_const_strings.keys()) result = []", "in which to keep track of the complixities # for", "else: files[result[0]] = 1 return files # Main service body", "files[result[3]] = [int(result[0])] # Pick out the median value (picking", "def get_duplicate_const_strings(): # Load the const string duplication info cyclostats", "{} # Build an array containing the number of potentially", "dictionary in which to keep track of the complixities #", "else: files[result[3]] = [int(result[0])] # Pick out the median value", "= get_complexity() duplicate_const_strings = get_duplicate_const_strings() files = set() files.update(complexity.keys()) files.update(duplicate_const_strings.keys())", "files.update(duplicate_const_strings.keys()) result = [] for f in files: result.append({ 'filename':", "array containing the number of potentially duplicated # constants by", "[int(result[0])] # Pick out the median value (picking the highest", "0, 'duplicateConstStrings': duplicate_const_strings[f] if f in duplicate_const_strings else 0 })", "([^:]+).+\\n?', cyclostats) files = {} # Build an array containing", "'./repo/...']).decode(\"utf-8\") results = re.findall('([^:]+).+ other occurrence\\(s\\) of \\\"(.+)\\\" found in:", "of \\\"(.+)\\\" found in: ([^:]+).+\\n?', cyclostats) files = 
{} #", "files[result[0]] = files[result[0]]+1 else: files[result[0]] = 1 return files #", "results: if result[0] in files: files[result[0]] = files[result[0]]+1 else: files[result[0]]", "Setup a dictionary in which to keep track of the", "= 1 return files # Main service body if __name__", "set() files.update(complexity.keys()) files.update(duplicate_const_strings.keys()) result = [] for f in files:", "potentially duplicated # constants by file for result in results:", "the median value (picking the highest of the two #", "# Load the const string duplication info cyclostats = subprocess.check_output(['./goconst',", "'repo']).decode(\"utf-8\") results = re.findall('([0-9]+)\\s([^\\s]+)\\s([^\\s]+)\\s([^:]+):([0-9]+):([0-9]+)', cyclostats) # Setup a dictionary in", "Load the cyclomatic complexity info cyclostats = subprocess.check_output(['./gocyclo', 'repo']).decode(\"utf-8\") results", "# middle entries if needed) for each file for name,", "which to keep track of the complixities # for each", "result[0] in files: files[result[0]] = files[result[0]]+1 else: files[result[0]] = 1", "f, 'cyclomaticComplexity': complexity[f] if f in complexity else 0, 'duplicateConstStrings':", "subprocess.check_output(['./gocyclo', 'repo']).decode(\"utf-8\") results = re.findall('([0-9]+)\\s([^\\s]+)\\s([^\\s]+)\\s([^:]+):([0-9]+):([0-9]+)', cyclostats) # Setup a dictionary", "= {} # Build an array of complexities for each", "of the two # middle entries if needed) for each", "files = {} # Build an array of complexities for", "out the median value (picking the highest of the two", "# Build an array of complexities for each file for", "info cyclostats = subprocess.check_output(['./goconst', './repo/...']).decode(\"utf-8\") results = re.findall('([^:]+).+ other occurrence\\(s\\)", "result = [] for f in files: result.append({ 'filename': f,", "= files[result[0]]+1 else: files[result[0]] = 1 return files # Main", "a dictionary in which to keep track of the complixities", "results = 
re.findall('([0-9]+)\\s([^\\s]+)\\s([^\\s]+)\\s([^:]+):([0-9]+):([0-9]+)', cyclostats) # Setup a dictionary in which", "service body if __name__ == \"__main__\": complexity = get_complexity() duplicate_const_strings", "# Load the cyclomatic complexity info cyclostats = subprocess.check_output(['./gocyclo', 'repo']).decode(\"utf-8\")", "results: if result[3] in files: files[result[3]].append(int(result[0])) else: files[result[3]] = [int(result[0])]", "files[result[0]]+1 else: files[result[0]] = 1 return files # Main service", "files: result.append({ 'filename': f, 'cyclomaticComplexity': complexity[f] if f in complexity", "found in: ([^:]+).+\\n?', cyclostats) files = {} # Build an", "in results: if result[0] in files: files[result[0]] = files[result[0]]+1 else:", "needed) for each file for name, values in files.items(): files[name]", "get_duplicate_const_strings() files = set() files.update(complexity.keys()) files.update(duplicate_const_strings.keys()) result = [] for", "# constants by file for result in results: if result[0]", "in files.items(): files[name] = statistics.median_high(values) return files def get_duplicate_const_strings(): #", "else 0, 'duplicateConstStrings': duplicate_const_strings[f] if f in duplicate_const_strings else 0", "get_complexity() duplicate_const_strings = get_duplicate_const_strings() files = set() files.update(complexity.keys()) files.update(duplicate_const_strings.keys()) result", "import sys import json import subprocess import re import statistics", "result in results: if result[0] in files: files[result[0]] = files[result[0]]+1", "the const string duplication info cyclostats = subprocess.check_output(['./goconst', './repo/...']).decode(\"utf-8\") results", "cyclostats) # Setup a dictionary in which to keep track", "cyclostats = subprocess.check_output(['./gocyclo', 'repo']).decode(\"utf-8\") results = re.findall('([0-9]+)\\s([^\\s]+)\\s([^\\s]+)\\s([^:]+):([0-9]+):([0-9]+)', cyclostats) # Setup", "complexity[f] if f in complexity 
else 0, 'duplicateConstStrings': duplicate_const_strings[f] if", "Build an array of complexities for each file for result", "in results: if result[3] in files: files[result[3]].append(int(result[0])) else: files[result[3]] =", "import re import statistics def get_complexity(): # Load the cyclomatic", "values in files.items(): files[name] = statistics.median_high(values) return files def get_duplicate_const_strings():", "Build an array containing the number of potentially duplicated #", "files[result[3]].append(int(result[0])) else: files[result[3]] = [int(result[0])] # Pick out the median", "two # middle entries if needed) for each file for", "statistics.median_high(values) return files def get_duplicate_const_strings(): # Load the const string", "constants by file for result in results: if result[0] in", "in complexity else 0, 'duplicateConstStrings': duplicate_const_strings[f] if f in duplicate_const_strings", "if result[3] in files: files[result[3]].append(int(result[0])) else: files[result[3]] = [int(result[0])] #", "each file for name, values in files.items(): files[name] = statistics.median_high(values)", "track of the complixities # for each file files =", "if __name__ == \"__main__\": complexity = get_complexity() duplicate_const_strings = get_duplicate_const_strings()", "result.append({ 'filename': f, 'cyclomaticComplexity': complexity[f] if f in complexity else", "{} # Build an array of complexities for each file", "files # Main service body if __name__ == \"__main__\": complexity", "get_complexity(): # Load the cyclomatic complexity info cyclostats = subprocess.check_output(['./gocyclo',", "result in results: if result[3] in files: files[result[3]].append(int(result[0])) else: files[result[3]]", "Pick out the median value (picking the highest of the", "subprocess.check_output(['./goconst', './repo/...']).decode(\"utf-8\") results = re.findall('([^:]+).+ other occurrence\\(s\\) of \\\"(.+)\\\" found", "= subprocess.check_output(['./goconst', 
'./repo/...']).decode(\"utf-8\") results = re.findall('([^:]+).+ other occurrence\\(s\\) of \\\"(.+)\\\"", "the highest of the two # middle entries if needed)", "the number of potentially duplicated # constants by file for", "info cyclostats = subprocess.check_output(['./gocyclo', 'repo']).decode(\"utf-8\") results = re.findall('([0-9]+)\\s([^\\s]+)\\s([^\\s]+)\\s([^:]+):([0-9]+):([0-9]+)', cyclostats) #", "files: files[result[3]].append(int(result[0])) else: files[result[3]] = [int(result[0])] # Pick out the", "file for result in results: if result[0] in files: files[result[0]]", "each file files = {} # Build an array of", "file for name, values in files.items(): files[name] = statistics.median_high(values) return", "duplication info cyclostats = subprocess.check_output(['./goconst', './repo/...']).decode(\"utf-8\") results = re.findall('([^:]+).+ other", "median value (picking the highest of the two # middle", "return files # Main service body if __name__ == \"__main__\":", "[] for f in files: result.append({ 'filename': f, 'cyclomaticComplexity': complexity[f]", "Main service body if __name__ == \"__main__\": complexity = get_complexity()", "import statistics def get_complexity(): # Load the cyclomatic complexity info", "for result in results: if result[3] in files: files[result[3]].append(int(result[0])) else:", "files[name] = statistics.median_high(values) return files def get_duplicate_const_strings(): # Load the", "array of complexities for each file for result in results:", "in files: files[result[0]] = files[result[0]]+1 else: files[result[0]] = 1 return", "duplicate_const_strings = get_duplicate_const_strings() files = set() files.update(complexity.keys()) files.update(duplicate_const_strings.keys()) result =", "complexities for each file for result in results: if result[3]", "of the complixities # for each file files = {}", "__name__ == \"__main__\": complexity = get_complexity() duplicate_const_strings = get_duplicate_const_strings() files", "f in 
files: result.append({ 'filename': f, 'cyclomaticComplexity': complexity[f] if f", "file for result in results: if result[3] in files: files[result[3]].append(int(result[0]))", "each file for result in results: if result[3] in files:", "complexity else 0, 'duplicateConstStrings': duplicate_const_strings[f] if f in duplicate_const_strings else", "for each file for name, values in files.items(): files[name] =", "re.findall('([^:]+).+ other occurrence\\(s\\) of \\\"(.+)\\\" found in: ([^:]+).+\\n?', cyclostats) files", "complexity info cyclostats = subprocess.check_output(['./gocyclo', 'repo']).decode(\"utf-8\") results = re.findall('([0-9]+)\\s([^\\s]+)\\s([^\\s]+)\\s([^:]+):([0-9]+):([0-9]+)', cyclostats)", "= re.findall('([0-9]+)\\s([^\\s]+)\\s([^\\s]+)\\s([^:]+):([0-9]+):([0-9]+)', cyclostats) # Setup a dictionary in which to", "in files: files[result[3]].append(int(result[0])) else: files[result[3]] = [int(result[0])] # Pick out", "body if __name__ == \"__main__\": complexity = get_complexity() duplicate_const_strings =", "result[3] in files: files[result[3]].append(int(result[0])) else: files[result[3]] = [int(result[0])] # Pick", "an array containing the number of potentially duplicated # constants", "file files = {} # Build an array of complexities", "1 return files # Main service body if __name__ ==", "= subprocess.check_output(['./gocyclo', 'repo']).decode(\"utf-8\") results = re.findall('([0-9]+)\\s([^\\s]+)\\s([^\\s]+)\\s([^:]+):([0-9]+):([0-9]+)', cyclostats) # Setup a", "json import subprocess import re import statistics def get_complexity(): #", "'filename': f, 'cyclomaticComplexity': complexity[f] if f in complexity else 0,", "\\\"(.+)\\\" found in: ([^:]+).+\\n?', cyclostats) files = {} # Build", "re import statistics def get_complexity(): # Load the cyclomatic complexity", "= statistics.median_high(values) return files def get_duplicate_const_strings(): # Load the const", "== \"__main__\": complexity = get_complexity() duplicate_const_strings = 
get_duplicate_const_strings() files =", "re.findall('([0-9]+)\\s([^\\s]+)\\s([^\\s]+)\\s([^:]+):([0-9]+):([0-9]+)', cyclostats) # Setup a dictionary in which to keep", "= [int(result[0])] # Pick out the median value (picking the", "def get_complexity(): # Load the cyclomatic complexity info cyclostats =", "the two # middle entries if needed) for each file", "sys import json import subprocess import re import statistics def", "the complixities # for each file files = {} #", "string duplication info cyclostats = subprocess.check_output(['./goconst', './repo/...']).decode(\"utf-8\") results = re.findall('([^:]+).+", "get_duplicate_const_strings(): # Load the const string duplication info cyclostats =", "cyclostats) files = {} # Build an array containing the", "statistics def get_complexity(): # Load the cyclomatic complexity info cyclostats", "# Setup a dictionary in which to keep track of", "if needed) for each file for name, values in files.items():", "for each file files = {} # Build an array", "files = set() files.update(complexity.keys()) files.update(duplicate_const_strings.keys()) result = [] for f", "'cyclomaticComplexity': complexity[f] if f in complexity else 0, 'duplicateConstStrings': duplicate_const_strings[f]", "\"__main__\": complexity = get_complexity() duplicate_const_strings = get_duplicate_const_strings() files = set()", "const string duplication info cyclostats = subprocess.check_output(['./goconst', './repo/...']).decode(\"utf-8\") results =", "# Main service body if __name__ == \"__main__\": complexity =", "= [] for f in files: result.append({ 'filename': f, 'cyclomaticComplexity':", "number of potentially duplicated # constants by file for result", "f in complexity else 0, 'duplicateConstStrings': duplicate_const_strings[f] if f in", "results = re.findall('([^:]+).+ other occurrence\\(s\\) of \\\"(.+)\\\" found in: ([^:]+).+\\n?',", "files.update(complexity.keys()) files.update(duplicate_const_strings.keys()) result = [] for f in files: 
result.append({", "return files def get_duplicate_const_strings(): # Load the const string duplication", "for f in files: result.append({ 'filename': f, 'cyclomaticComplexity': complexity[f] if", "subprocess import re import statistics def get_complexity(): # Load the", "'duplicateConstStrings': duplicate_const_strings[f] if f in duplicate_const_strings else 0 }) print(json.dumps(result))", "= set() files.update(complexity.keys()) files.update(duplicate_const_strings.keys()) result = [] for f in", "= {} # Build an array containing the number of", "name, values in files.items(): files[name] = statistics.median_high(values) return files def", "files: files[result[0]] = files[result[0]]+1 else: files[result[0]] = 1 return files", "in files: result.append({ 'filename': f, 'cyclomaticComplexity': complexity[f] if f in", "files.items(): files[name] = statistics.median_high(values) return files def get_duplicate_const_strings(): # Load", "of complexities for each file for result in results: if", "if result[0] in files: files[result[0]] = files[result[0]]+1 else: files[result[0]] =", "(picking the highest of the two # middle entries if" ]
[ "bool(_get_error_list(code)) i1 = 'from __future__ import division' i2 = 'from", "('1 +\\n', [(1, 3)]), ('1 +\\n2 +', [(1, 3), (2,", "can't assign to keyword\", 'SyntaxError: cannot assign to __debug__'], line_nr", "0)]), ('def x():\\n 1\\n 2', [(3, 0)]), ('def x():\\n 1\\n", "+ '\\n' + i2) assert not is_issue('\"\";' + i1) assert", "the internal function might change over time. \"\"\" def get_msg(end,", "# It's as simple as either an error or not.", "('def x():\\n1', [(2, 0)]), ] ) def test_indentation_errors(code, positions): assert_comparison(code,", "expression here\"], line_nr elif wanted == 'SyntaxError: f-string: unterminated string':", "a bit of quality assurance that this works through versions,", "get_msgs(r'\\UXXXXXXXX') error, = _get_error_list(r'u\"\\N{}\"', version=each_version) assert error.message == get_msg(r'malformed \\N", "'message'), [ (\"f'{1+}'\", ('invalid syntax')), (r'f\"\\\"', ('invalid syntax')), (r'fr\"\\\"', ('invalid", "* * 2', [(1, 4)]), ('?\\n1\\n?', [(1, 0), (3, 0)]),", "('?\\n?', [(1, 0), (2, 0)]), ('? * ?', [(1, 0)]),", "= 'try: pass\\nexcept: pass\\nexcept X: pass' wanted, line_nr = _get_actual_exception(code)", "i2) assert not is_issue('\"\";%s;%s ', i1, i2) assert not is_issue('\"\";%s\\n%s", "character name', to=6) # Finally bytes. error, = _get_error_list(r'b\"\\x\"', version=each_version)", "as assignment target': # Python 3.4/3.4 have a bit of", "1\\n 2', [(3, 0)]), ('def x():\\n 1\\n 2', [(3, 0)]),", "like '\\x' # that are oddly enough not SyntaxErrors. wanted", "use starred expression here\"], line_nr elif wanted == 'SyntaxError: f-string:", "', i1, i2) assert is_issue('1;' + i1) assert is_issue('1\\n' +", "always the same / incorrect in Python 3.8. 
\"\"\" if", "def nofoo():[x async for x in []]' wanted, line_nr =", "wanted == 'SyntaxError: can use starred expression only as assignment", "add_func=True) is None assert get_error(20) assert get_error(20, add_func=True) def test_future_import_first():", "is_issue(i1 + '\\n' + i2) assert not is_issue('\"\";' + i1)", "example doesn't work with FAILING_EXAMPLES, because the line numbers are", "get_msg(r'malformed \\N character escape', to=2) error, = _get_error_list(r'u\"\\N{foo}\"', version=each_version) assert", "code = 'async def foo():\\n def nofoo():[x async for x", "assert not is_issue('\"\"\\n' + i1) assert not is_issue('\"\"\\n%s\\n%s', i1, i2)", "is_issue('\"\"\\n%s;%s', i1, i2) assert not is_issue('\"\";%s;%s ', i1, i2) assert", "module for now. pytestmark = pytest.mark.skip() def _get_error_list(code, version=None): grammar", "elif wanted == \"SyntaxError: f-string: single '}' is not allowed\":", "= 'def x():\\n if x:\\n' assert not _get_error_list(build_nested('pass', 49, base=base))", "syntax')), (r'fr\"\\\"', ('invalid syntax')), ] ) def test_invalid_fstrings(code, message): \"\"\"", "# Somehow in Python3.3 the SyntaxError().lineno is sometimes None assert", "error, = _get_error_list(code) assert error.message in wanted assert line_nr !=", "assert error.message in get_msgs(r'\\xXX') error, = _get_error_list(r'u\"\\u\"', version=each_version) assert error.message", "wanted = 'SyntaxError: invalid syntax' elif wanted == \"SyntaxError: f-string:", "syntax errors and indentation errors. \"\"\" import sys import warnings", "certain places. But in others this error makes sense. return", "error, = _get_error_list(r'u\"\\u\"', version=each_version) assert error.message in get_msgs(r'\\uXXXX') error, =", "def assert_comparison(code, error_code, positions): errors = [(error.start_pos, error.code) for error", "time. \"\"\" def get_msg(end, to=1): base = \"SyntaxError: (unicode error)", "change over time. 
\"\"\" def get_msg(end, to=1): base = \"SyntaxError:", "should raise an exception.\" # SyntaxError # Python 2.6 has", "in get_msgs(r'\\uXXXX') error, = _get_error_list(r'u\"\\U\"', version=each_version) assert error.message in get_msgs(r'\\UXXXXXXXX')", "doesn't # really matter. code = 'try: pass\\nexcept: pass\\nexcept X:", "an error or not. warnings.filterwarnings('ignore', category=SyntaxWarning) try: compile(code, '<unknown>', 'exec')", "available in Python 3. wanted += ' at position 0'", "wanted = 'SyntaxError: (value error) ' + str(e) line_nr =", "+ i1) assert is_issue('\"\";1\\n' + i1) assert is_issue('\"\"\\n%s\\nfrom x import", "build_nested if is_pypy: # The errors in PyPy might be", "a different warning than 3.5/3.6 in # certain places. But", "blocks' return errors[0] return None assert get_error(19) is None assert", "wanted == 'SyntaxError: f-string: empty expression not allowed': wanted =", "_get_error_list(build_nested('pass', 100)) base = 'def x():\\n if x:\\n' assert not", "others this error makes sense. 
return [wanted, \"SyntaxError: can't use", "_get_error_list(build_nested('pass', 99)) assert _get_error_list(build_nested('pass', 100)) base = 'def x():\\n if", "that this works through versions, because the internal function might", "statically nested blocks' return errors[0] return None assert get_error(19) is", "for x in []]' wanted, line_nr = _get_actual_exception(code) errors =", "f-string expression part cannot include '#'\": wanted = 'SyntaxError: invalid", "with FAILING_EXAMPLES, because the line numbers are not always the", "= _get_actual_exception(code) errors = _get_error_list(code) actual = None if errors:", "r'fr\"\\\\\\\"\"', r\"print(f'Some {x:.2f} and some {y}')\", ] ) def test_valid_fstrings(code):", "else: assert False, \"The piece of code should raise an", "__debug__' elif wanted == 'SyntaxError: can use starred expression only", "For this error the position seemed to be one line", "wanted = 'SyntaxError: cannot assign to __debug__' elif wanted ==", "literal\", \"SyntaxError: unexpected character after line continuation character\", ], line_nr", "invalid syntax' else: assert message == 'SyntaxError: named arguments must", "follow bare *' works_not_in_py.assert_no_error_in_passing('def foo(*, name): pass') works_not_in_py.assert_no_error_in_passing('def foo(bar, *,", "in Python 3. wanted += ' at position 0' assert", "'SyntaxError: cannot assign to __debug__' elif wanted == 'SyntaxError: can", "Unicode character name', to=6) # Finally bytes. 
error, = _get_error_list(r'b\"\\x\"',", "= 'from __future__ import absolute_import' assert not is_issue(i1) assert not", "[(1, 0), (3, 0)]), ] ) def test_syntax_errors(code, positions): assert_comparison(code,", "works_not_in_py.get_error_message('def foo(*, **dict): pass') message = works_not_in_py.get_error_message('def foo(*): pass') if", "49, base=base)) assert _get_error_list(build_nested('pass', 50, base=base)) @pytest.mark.parametrize( 'code', [ \"f'{*args,}'\",", "*, name=1): pass') works_not_in_py.assert_no_error_in_passing('def foo(bar, *, name=1, **dct): pass') def", "2', [(3, 0)]), ('def x():\\n 1\\n 2', [(3, 0)]), ('def", "can use starred expression only as assignment target': # Python", "\"SyntaxError: f-string: single '}' is not allowed\": wanted = 'SyntaxError:", "this is the better position. assert error.start_pos[0] == 2 def", "matter. code = 'try: pass\\nexcept: pass\\nexcept X: pass' wanted, line_nr", "positional argument follows keyword argument' elif wanted == 'SyntaxError: assignment", "here\"], line_nr elif wanted == 'SyntaxError: f-string: unterminated string': wanted", "%s escape\" % escape)) error, = _get_error_list(r'u\"\\x\"', version=each_version) assert error.message", "error) invalid \\x escape' if sys.version_info >= (3, 0): #", "error.message == get_msg(r'unknown Unicode character name', to=6) # Finally bytes.", "i2) assert not is_issue(i1 + '\\n' + i2) assert not", "2.6 has a bit different error messages here, so skip", "@pytest.mark.parametrize('code', FAILING_EXAMPLES) def test_python_exception_matches(code): wanted, line_nr = _get_actual_exception(code) errors =", "keyword': return [wanted, \"SyntaxError: can't assign to keyword\", 'SyntaxError: cannot", "but that doesn't # really matter. 
code = 'try: pass\\nexcept:", "error.message assert actual in wanted # Somehow in Python3.3 the", "line_nr != error.start_pos[0] # I think this is the better", "(' 1', [(1, 0)]), ('def x():\\n 1\\n 2', [(3, 0)]),", "code should raise an exception.\" # SyntaxError # Python 2.6", "= _get_error_list(code) actual = None if errors: error, = errors", "For whatever reason this is zero in Python 3.8+ @pytest.mark.parametrize(", "sys.version_info[:2] == (2, 6) and wanted == 'SyntaxError: unexpected EOF", "get_error(19) is None assert get_error(19, add_func=True) is None assert get_error(20)", "character after line continuation character\", ], line_nr elif wanted ==", "r'fr\"\\\"\"', r'fr\"\\\\\\\"\"', r\"print(f'Some {x:.2f} and some {y}')\", ] ) def", "works_not_in_py.get_error_message('def foo(*): pass') if works_not_in_py.version.startswith('2'): assert message == 'SyntaxError: invalid", "absolute_import' assert not is_issue(i1) assert not is_issue(i1 + ';' +", "== get_msg(r'malformed \\N character escape', to=2) error, = _get_error_list(r'u\"\\N{foo}\"', version=each_version)", "version=each_version) assert error.message == get_msg(r'unknown Unicode character name', to=6) #", "escape' if sys.version_info >= (3, 0): # The positioning information", "('def x():\\n 1\\n 2', [(3, 0)]), ('def x():\\n 1\\n 2',", "as either an error or not. warnings.filterwarnings('ignore', category=SyntaxWarning) try: compile(code,", "'}'\": wanted = 'SyntaxError: EOL while scanning string literal' elif", "message = works_not_in_py.get_error_message('def foo(*, **dict): pass') message = works_not_in_py.get_error_message('def foo(*):", "elif wanted == \"SyntaxError: f-string expression part cannot include '#'\":", "get_error(20) assert get_error(20, add_func=True) def test_future_import_first(): def is_issue(code, *args): code", "handled differntly in 3.6 and other versions. 
Therefore check specifically", "assert_comparison(code, error_code, positions): errors = [(error.start_pos, error.code) for error in", "might change over time. \"\"\" def get_msg(end, to=1): base =", "add_func=True) def test_future_import_first(): def is_issue(code, *args): code = code %", "elif wanted == 'SyntaxError: f-string: empty expression not allowed': wanted", "_get_error_list(r'u\"\\u\"', version=each_version) assert error.message in get_msgs(r'\\uXXXX') error, = _get_error_list(r'u\"\\U\"', version=each_version)", "('?', [(1, 0)]), ('??', [(1, 0)]), ('? ?', [(1, 0)]),", "+ 2', []), ('[\\n', [(2, 0)]), ('[\\ndef x(): pass', [(2,", "different warning than 3.5/3.6 in # certain places. But in", "+ i1) assert not is_issue('\"\"\\n' + i1) assert not is_issue('\"\"\\n%s\\n%s',", "version=None): grammar = parso.load_grammar(version=version) tree = grammar.parse(code) return list(grammar.iter_errors(tree)) def", "versions, because the internal function might change over time. \"\"\"", "character\", ], line_nr elif wanted == \"SyntaxError: f-string: expecting '}'\":", "SyntaxError().lineno is sometimes None assert line_nr is None or line_nr", "3), (2, 3)]), ('x + 2', []), ('[\\n', [(2, 0)]),", "versions. code = 'async def foo():\\n def nofoo():[x async for", "(3, 0)]), ] ) def test_syntax_errors(code, positions): assert_comparison(code, 901, positions)", "foo(*): pass') if works_not_in_py.version.startswith('2'): assert message == 'SyntaxError: invalid syntax'", "I think this is the better position. assert error.start_pos[0] ==", "line_nr elif wanted == 'SyntaxError: assignment to None': # Python", "is_issue('%s\\n\"\"\\n%s', i1, i2) def test_named_argument_issues(works_not_in_py): message = works_not_in_py.get_error_message('def foo(*, **dict):", "one line off, but that doesn't # really matter. 
code", "name=1): pass') works_not_in_py.assert_no_error_in_passing('def foo(bar, *, name=1, **dct): pass') def test_escape_decode_literals(each_version):", "return build(new_code, depth - 1) def get_error(depth, add_func=False): code =", "= 'async def foo():\\n def nofoo():[x async for x in", "i1, i2) def test_named_argument_issues(works_not_in_py): message = works_not_in_py.get_error_message('def foo(*, **dict): pass')", "in _get_error_list(code)] assert [(pos, error_code) for pos in positions] ==", "wanted == 'SyntaxError: can not assign to __debug__': # Python", "character escape', to=2) error, = _get_error_list(r'u\"\\N{foo}\"', version=each_version) assert error.message ==", "= _get_error_list(r'b\"\\x\"', version=each_version) wanted = r'SyntaxError: (value error) invalid \\x", "FAILING_EXAMPLES, because the line numbers are not always the same", "is_issue('1;' + i1) assert is_issue('1\\n' + i1) assert is_issue('\"\";1\\n' +", "with warnings.catch_warnings(): # We don't care about warnings where locals/globals", "not is_issue(i1 + '\\n' + i2) assert not is_issue('\"\";' +", "i2) def test_named_argument_issues(works_not_in_py): message = works_not_in_py.get_error_message('def foo(*, **dict): pass') message", "as simple as either an error or not. warnings.filterwarnings('ignore', category=SyntaxWarning)", "not assign to __debug__': # Python 2.6 does has a", "is_issue('\"\";' + i1) assert not is_issue('\"\";' + i1) assert not", "'positions'), [ ('1 +', [(1, 3)]), ('1 +\\n', [(1, 3)]),", "test_too_many_levels_of_indentation(): assert not _get_error_list(build_nested('pass', 99)) assert _get_error_list(build_nested('pass', 100)) base =", "because the line numbers are not always the same /", "about warnings where locals/globals misbehave here. # It's as simple", "_get_error_list(code) assert error.message in wanted assert line_nr != error.start_pos[0] #", "is without syntax errors. 
Here we make a bit of", "base=base)) @pytest.mark.parametrize( 'code', [ \"f'{*args,}'\", r'f\"\\\"\"', r'f\"\\\\\\\"\"', r'fr\"\\\"\"', r'fr\"\\\\\\\"\"', r\"print(f'Some", "i1) assert is_issue('\"\"\\n%s\\nfrom x import a\\n%s', i1, i2) assert is_issue('%s\\n\"\"\\n%s',", "the better position. assert error.start_pos[0] == 2 def test_statically_nested_blocks(): def", "= \"SyntaxError: (unicode error) 'unicodeescape' \" \\ \"codec can't decode", "these errors here. \"\"\" error, = _get_error_list(code, version='3.6') assert message", "wanted == 'SyntaxError: f-string: unterminated string': wanted = 'SyntaxError: EOL", "foo import (bar, rab, )\", ] ) def test_trailing_comma(code): errors", "# Python 2.6 does has a slightly different error. wanted", "None assert get_error(20) assert get_error(20, add_func=True) def test_future_import_first(): def is_issue(code,", "pass') works_not_in_py.assert_no_error_in_passing('def foo(bar, *, name=1, **dct): pass') def test_escape_decode_literals(each_version): \"\"\"", "[(2, 0)]), ('1+?', [(1, 2)]), ('?', [(1, 0)]), ('??', [(1,", "in previous versions. code = 'async def foo():\\n def nofoo():[x", "error, = _get_error_list(r'u\"\\N{}\"', version=each_version) assert error.message == get_msg(r'malformed \\N character", "' + e.msg line_nr = e.lineno except ValueError as e:", "= pytest.mark.skip() def _get_error_list(code, version=None): grammar = parso.load_grammar(version=version) tree =", "assert line_nr is None or line_nr == error.start_pos[0] def test_non_async_in_async():", "x(): pass', [(2, 0)]), ('[\\nif 1: pass', [(2, 0)]), ('1+?',", "[(1, 2)]), ('?', [(1, 0)]), ('??', [(1, 0)]), ('? ?',", "= 'SyntaxError: invalid syntax' if wanted == 'SyntaxError: non-keyword arg", "elif wanted == 'SyntaxError: assignment to None': # Python 2.6", "can not assign to __debug__': # Python 2.6 does has", "elif wanted == 'SyntaxError: can use starred expression only as", "But in others this error makes sense. 
return [wanted, \"SyntaxError:", "expecting '}'\": wanted = 'SyntaxError: EOL while scanning string literal'", "not is_issue('\"\";' + i1) assert not is_issue('\"\"\\n' + i1) assert", "code % args return bool(_get_error_list(code)) i1 = 'from __future__ import", "0)]), ('?\\n?', [(1, 0), (2, 0)]), ('? * ?', [(1,", "escaping is without syntax errors. Here we make a bit", "errors here. \"\"\" error, = _get_error_list(code, version='3.6') assert message in", "_get_error_list(r'b\"\\x\"', version=each_version) wanted = r'SyntaxError: (value error) invalid \\x escape'", "tree = grammar.parse(code) return list(grammar.iter_errors(tree)) def assert_comparison(code, error_code, positions): errors", "'SyntaxError: invalid syntax' elif wanted == \"SyntaxError: f-string: single '}'", "None else: assert False, \"The piece of code should raise", "escape', to=2) error, = _get_error_list(r'u\"\\N{foo}\"', version=each_version) assert error.message == get_msg(r'unknown", "== error.start_pos[0] else: assert line_nr == 0 # For whatever", "# I think this is the better position. assert error.start_pos[0]", "piece of code should raise an exception.\" # SyntaxError #", "single '}' is not allowed\": wanted = 'SyntaxError: invalid syntax'", "assert not is_issue('\"\";' + i1) assert not is_issue('\"\"\\n' + i1)", "+ end def get_msgs(escape): return (get_msg('end of string in escape", "import division' i2 = 'from __future__ import absolute_import' assert not", "'SyntaxError: invalid syntax' else: assert message == 'SyntaxError: named arguments", "'if 1:\\n' + indent(code) return build(new_code, depth - 1) def", "have a bit of a different warning than 3.5/3.6 in", "previous versions. code = 'async def foo():\\n def nofoo():[x async", "wanted == 'SyntaxError: assignment to None': # Python 2.6 does", "oddly enough not SyntaxErrors. 
wanted = 'SyntaxError: (value error) '", "wanted = 'SyntaxError: invalid syntax' elif wanted == \"SyntaxError: f-string", "string': wanted = 'SyntaxError: EOL while scanning string literal' elif", "'exec') except (SyntaxError, IndentationError) as e: wanted = e.__class__.__name__ +", "code = 'try: pass\\nexcept: pass\\nexcept X: pass' wanted, line_nr =", "# Finally bytes. error, = _get_error_list(r'b\"\\x\"', version=each_version) wanted = r'SyntaxError:", "def test_default_except_error_postition(): # For this error the position seemed to", ") def test_indentation_errors(code, positions): assert_comparison(code, 903, positions) def _get_actual_exception(code): with", "to None' elif wanted == 'SyntaxError: can not assign to", "assert actual in wanted if sys.version_info[:2] < (3, 8): assert", "if add_func: code = 'def bar():\\n' + indent(code) errors =", "EOL while scanning string literal' elif wanted == 'SyntaxError: f-string", "test_named_argument_issues(works_not_in_py): message = works_not_in_py.get_error_message('def foo(*, **dict): pass') message = works_not_in_py.get_error_message('def", "incorrect in Python 3.8. \"\"\" if sys.version_info[:2] < (3, 5):", "_get_error_list(code, version=None): grammar = parso.load_grammar(version=version) tree = grammar.parse(code) return list(grammar.iter_errors(tree))", "it. 
if sys.version_info[:2] == (2, 6) and wanted == 'SyntaxError:", "'SyntaxError: invalid syntax' elif wanted == \"SyntaxError: f-string expression part", "assert not is_issue(i1 + ';' + i2) assert not is_issue(i1", "assert get_error(19, add_func=True) is None assert get_error(20) assert get_error(20, add_func=True)", "i2) assert not is_issue('\"\";' + i1) assert not is_issue('\"\";' +", "**dict): pass') message = works_not_in_py.get_error_message('def foo(*): pass') if works_not_in_py.version.startswith('2'): assert", "pass') message = works_not_in_py.get_error_message('def foo(*): pass') if works_not_in_py.version.startswith('2'): assert message", "0)]), ('[\\nif 1: pass', [(2, 0)]), ('1+?', [(1, 2)]), ('?',", "that unicode/bytes escaping is without syntax errors. Here we make", "assert is_issue('\"\";1\\n' + i1) assert is_issue('\"\"\\n%s\\nfrom x import a\\n%s', i1,", "pass') if works_not_in_py.version.startswith('2'): assert message == 'SyntaxError: invalid syntax' else:", "else: assert message == 'SyntaxError: named arguments must follow bare", "parso.load_grammar(version=version) tree = grammar.parse(code) return list(grammar.iter_errors(tree)) def assert_comparison(code, error_code, positions):", "to __debug__' elif wanted == 'SyntaxError: can use starred expression", "depth - 1) def get_error(depth, add_func=False): code = build('foo', depth)", "make a bit of quality assurance that this works through", "in get_msgs(r'\\xXX') error, = _get_error_list(r'u\"\\u\"', version=each_version) assert error.message in get_msgs(r'\\uXXXX')", "assert not _get_error_list(build_nested('pass', 49, base=base)) assert _get_error_list(build_nested('pass', 50, base=base)) @pytest.mark.parametrize(", "from parso._compatibility import is_pypy from .failing_examples import FAILING_EXAMPLES, indent, build_nested", "syntax errors. 
Here we make a bit of quality assurance", "None if errors: error, = errors actual = error.message assert", "new_code = 'if 1:\\n' + indent(code) return build(new_code, depth -", "error.message in get_msgs(r'\\xXX') error, = _get_error_list(r'u\"\\u\"', version=each_version) assert error.message in", "0)]), ] ) def test_syntax_errors(code, positions): assert_comparison(code, 901, positions) @pytest.mark.parametrize(", "we make a bit of quality assurance that this works", "error, = _get_error_list(r'b\"\\x\"', version=each_version) wanted = r'SyntaxError: (value error) invalid", "error.code) for error in _get_error_list(code)] assert [(pos, error_code) for pos", "parso._compatibility import is_pypy from .failing_examples import FAILING_EXAMPLES, indent, build_nested if", "list(grammar.iter_errors(tree)) def assert_comparison(code, error_code, positions): errors = [(error.start_pos, error.code) for", "except (SyntaxError, IndentationError) as e: wanted = e.__class__.__name__ + ':", "wanted == 'SyntaxError: f-string expression part cannot include a backslash':", ") def test_valid_fstrings(code): assert not _get_error_list(code, version='3.6') @pytest.mark.parametrize( ('code', 'message'),", "== 0: return code new_code = 'if 1:\\n' + indent(code)", "allowed': wanted = 'SyntaxError: invalid syntax' elif wanted == \"SyntaxError:", "[(1, 0)]), ('??', [(1, 0)]), ('? 
?', [(1, 0)]), ('?\\n?',", "numbers are not always the same / incorrect in Python", "f-string expression part cannot include a backslash': return [ wanted,", "= _get_error_list(code) assert error.message in wanted assert line_nr != error.start_pos[0]", "while scanning string literal\", \"SyntaxError: unexpected character after line continuation", "return bool(_get_error_list(code)) i1 = 'from __future__ import division' i2 =", "+ * * 2', [(1, 4)]), ('?\\n1\\n?', [(1, 0), (3,", "invalid syntax' elif wanted == \"SyntaxError: f-string expression part cannot", "= _get_actual_exception(code) errors = _get_error_list(code) if errors: error, = errors", "assert error.message == get_msg(r'malformed \\N character escape', to=2) error, =", "this is zero in Python 3.8+ @pytest.mark.parametrize( ('code', 'positions'), [", "wanted if sys.version_info[:2] < (3, 8): assert line_nr == error.start_pos[0]", "('? ?', [(1, 0)]), ('?\\n?', [(1, 0), (2, 0)]), ('?", "0' assert error.message == wanted def test_too_many_levels_of_indentation(): assert not _get_error_list(build_nested('pass',", "part cannot include '#'\": wanted = 'SyntaxError: invalid syntax' elif", "Python 3. wanted += ' at position 0' assert error.message", "sometimes None assert line_nr is None or line_nr == error.start_pos[0]", "build(new_code, depth - 1) def get_error(depth, add_func=False): code = build('foo',", "@pytest.mark.parametrize( 'code', [ \"f'{*args,}'\", r'f\"\\\"\"', r'f\"\\\\\\\"\"', r'fr\"\\\"\"', r'fr\"\\\\\\\"\"', r\"print(f'Some {x:.2f}", "= None else: assert False, \"The piece of code should", "that doesn't # really matter. 
code = 'try: pass\\nexcept: pass\\nexcept", "i1) assert not is_issue('\"\"\\n' + i1) assert not is_issue('\"\"\\n%s\\n%s', i1,", "def test_valid_fstrings(code): assert not _get_error_list(code, version='3.6') @pytest.mark.parametrize( ('code', 'message'), [", "[(2, 0)]), ('[\\nif 1: pass', [(2, 0)]), ('1+?', [(1, 2)]),", "('1+?', [(1, 2)]), ('?', [(1, 0)]), ('??', [(1, 0)]), ('?", "foo(bar, *, name=1, **dct): pass') def test_escape_decode_literals(each_version): \"\"\" We are", "2)]), ('?', [(1, 0)]), ('??', [(1, 0)]), ('? ?', [(1,", "is sometimes None assert line_nr is None or line_nr ==", "i2) assert is_issue('%s\\n\"\"\\n%s', i1, i2) def test_named_argument_issues(works_not_in_py): message = works_not_in_py.get_error_message('def", "] ) def test_indentation_errors(code, positions): assert_comparison(code, 903, positions) def _get_actual_exception(code):", "actual = None if errors: error, = errors actual =", "2', [(3, 0)]), ('def x():\\n1', [(2, 0)]), ] ) def", "test_escape_decode_literals(each_version): \"\"\" We are using internal functions to assure that", "= e.lineno except ValueError as e: # The ValueError comes", "None assert line_nr is None or line_nr == error.start_pos[0] def", "The ValueError comes from byte literals in Python 2 like", "compile(code, '<unknown>', 'exec') except (SyntaxError, IndentationError) as e: wanted =", "nicer. 
wanted = 'SyntaxError: positional argument follows keyword argument' elif", "not is_issue('\"\"\\n%s;%s', i1, i2) assert not is_issue('\"\";%s;%s ', i1, i2)", "assert error.start_pos[0] == 2 def test_statically_nested_blocks(): def build(code, depth): if", "foo(bar, *, name=1): pass') works_not_in_py.assert_no_error_in_passing('def foo(bar, *, name=1, **dct): pass')", "+ i1) assert not is_issue('\"\";' + i1) assert not is_issue('\"\"\\n'", "x():\\n1', [(2, 0)]), ] ) def test_indentation_errors(code, positions): assert_comparison(code, 903,", "return base + end def get_msgs(escape): return (get_msg('end of string", "None assert get_error(19, add_func=True) is None assert get_error(20) assert get_error(20,", "Raises multiple errors in previous versions. code = 'async def", "i1, i2) assert is_issue('%s\\n\"\"\\n%s', i1, i2) def test_named_argument_issues(works_not_in_py): message =", "= None if errors: error, = errors actual = error.message", "[(1, 0), (2, 0)]), ('? * ?', [(1, 0)]), ('1", "a slightly different error. wanted = 'SyntaxError: cannot assign to", "version='3.6') @pytest.mark.parametrize( ('code', 'message'), [ (\"f'{1+}'\", ('invalid syntax')), (r'f\"\\\"', ('invalid", "message in error.message @pytest.mark.parametrize( 'code', [ \"from foo import (\\nbar,\\n", "'async def foo():\\n def nofoo():[x async for x in []]'", "= _get_error_list(r'u\"\\N{}\"', version=each_version) assert error.message == get_msg(r'malformed \\N character escape',", "while parsing': wanted = 'SyntaxError: invalid syntax' if wanted ==", "errors = _get_error_list(code) if errors: error, = errors actual =", "might be different. Just skip the module for now. pytestmark", "_get_error_list(r'u\"\\N{foo}\"', version=each_version) assert error.message == get_msg(r'unknown Unicode character name', to=6)", "error, = _get_error_list(r'u\"\\x\"', version=each_version) assert error.message in get_msgs(r'\\xXX') error, =", "The positioning information is only available in Python 3. 
wanted", "\"\"\" error, = _get_error_list(code, version='3.6') assert message in error.message @pytest.mark.parametrize(", "\"\"\" def get_msg(end, to=1): base = \"SyntaxError: (unicode error) 'unicodeescape'", "not is_issue('\"\"\\n%s\\n%s', i1, i2) assert not is_issue('\"\"\\n%s;%s', i1, i2) assert", "EOF while parsing': wanted = 'SyntaxError: invalid syntax' if wanted", "has a bit different error messages here, so skip it.", "import warnings import pytest import parso from parso._compatibility import is_pypy", "if wanted == 'SyntaxError: non-keyword arg after keyword arg': #", "== \"SyntaxError: f-string expression part cannot include '#'\": wanted =", "only available in Python 3. wanted += ' at position", "sys.version_info[:2] < (3, 8): assert line_nr == error.start_pos[0] else: assert", "= errors actual = error.message assert actual in wanted #", "and other versions. Therefore check specifically for these errors here.", "test_non_async_in_async(): \"\"\" This example doesn't work with FAILING_EXAMPLES, because the", ") def test_syntax_errors(code, positions): assert_comparison(code, 901, positions) @pytest.mark.parametrize( ('code', 'positions'),", "'SyntaxError: invalid syntax' if wanted == 'SyntaxError: non-keyword arg after", "elif wanted == 'SyntaxError: f-string expression part cannot include a", "wanted assert line_nr != error.start_pos[0] # I think this is", "\"SyntaxError: unexpected character after line continuation character\", ], line_nr elif", "expression not allowed': wanted = 'SyntaxError: invalid syntax' elif wanted", "';' + i2) assert not is_issue(i1 + '\\n' + i2)", "better position. 
assert error.start_pos[0] == 2 def test_statically_nested_blocks(): def build(code,", "syntax' return [wanted], line_nr def test_default_except_error_postition(): # For this error", "string literal' elif wanted == 'SyntaxError: f-string: empty expression not", "sys.version_info >= (3, 0): # The positioning information is only", "# The positioning information is only available in Python 3.", "same / incorrect in Python 3.8. \"\"\" if sys.version_info[:2] <", "assert is_issue('1;' + i1) assert is_issue('1\\n' + i1) assert is_issue('\"\";1\\n'", "here. # It's as simple as either an error or", "positions) def _get_actual_exception(code): with warnings.catch_warnings(): # We don't care about", "division' i2 = 'from __future__ import absolute_import' assert not is_issue(i1)", "is_issue(i1 + ';' + i2) assert not is_issue(i1 + '\\n'", "than 3.5/3.6 in # certain places. But in others this", "get_error(depth, add_func=False): code = build('foo', depth) if add_func: code =", "= 'if 1:\\n' + indent(code) return build(new_code, depth - 1)", "if errors: assert errors[0].message == 'SyntaxError: too many statically nested", "+ i2) assert not is_issue('\"\";' + i1) assert not is_issue('\"\";'", "(r'fr\"\\\"', ('invalid syntax')), ] ) def test_invalid_fstrings(code, message): \"\"\" Some", "or not. warnings.filterwarnings('ignore', category=SyntaxWarning) try: compile(code, '<unknown>', 'exec') except (SyntaxError,", "bit different error messages here, so skip it. if sys.version_info[:2]", "skip it. if sys.version_info[:2] == (2, 6) and wanted ==", "cannot include '#'\": wanted = 'SyntaxError: invalid syntax' elif wanted", "are oddly enough not SyntaxErrors. 
wanted = 'SyntaxError: (value error)", "def build(code, depth): if depth == 0: return code new_code", "bare *' works_not_in_py.assert_no_error_in_passing('def foo(*, name): pass') works_not_in_py.assert_no_error_in_passing('def foo(bar, *, name=1):", "+= ' at position 0' assert error.message == wanted def", "?', [(1, 0)]), ('?\\n?', [(1, 0), (2, 0)]), ('? *", "3)]), ('1 +\\n2 +', [(1, 3), (2, 3)]), ('x +", "error in _get_error_list(code)] assert [(pos, error_code) for pos in positions]", "'SyntaxError: f-string expression part cannot include a backslash': return [", "\"\"\" import sys import warnings import pytest import parso from", "in wanted assert line_nr != error.start_pos[0] # I think this", "# certain places. But in others this error makes sense.", "< (3, 5): pytest.skip() # Raises multiple errors in previous", "== \"SyntaxError: f-string: expecting '}'\": wanted = 'SyntaxError: EOL while", "error.start_pos[0] == 2 def test_statically_nested_blocks(): def build(code, depth): if depth", "def test_named_argument_issues(works_not_in_py): message = works_not_in_py.get_error_message('def foo(*, **dict): pass') message =", "if parso finds syntax errors and indentation errors. 
\"\"\" import", "of a different warning than 3.5/3.6 in # certain places.", "errors = _get_error_list(code) if errors: assert errors[0].message == 'SyntaxError: too", "a bit of a different warning than 3.5/3.6 in #", "error_code) for pos in positions] == errors @pytest.mark.parametrize('code', FAILING_EXAMPLES) def", "unterminated string': wanted = 'SyntaxError: EOL while scanning string literal'", "3.4/3.4 have a bit of a different warning than 3.5/3.6", "of code should raise an exception.\" # SyntaxError # Python", "EOL while scanning string literal' elif wanted == 'SyntaxError: f-string:", "'try: pass\\nexcept: pass\\nexcept X: pass' wanted, line_nr = _get_actual_exception(code) error,", "wanted = r'SyntaxError: (value error) invalid \\x escape' if sys.version_info", "wanted, line_nr = _get_actual_exception(code) errors = _get_error_list(code) if errors: error,", "in wanted if sys.version_info[:2] < (3, 8): assert line_nr ==", "return [wanted], line_nr def test_default_except_error_postition(): # For this error the", "specifically for these errors here. \"\"\" error, = _get_error_list(code, version='3.6')", "pass', [(2, 0)]), ('1+?', [(1, 2)]), ('?', [(1, 0)]), ('??',", "base + end def get_msgs(escape): return (get_msg('end of string in", "== 'SyntaxError: too many statically nested blocks' return errors[0] return", "bytes in position 0-%s: \" % to return base +", "(3, 5): pytest.skip() # Raises multiple errors in previous versions.", "positions) @pytest.mark.parametrize( ('code', 'positions'), [ (' 1', [(1, 0)]), ('def", "does has a slightly different error. 
wanted = 'SyntaxError: cannot", "build(code, depth): if depth == 0: return code new_code =", "if depth == 0: return code new_code = 'if 1:\\n'", "_get_error_list(code, version='3.6') assert message in error.message @pytest.mark.parametrize( 'code', [ \"from", "def test_python_exception_matches(code): wanted, line_nr = _get_actual_exception(code) errors = _get_error_list(code) actual", "function might change over time. \"\"\" def get_msg(end, to=1): base", "positions): assert_comparison(code, 903, positions) def _get_actual_exception(code): with warnings.catch_warnings(): # We", "('code', 'positions'), [ ('1 +', [(1, 3)]), ('1 +\\n', [(1,", "wanted = 'SyntaxError: invalid syntax' if wanted == 'SyntaxError: non-keyword", "version=each_version) wanted = r'SyntaxError: (value error) invalid \\x escape' if", "0)]), ('? ?', [(1, 0)]), ('?\\n?', [(1, 0), (2, 0)]),", "== 'SyntaxError: assignment to keyword': return [wanted, \"SyntaxError: can't assign", "] ) def test_syntax_errors(code, positions): assert_comparison(code, 901, positions) @pytest.mark.parametrize( ('code',", "Testing if parso finds syntax errors and indentation errors. \"\"\"", "can't use starred expression here\"], line_nr elif wanted == 'SyntaxError:", "in get_msgs(r'\\UXXXXXXXX') error, = _get_error_list(r'u\"\\N{}\"', version=each_version) assert error.message == get_msg(r'malformed", "the SyntaxError().lineno is sometimes None assert line_nr is None or", "positions] == errors @pytest.mark.parametrize('code', FAILING_EXAMPLES) def test_python_exception_matches(code): wanted, line_nr =", "the same / incorrect in Python 3.8. \"\"\" if sys.version_info[:2]", "unicode/bytes escaping is without syntax errors. 
Here we make a", "assert not _get_error_list(code, version='3.6') @pytest.mark.parametrize( ('code', 'message'), [ (\"f'{1+}'\", ('invalid", "\\ \"codec can't decode bytes in position 0-%s: \" %", "\\N character escape', to=2) error, = _get_error_list(r'u\"\\N{foo}\"', version=each_version) assert error.message", "error the position seemed to be one line off, but", "'SyntaxError: non-keyword arg after keyword arg': # The python 3.5+", "_get_actual_exception(code) error, = _get_error_list(code) assert error.message in wanted assert line_nr", "\"\"\" This example doesn't work with FAILING_EXAMPLES, because the line", "error.message == get_msg(r'malformed \\N character escape', to=2) error, = _get_error_list(r'u\"\\N{foo}\"',", "rab, )\", ] ) def test_trailing_comma(code): errors = _get_error_list(code) assert", "= _get_actual_exception(code) error, = _get_error_list(code) assert error.message in wanted assert", "4)]), ('?\\n1\\n?', [(1, 0), (3, 0)]), ] ) def test_syntax_errors(code,", "('code', 'positions'), [ (' 1', [(1, 0)]), ('def x():\\n 1\\n", "i1) assert not is_issue('\"\"\\n%s\\n%s', i1, i2) assert not is_issue('\"\"\\n%s;%s', i1,", "depth): if depth == 0: return code new_code = 'if", "error, = _get_error_list(r'u\"\\N{foo}\"', version=each_version) assert error.message == get_msg(r'unknown Unicode character", "functions to assure that unicode/bytes escaping is without syntax errors.", "syntax')), (r'f\"\\\"', ('invalid syntax')), (r'fr\"\\\"', ('invalid syntax')), ] ) def", "_get_error_list(build_nested('pass', 49, base=base)) assert _get_error_list(build_nested('pass', 50, base=base)) @pytest.mark.parametrize( 'code', [", "not _get_error_list(build_nested('pass', 99)) assert _get_error_list(build_nested('pass', 100)) base = 'def x():\\n", "positions): assert_comparison(code, 901, positions) @pytest.mark.parametrize( ('code', 'positions'), [ (' 1',", "keyword\", 'SyntaxError: cannot assign to __debug__'], line_nr elif wanted ==", "line_nr def 
test_default_except_error_postition(): # For this error the position seemed", "+ i1) assert not is_issue('\"\"\\n%s\\n%s', i1, i2) assert not is_issue('\"\"\\n%s;%s',", "('?\\n1\\n?', [(1, 0), (3, 0)]), ] ) def test_syntax_errors(code, positions):", "think this is the better position. assert error.start_pos[0] == 2", "python 3.5+ way, a bit nicer. wanted = 'SyntaxError: positional", "error.start_pos[0] # I think this is the better position. assert", "i2) assert is_issue('1;' + i1) assert is_issue('1\\n' + i1) assert", "in 3.6 and other versions. Therefore check specifically for these", "== 'SyntaxError: invalid syntax' else: assert message == 'SyntaxError: named", "'SyntaxError: positional argument follows keyword argument' elif wanted == 'SyntaxError:", "Just skip the module for now. pytestmark = pytest.mark.skip() def", "= 'def bar():\\n' + indent(code) errors = _get_error_list(code) if errors:", "__debug__'], line_nr elif wanted == 'SyntaxError: assignment to None': #", "i2) assert not is_issue('\"\";%s\\n%s ', i1, i2) assert is_issue('1;' +", "assert is_issue('%s\\n\"\"\\n%s', i1, i2) def test_named_argument_issues(works_not_in_py): message = works_not_in_py.get_error_message('def foo(*,", "(value error) invalid \\x escape' if sys.version_info >= (3, 0):", "{x:.2f} and some {y}')\", ] ) def test_valid_fstrings(code): assert not", "'SyntaxError: assignment to keyword': return [wanted, \"SyntaxError: can't assign to", "def get_msgs(escape): return (get_msg('end of string in escape sequence'), get_msg(r\"truncated", "assert error.message in get_msgs(r'\\UXXXXXXXX') error, = _get_error_list(r'u\"\\N{}\"', version=each_version) assert error.message", "code = code % args return bool(_get_error_list(code)) i1 = 'from", "skip the module for now. 
pytestmark = pytest.mark.skip() def _get_error_list(code,", "\"\"\" if sys.version_info[:2] < (3, 5): pytest.skip() # Raises multiple", "+ str(e) line_nr = None else: assert False, \"The piece", "and wanted == 'SyntaxError: unexpected EOF while parsing': wanted =", "async for x in []]' wanted, line_nr = _get_actual_exception(code) errors", "parsing': wanted = 'SyntaxError: invalid syntax' if wanted == 'SyntaxError:", "wanted = 'SyntaxError: cannot assign to None' elif wanted ==", "the position seemed to be one line off, but that", "add_func: code = 'def bar():\\n' + indent(code) errors = _get_error_list(code)", "= code % args return bool(_get_error_list(code)) i1 = 'from __future__", "0)]), ('1 + * * 2', [(1, 4)]), ('?\\n1\\n?', [(1,", "grammar = parso.load_grammar(version=version) tree = grammar.parse(code) return list(grammar.iter_errors(tree)) def assert_comparison(code,", "arguments must follow bare *' works_not_in_py.assert_no_error_in_passing('def foo(*, name): pass') works_not_in_py.assert_no_error_in_passing('def", "@pytest.mark.parametrize( ('code', 'message'), [ (\"f'{1+}'\", ('invalid syntax')), (r'f\"\\\"', ('invalid syntax')),", "= grammar.parse(code) return list(grammar.iter_errors(tree)) def assert_comparison(code, error_code, positions): errors =", "errors in PyPy might be different. 
Just skip the module", "Somehow in Python3.3 the SyntaxError().lineno is sometimes None assert line_nr", "the line numbers are not always the same / incorrect", "def test_too_many_levels_of_indentation(): assert not _get_error_list(build_nested('pass', 99)) assert _get_error_list(build_nested('pass', 100)) base", "for pos in positions] == errors @pytest.mark.parametrize('code', FAILING_EXAMPLES) def test_python_exception_matches(code):", "syntax')), ] ) def test_invalid_fstrings(code, message): \"\"\" Some fstring errors", "\"SyntaxError: can't assign to keyword\", 'SyntaxError: cannot assign to __debug__'],", "(value error) ' + str(e) line_nr = None else: assert", "foo(*, name): pass') works_not_in_py.assert_no_error_in_passing('def foo(bar, *, name=1): pass') works_not_in_py.assert_no_error_in_passing('def foo(bar,", "escape\" % escape)) error, = _get_error_list(r'u\"\\x\"', version=each_version) assert error.message in", "indent, build_nested if is_pypy: # The errors in PyPy might", "x():\\n 1\\n 2', [(3, 0)]), ('def x():\\n 1\\n 2', [(3,", "str(e) line_nr = None else: assert False, \"The piece of", "'SyntaxError: can not assign to __debug__': # Python 2.6 does", "as e: # The ValueError comes from byte literals in", "(\\nbar,\\n rab,\\n)\", \"from foo import (bar, rab, )\", ] )", "after keyword arg': # The python 3.5+ way, a bit", "if works_not_in_py.version.startswith('2'): assert message == 'SyntaxError: invalid syntax' else: assert", "is_pypy: # The errors in PyPy might be different. Just", "bit nicer. 
wanted = 'SyntaxError: positional argument follows keyword argument'", "rab,\\n)\", \"from foo import (bar, rab, )\", ] ) def", "error.start_pos[0] def test_non_async_in_async(): \"\"\" This example doesn't work with FAILING_EXAMPLES,", "?', [(1, 0)]), ('1 + * * 2', [(1, 4)]),", "not is_issue('\"\";%s;%s ', i1, i2) assert not is_issue('\"\";%s\\n%s ', i1,", "import (bar, rab, )\", ] ) def test_trailing_comma(code): errors =", "= 'SyntaxError: EOL while scanning string literal' elif wanted ==", "= 'SyntaxError: invalid syntax' return [wanted], line_nr def test_default_except_error_postition(): #", "elif wanted == \"SyntaxError: f-string: expecting '}'\": wanted = 'SyntaxError:", "i1) assert is_issue('\"\";1\\n' + i1) assert is_issue('\"\"\\n%s\\nfrom x import a\\n%s',", "error) 'unicodeescape' \" \\ \"codec can't decode bytes in position", "to keyword': return [wanted, \"SyntaxError: can't assign to keyword\", 'SyntaxError:", "_get_error_list(r'u\"\\U\"', version=each_version) assert error.message in get_msgs(r'\\UXXXXXXXX') error, = _get_error_list(r'u\"\\N{}\"', version=each_version)", "3.6 and other versions. 
Therefore check specifically for these errors", "quality assurance that this works through versions, because the internal", "import absolute_import' assert not is_issue(i1) assert not is_issue(i1 + ';'", "pytest import parso from parso._compatibility import is_pypy from .failing_examples import", "(3, 8): assert line_nr == error.start_pos[0] else: assert line_nr ==", "return (get_msg('end of string in escape sequence'), get_msg(r\"truncated %s escape\"", "actual = error.message assert actual in wanted if sys.version_info[:2] <", "is_issue(code, *args): code = code % args return bool(_get_error_list(code)) i1", "] ) def test_trailing_comma(code): errors = _get_error_list(code) assert not errors", "8): assert line_nr == error.start_pos[0] else: assert line_nr == 0", "is zero in Python 3.8+ @pytest.mark.parametrize( ('code', 'positions'), [ ('1", "import parso from parso._compatibility import is_pypy from .failing_examples import FAILING_EXAMPLES,", "% to return base + end def get_msgs(escape): return (get_msg('end", "assert False, \"The piece of code should raise an exception.\"", "# Python 2.6 has a bit different error messages here,", "errors actual = error.message assert actual in wanted if sys.version_info[:2]", "positioning information is only available in Python 3. wanted +=", "test_python_exception_matches(code): wanted, line_nr = _get_actual_exception(code) errors = _get_error_list(code) actual =", "901, positions) @pytest.mark.parametrize( ('code', 'positions'), [ (' 1', [(1, 0)]),", "to __debug__'], line_nr elif wanted == 'SyntaxError: assignment to None':", "error.message in wanted assert line_nr != error.start_pos[0] # I think", "_get_error_list(code, version='3.6') @pytest.mark.parametrize( ('code', 'message'), [ (\"f'{1+}'\", ('invalid syntax')), (r'f\"\\\"',", "e.msg line_nr = e.lineno except ValueError as e: # The", "sense. 
return [wanted, \"SyntaxError: can't use starred expression here\"], line_nr", "error, = errors actual = error.message assert actual in wanted", "[(1, 0)]), ('? ?', [(1, 0)]), ('?\\n?', [(1, 0), (2,", "# We don't care about warnings where locals/globals misbehave here.", "assert _get_error_list(build_nested('pass', 100)) base = 'def x():\\n if x:\\n' assert", "test_statically_nested_blocks(): def build(code, depth): if depth == 0: return code", "name', to=6) # Finally bytes. error, = _get_error_list(r'b\"\\x\"', version=each_version) wanted", "places. But in others this error makes sense. return [wanted,", "('1 +\\n2 +', [(1, 3), (2, 3)]), ('x + 2',", "== 'SyntaxError: assignment to None': # Python 2.6 does has", "assign to keyword\", 'SyntaxError: cannot assign to __debug__'], line_nr elif", "+ indent(code) errors = _get_error_list(code) if errors: assert errors[0].message ==", "error or not. warnings.filterwarnings('ignore', category=SyntaxWarning) try: compile(code, '<unknown>', 'exec') except", "< (3, 8): assert line_nr == error.start_pos[0] else: assert line_nr", "e: wanted = e.__class__.__name__ + ': ' + e.msg line_nr", "raise an exception.\" # SyntaxError # Python 2.6 has a", "wanted = 'SyntaxError: EOL while scanning string literal' elif wanted", "wanted # Somehow in Python3.3 the SyntaxError().lineno is sometimes None", "test_invalid_fstrings(code, message): \"\"\" Some fstring errors are handled differntly in", "assert is_issue('1\\n' + i1) assert is_issue('\"\";1\\n' + i1) assert is_issue('\"\"\\n%s\\nfrom", "message == 'SyntaxError: invalid syntax' else: assert message == 'SyntaxError:", "# For this error the position seemed to be one", "assign to __debug__' elif wanted == 'SyntaxError: can use starred", "different error. 
wanted = 'SyntaxError: cannot assign to None' elif", "[wanted, \"SyntaxError: can't use starred expression here\"], line_nr elif wanted", "literals in Python 2 like '\\x' # that are oddly", "internal functions to assure that unicode/bytes escaping is without syntax", "sequence'), get_msg(r\"truncated %s escape\" % escape)) error, = _get_error_list(r'u\"\\x\"', version=each_version)", "'code', [ \"f'{*args,}'\", r'f\"\\\"\"', r'f\"\\\\\\\"\"', r'fr\"\\\"\"', r'fr\"\\\\\\\"\"', r\"print(f'Some {x:.2f} and", "('invalid syntax')), ] ) def test_invalid_fstrings(code, message): \"\"\" Some fstring", "message = works_not_in_py.get_error_message('def foo(*): pass') if works_not_in_py.version.startswith('2'): assert message ==", "is_issue('\"\";' + i1) assert not is_issue('\"\"\\n' + i1) assert not", "indent(code) errors = _get_error_list(code) if errors: assert errors[0].message == 'SyntaxError:", "0)]), ('[\\ndef x(): pass', [(2, 0)]), ('[\\nif 1: pass', [(2,", "EOL while scanning string literal\", \"SyntaxError: unexpected character after line", "in PyPy might be different. 
Just skip the module for", "errors @pytest.mark.parametrize('code', FAILING_EXAMPLES) def test_python_exception_matches(code): wanted, line_nr = _get_actual_exception(code) errors", "try: compile(code, '<unknown>', 'exec') except (SyntaxError, IndentationError) as e: wanted", "assign to None' elif wanted == 'SyntaxError: can not assign", "== 'SyntaxError: non-keyword arg after keyword arg': # The python", "not is_issue('\"\"\\n' + i1) assert not is_issue('\"\"\\n%s\\n%s', i1, i2) assert", "[(1, 4)]), ('?\\n1\\n?', [(1, 0), (3, 0)]), ] ) def", "is None or line_nr == error.start_pos[0] def test_non_async_in_async(): \"\"\" This", "r'f\"\\\\\\\"\"', r'fr\"\\\"\"', r'fr\"\\\\\\\"\"', r\"print(f'Some {x:.2f} and some {y}')\", ] )", "fstring errors are handled differntly in 3.6 and other versions.", "_get_error_list(r'u\"\\N{}\"', version=each_version) assert error.message == get_msg(r'malformed \\N character escape', to=2)", "from byte literals in Python 2 like '\\x' # that", "'def bar():\\n' + indent(code) errors = _get_error_list(code) if errors: assert", "different error. 
wanted = 'SyntaxError: cannot assign to __debug__' elif", "== 'SyntaxError: f-string: unterminated string': wanted = 'SyntaxError: EOL while", "syntax' if wanted == 'SyntaxError: non-keyword arg after keyword arg':", "line_nr = _get_actual_exception(code) error, = _get_error_list(code) assert error.message in wanted", "errors[0].message == 'SyntaxError: too many statically nested blocks' return errors[0]", "\" \\ \"codec can't decode bytes in position 0-%s: \"", "= 'SyntaxError: invalid syntax' elif wanted == \"SyntaxError: f-string: single", "\\x escape' if sys.version_info >= (3, 0): # The positioning", "_get_error_list(code)] assert [(pos, error_code) for pos in positions] == errors", "are using internal functions to assure that unicode/bytes escaping is", "+', [(1, 3), (2, 3)]), ('x + 2', []), ('[\\n',", "def foo():\\n def nofoo():[x async for x in []]' wanted,", "base = 'def x():\\n if x:\\n' assert not _get_error_list(build_nested('pass', 49,", "_get_actual_exception(code): with warnings.catch_warnings(): # We don't care about warnings where", "arg after keyword arg': # The python 3.5+ way, a", "= build('foo', depth) if add_func: code = 'def bar():\\n' +", "0)]), ] ) def test_indentation_errors(code, positions): assert_comparison(code, 903, positions) def", "def get_error(depth, add_func=False): code = build('foo', depth) if add_func: code", "\"f'{*args,}'\", r'f\"\\\"\"', r'f\"\\\\\\\"\"', r'fr\"\\\"\"', r'fr\"\\\\\\\"\"', r\"print(f'Some {x:.2f} and some {y}')\",", "and some {y}')\", ] ) def test_valid_fstrings(code): assert not _get_error_list(code,", "[(1, 3)]), ('1 +\\n2 +', [(1, 3), (2, 3)]), ('x", "literal' elif wanted == 'SyntaxError: f-string expression part cannot include", "*' works_not_in_py.assert_no_error_in_passing('def foo(*, name): pass') works_not_in_py.assert_no_error_in_passing('def foo(bar, *, name=1): pass')", "in positions] == errors @pytest.mark.parametrize('code', FAILING_EXAMPLES) def test_python_exception_matches(code): wanted, 
line_nr", "return None assert get_error(19) is None assert get_error(19, add_func=True) is", "not is_issue(i1) assert not is_issue(i1 + ';' + i2) assert", "== 'SyntaxError: can use starred expression only as assignment target':", "add_func=False): code = build('foo', depth) if add_func: code = 'def", "return code new_code = 'if 1:\\n' + indent(code) return build(new_code,", "either an error or not. warnings.filterwarnings('ignore', category=SyntaxWarning) try: compile(code, '<unknown>',", "*args): code = code % args return bool(_get_error_list(code)) i1 =", "i2) assert not is_issue('\"\"\\n%s;%s', i1, i2) assert not is_issue('\"\";%s;%s ',", "code new_code = 'if 1:\\n' + indent(code) return build(new_code, depth", "0)]), ('def x():\\n 1\\n 2', [(3, 0)]), ('def x():\\n1', [(2,", "care about warnings where locals/globals misbehave here. # It's as", "i1) assert is_issue('1\\n' + i1) assert is_issue('\"\";1\\n' + i1) assert", "works_not_in_py.assert_no_error_in_passing('def foo(bar, *, name=1, **dct): pass') def test_escape_decode_literals(each_version): \"\"\" We", "The errors in PyPy might be different. Just skip the", "wanted = e.__class__.__name__ + ': ' + e.msg line_nr =", "simple as either an error or not. warnings.filterwarnings('ignore', category=SyntaxWarning) try:", "line_nr = _get_actual_exception(code) errors = _get_error_list(code) actual = None if", "to=6) # Finally bytes. error, = _get_error_list(r'b\"\\x\"', version=each_version) wanted =", "def test_non_async_in_async(): \"\"\" This example doesn't work with FAILING_EXAMPLES, because", "if sys.version_info[:2] == (2, 6) and wanted == 'SyntaxError: unexpected", "('[\\ndef x(): pass', [(2, 0)]), ('[\\nif 1: pass', [(2, 0)]),", "in others this error makes sense. return [wanted, \"SyntaxError: can't", "wanted == \"SyntaxError: f-string expression part cannot include '#'\": wanted", "== get_msg(r'unknown Unicode character name', to=6) # Finally bytes. error,", "# The python 3.5+ way, a bit nicer. 
wanted =", "cannot assign to __debug__'], line_nr elif wanted == 'SyntaxError: assignment", "* 2', [(1, 4)]), ('?\\n1\\n?', [(1, 0), (3, 0)]), ]", "if is_pypy: # The errors in PyPy might be different.", "messages here, so skip it. if sys.version_info[:2] == (2, 6)", "really matter. code = 'try: pass\\nexcept: pass\\nexcept X: pass' wanted,", "scanning string literal' elif wanted == 'SyntaxError: f-string: empty expression", "cannot include a backslash': return [ wanted, \"SyntaxError: EOL while", "if sys.version_info[:2] < (3, 5): pytest.skip() # Raises multiple errors", "assert not is_issue('\"\";' + i1) assert not is_issue('\"\";' + i1)", "errors: assert errors[0].message == 'SyntaxError: too many statically nested blocks'", "other versions. Therefore check specifically for these errors here. \"\"\"", "def _get_error_list(code, version=None): grammar = parso.load_grammar(version=version) tree = grammar.parse(code) return", "errors and indentation errors. \"\"\" import sys import warnings import", "pass\\nexcept X: pass' wanted, line_nr = _get_actual_exception(code) error, = _get_error_list(code)", "', i1, i2) assert not is_issue('\"\";%s\\n%s ', i1, i2) assert", "version=each_version) assert error.message == get_msg(r'malformed \\N character escape', to=2) error,", "message): \"\"\" Some fstring errors are handled differntly in 3.6", "indent(code) return build(new_code, depth - 1) def get_error(depth, add_func=False): code", "to __debug__': # Python 2.6 does has a slightly different", "allowed\": wanted = 'SyntaxError: invalid syntax' return [wanted], line_nr def", "assert message in error.message @pytest.mark.parametrize( 'code', [ \"from foo import", "error. 
wanted = 'SyntaxError: cannot assign to None' elif wanted", "error.message in get_msgs(r'\\UXXXXXXXX') error, = _get_error_list(r'u\"\\N{}\"', version=each_version) assert error.message ==", "0): # The positioning information is only available in Python", "@pytest.mark.parametrize( 'code', [ \"from foo import (\\nbar,\\n rab,\\n)\", \"from foo", "assert_comparison(code, 903, positions) def _get_actual_exception(code): with warnings.catch_warnings(): # We don't", "e.lineno except ValueError as e: # The ValueError comes from", "actual in wanted if sys.version_info[:2] < (3, 8): assert line_nr", "2 like '\\x' # that are oddly enough not SyntaxErrors.", "= 'SyntaxError: (value error) ' + str(e) line_nr = None", "== \"SyntaxError: f-string: single '}' is not allowed\": wanted =", "args return bool(_get_error_list(code)) i1 = 'from __future__ import division' i2", "is None assert get_error(19, add_func=True) is None assert get_error(20) assert", "i1, i2) assert not is_issue('\"\"\\n%s;%s', i1, i2) assert not is_issue('\"\";%s;%s", "\"SyntaxError: can't use starred expression here\"], line_nr elif wanted ==", "pass', [(2, 0)]), ('[\\nif 1: pass', [(2, 0)]), ('1+?', [(1,", "indentation errors. \"\"\" import sys import warnings import pytest import", "is_issue('1\\n' + i1) assert is_issue('\"\";1\\n' + i1) assert is_issue('\"\"\\n%s\\nfrom x", "assurance that this works through versions, because the internal function", "get_msg(end, to=1): base = \"SyntaxError: (unicode error) 'unicodeescape' \" \\", "assert error.message == wanted def test_too_many_levels_of_indentation(): assert not _get_error_list(build_nested('pass', 99))", "elif wanted == 'SyntaxError: can not assign to __debug__': #", "assert [(pos, error_code) for pos in positions] == errors @pytest.mark.parametrize('code',", "slightly different error. wanted = 'SyntaxError: cannot assign to None'", "warning than 3.5/3.6 in # certain places. But in others", "3.5/3.6 in # certain places. 
But in others this error", "' at position 0' assert error.message == wanted def test_too_many_levels_of_indentation():", "elif wanted == 'SyntaxError: assignment to keyword': return [wanted, \"SyntaxError:", "100)) base = 'def x():\\n if x:\\n' assert not _get_error_list(build_nested('pass',", "assert get_error(20, add_func=True) def test_future_import_first(): def is_issue(code, *args): code =", "syntax' else: assert message == 'SyntaxError: named arguments must follow", "differntly in 3.6 and other versions. Therefore check specifically for", "[(1, 0)]), ('def x():\\n 1\\n 2', [(3, 0)]), ('def x():\\n", "3)]), ('x + 2', []), ('[\\n', [(2, 0)]), ('[\\ndef x():", "assert actual in wanted # Somehow in Python3.3 the SyntaxError().lineno", "follows keyword argument' elif wanted == 'SyntaxError: assignment to keyword':", "get_error(19, add_func=True) is None assert get_error(20) assert get_error(20, add_func=True) def", "0 # For whatever reason this is zero in Python", "\"from foo import (bar, rab, )\", ] ) def test_trailing_comma(code):", "pass') def test_escape_decode_literals(each_version): \"\"\" We are using internal functions to", "= errors actual = error.message assert actual in wanted if", "internal function might change over time. \"\"\" def get_msg(end, to=1):", "@pytest.mark.parametrize( ('code', 'positions'), [ (' 1', [(1, 0)]), ('def x():\\n", "assignment target': # Python 3.4/3.4 have a bit of a", "version=each_version) assert error.message in get_msgs(r'\\xXX') error, = _get_error_list(r'u\"\\u\"', version=each_version) assert", "\"SyntaxError: EOL while scanning string literal\", \"SyntaxError: unexpected character after", "wanted += ' at position 0' assert error.message == wanted", "return [wanted, \"SyntaxError: can't assign to keyword\", 'SyntaxError: cannot assign", "so skip it. 
if sys.version_info[:2] == (2, 6) and wanted", "_get_error_list(r'u\"\\x\"', version=each_version) assert error.message in get_msgs(r'\\xXX') error, = _get_error_list(r'u\"\\u\"', version=each_version)", "f-string: expecting '}'\": wanted = 'SyntaxError: EOL while scanning string", "[(pos, error_code) for pos in positions] == errors @pytest.mark.parametrize('code', FAILING_EXAMPLES)", "Here we make a bit of quality assurance that this", "1', [(1, 0)]), ('def x():\\n 1\\n 2', [(3, 0)]), ('def", "= _get_error_list(code, version='3.6') assert message in error.message @pytest.mark.parametrize( 'code', [", "error.message in get_msgs(r'\\uXXXX') error, = _get_error_list(r'u\"\\U\"', version=each_version) assert error.message in", "is not allowed\": wanted = 'SyntaxError: invalid syntax' return [wanted],", "+ i2) assert not is_issue(i1 + '\\n' + i2) assert", "'from __future__ import absolute_import' assert not is_issue(i1) assert not is_issue(i1", "This example doesn't work with FAILING_EXAMPLES, because the line numbers", "works_not_in_py.assert_no_error_in_passing('def foo(*, name): pass') works_not_in_py.assert_no_error_in_passing('def foo(bar, *, name=1): pass') works_not_in_py.assert_no_error_in_passing('def", "is_issue('\"\"\\n%s\\nfrom x import a\\n%s', i1, i2) assert is_issue('%s\\n\"\"\\n%s', i1, i2)", "while scanning string literal' elif wanted == 'SyntaxError: f-string expression", "a backslash': return [ wanted, \"SyntaxError: EOL while scanning string", "'SyntaxError: named arguments must follow bare *' works_not_in_py.assert_no_error_in_passing('def foo(*, name):", "== 'SyntaxError: unexpected EOF while parsing': wanted = 'SyntaxError: invalid", "to=2) error, = _get_error_list(r'u\"\\N{foo}\"', version=each_version) assert error.message == get_msg(r'unknown Unicode", "is_issue('\"\";%s\\n%s ', i1, i2) assert is_issue('1;' + i1) assert is_issue('1\\n'", "enough not SyntaxErrors. 
wanted = 'SyntaxError: (value error) ' +", "as e: wanted = e.__class__.__name__ + ': ' + e.msg", "\" % to return base + end def get_msgs(escape): return", "line_nr = None else: assert False, \"The piece of code", "[ ('1 +', [(1, 3)]), ('1 +\\n', [(1, 3)]), ('1", "': ' + e.msg line_nr = e.lineno except ValueError as", "We don't care about warnings where locals/globals misbehave here. #", "this works through versions, because the internal function might change", "warnings.filterwarnings('ignore', category=SyntaxWarning) try: compile(code, '<unknown>', 'exec') except (SyntaxError, IndentationError) as", "continuation character\", ], line_nr elif wanted == \"SyntaxError: f-string: expecting", "is_issue('\"\";1\\n' + i1) assert is_issue('\"\"\\n%s\\nfrom x import a\\n%s', i1, i2)", "assignment to None': # Python 2.6 does has a slightly", "\"SyntaxError: (unicode error) 'unicodeescape' \" \\ \"codec can't decode bytes", "line_nr == error.start_pos[0] def test_non_async_in_async(): \"\"\" This example doesn't work", "reason this is zero in Python 3.8+ @pytest.mark.parametrize( ('code', 'positions'),", "errors: error, = errors actual = error.message assert actual in", "('[\\n', [(2, 0)]), ('[\\ndef x(): pass', [(2, 0)]), ('[\\nif 1:", "assert not is_issue('\"\";%s;%s ', i1, i2) assert not is_issue('\"\";%s\\n%s ',", "foo(*, **dict): pass') message = works_not_in_py.get_error_message('def foo(*): pass') if works_not_in_py.version.startswith('2'):", "(get_msg('end of string in escape sequence'), get_msg(r\"truncated %s escape\" %", "Python 2 like '\\x' # that are oddly enough not", "exception.\" # SyntaxError # Python 2.6 has a bit different", "return [wanted, \"SyntaxError: can't use starred expression here\"], line_nr elif", "r'SyntaxError: (value error) invalid \\x escape' if sys.version_info >= (3,", "Python 2.6 has a bit different error messages here, so", "Therefore check specifically for these errors here. 
\"\"\" error, =", "depth == 0: return code new_code = 'if 1:\\n' +", "\"codec can't decode bytes in position 0-%s: \" % to", "position 0-%s: \" % to return base + end def", "('? * ?', [(1, 0)]), ('1 + * * 2',", "and indentation errors. \"\"\" import sys import warnings import pytest", "in # certain places. But in others this error makes", "2', [(1, 4)]), ('?\\n1\\n?', [(1, 0), (3, 0)]), ] )", "bit of quality assurance that this works through versions, because", "error_code, positions): errors = [(error.start_pos, error.code) for error in _get_error_list(code)]", "= _get_error_list(r'u\"\\x\"', version=each_version) assert error.message in get_msgs(r'\\xXX') error, = _get_error_list(r'u\"\\u\"',", "because the internal function might change over time. \"\"\" def", "string in escape sequence'), get_msg(r\"truncated %s escape\" % escape)) error,", "+ ';' + i2) assert not is_issue(i1 + '\\n' +", "version=each_version) assert error.message in get_msgs(r'\\UXXXXXXXX') error, = _get_error_list(r'u\"\\N{}\"', version=each_version) assert", "return errors[0] return None assert get_error(19) is None assert get_error(19,", "errors[0] return None assert get_error(19) is None assert get_error(19, add_func=True)", "Finally bytes. error, = _get_error_list(r'b\"\\x\"', version=each_version) wanted = r'SyntaxError: (value", "this error makes sense. 
return [wanted, \"SyntaxError: can't use starred", "include a backslash': return [ wanted, \"SyntaxError: EOL while scanning", "to None': # Python 2.6 does has a slightly different", "f-string: single '}' is not allowed\": wanted = 'SyntaxError: invalid", "], line_nr elif wanted == \"SyntaxError: f-string: expecting '}'\": wanted", "- 1) def get_error(depth, add_func=False): code = build('foo', depth) if", "**dct): pass') def test_escape_decode_literals(each_version): \"\"\" We are using internal functions", "'SyntaxError: unexpected EOF while parsing': wanted = 'SyntaxError: invalid syntax'", "line numbers are not always the same / incorrect in", "are not always the same / incorrect in Python 3.8.", "PyPy might be different. Just skip the module for now.", "expression only as assignment target': # Python 3.4/3.4 have a", "] ) def test_valid_fstrings(code): assert not _get_error_list(code, version='3.6') @pytest.mark.parametrize( ('code',", "comes from byte literals in Python 2 like '\\x' #", "assert is_issue('\"\"\\n%s\\nfrom x import a\\n%s', i1, i2) assert is_issue('%s\\n\"\"\\n%s', i1,", "3. 
wanted += ' at position 0' assert error.message ==", "= works_not_in_py.get_error_message('def foo(*, **dict): pass') message = works_not_in_py.get_error_message('def foo(*): pass')", "== (2, 6) and wanted == 'SyntaxError: unexpected EOF while", "wanted == 'SyntaxError: unexpected EOF while parsing': wanted = 'SyntaxError:", "in Python3.3 the SyntaxError().lineno is sometimes None assert line_nr is", "escape sequence'), get_msg(r\"truncated %s escape\" % escape)) error, = _get_error_list(r'u\"\\x\"',", "r'f\"\\\"\"', r'f\"\\\\\\\"\"', r'fr\"\\\"\"', r'fr\"\\\\\\\"\"', r\"print(f'Some {x:.2f} and some {y}')\", ]", "named arguments must follow bare *' works_not_in_py.assert_no_error_in_passing('def foo(*, name): pass')", "syntax' elif wanted == \"SyntaxError: f-string: single '}' is not", "pass' wanted, line_nr = _get_actual_exception(code) error, = _get_error_list(code) assert error.message", "get_msg(r\"truncated %s escape\" % escape)) error, = _get_error_list(r'u\"\\x\"', version=each_version) assert", "message == 'SyntaxError: named arguments must follow bare *' works_not_in_py.assert_no_error_in_passing('def", "2.6 does has a slightly different error. wanted = 'SyntaxError:", "grammar.parse(code) return list(grammar.iter_errors(tree)) def assert_comparison(code, error_code, positions): errors = [(error.start_pos,", "= _get_error_list(r'u\"\\N{foo}\"', version=each_version) assert error.message == get_msg(r'unknown Unicode character name',", "get_msg(r'unknown Unicode character name', to=6) # Finally bytes. error, =", "assert error.message == get_msg(r'unknown Unicode character name', to=6) # Finally", "It's as simple as either an error or not. 
warnings.filterwarnings('ignore',", "not is_issue('\"\";' + i1) assert not is_issue('\"\";' + i1) assert", "(bar, rab, )\", ] ) def test_trailing_comma(code): errors = _get_error_list(code)", "'SyntaxError: invalid syntax' return [wanted], line_nr def test_default_except_error_postition(): # For", "[]), ('[\\n', [(2, 0)]), ('[\\ndef x(): pass', [(2, 0)]), ('[\\nif", "903, positions) def _get_actual_exception(code): with warnings.catch_warnings(): # We don't care", "1: pass', [(2, 0)]), ('1+?', [(1, 2)]), ('?', [(1, 0)]),", "# SyntaxError # Python 2.6 has a bit different error", "assert_comparison(code, 901, positions) @pytest.mark.parametrize( ('code', 'positions'), [ (' 1', [(1,", "+ e.msg line_nr = e.lineno except ValueError as e: #", "unexpected EOF while parsing': wanted = 'SyntaxError: invalid syntax' if", "zero in Python 3.8+ @pytest.mark.parametrize( ('code', 'positions'), [ ('1 +',", "'SyntaxError: assignment to None': # Python 2.6 does has a", "__debug__': # Python 2.6 does has a slightly different error.", "1:\\n' + indent(code) return build(new_code, depth - 1) def get_error(depth,", "i2 = 'from __future__ import absolute_import' assert not is_issue(i1) assert", "# The errors in PyPy might be different. 
Just skip", "_get_actual_exception(code) errors = _get_error_list(code) actual = None if errors: error,", "line_nr == 0 # For whatever reason this is zero", "'def x():\\n if x:\\n' assert not _get_error_list(build_nested('pass', 49, base=base)) assert", "\"SyntaxError: f-string: expecting '}'\": wanted = 'SyntaxError: EOL while scanning", "(2, 3)]), ('x + 2', []), ('[\\n', [(2, 0)]), ('[\\ndef", "expression part cannot include a backslash': return [ wanted, \"SyntaxError:", "for error in _get_error_list(code)] assert [(pos, error_code) for pos in", "error.message @pytest.mark.parametrize( 'code', [ \"from foo import (\\nbar,\\n rab,\\n)\", \"from", "'}' is not allowed\": wanted = 'SyntaxError: invalid syntax' return", "i1 = 'from __future__ import division' i2 = 'from __future__", "if sys.version_info >= (3, 0): # The positioning information is", "= _get_error_list(code) if errors: assert errors[0].message == 'SyntaxError: too many", "_get_error_list(code) actual = None if errors: error, = errors actual", "elif wanted == 'SyntaxError: f-string: unterminated string': wanted = 'SyntaxError:", "line_nr elif wanted == \"SyntaxError: f-string: expecting '}'\": wanted =", "wanted, line_nr = _get_actual_exception(code) error, = _get_error_list(code) assert error.message in", "many statically nested blocks' return errors[0] return None assert get_error(19)", "is None assert get_error(20) assert get_error(20, add_func=True) def test_future_import_first(): def", "wanted def test_too_many_levels_of_indentation(): assert not _get_error_list(build_nested('pass', 99)) assert _get_error_list(build_nested('pass', 100))", "= parso.load_grammar(version=version) tree = grammar.parse(code) return list(grammar.iter_errors(tree)) def assert_comparison(code, error_code,", "def test_indentation_errors(code, positions): assert_comparison(code, 903, positions) def _get_actual_exception(code): with warnings.catch_warnings():", "(\"f'{1+}'\", ('invalid syntax')), (r'f\"\\\"', ('invalid 
syntax')), (r'fr\"\\\"', ('invalid syntax')), ]", "import sys import warnings import pytest import parso from parso._compatibility", "invalid syntax' if wanted == 'SyntaxError: non-keyword arg after keyword", "SyntaxErrors. wanted = 'SyntaxError: (value error) ' + str(e) line_nr", "[(1, 3)]), ('1 +\\n', [(1, 3)]), ('1 +\\n2 +', [(1,", "pos in positions] == errors @pytest.mark.parametrize('code', FAILING_EXAMPLES) def test_python_exception_matches(code): wanted,", "string literal\", \"SyntaxError: unexpected character after line continuation character\", ],", "foo():\\n def nofoo():[x async for x in []]' wanted, line_nr", "assert line_nr != error.start_pos[0] # I think this is the", "nested blocks' return errors[0] return None assert get_error(19) is None", "warnings where locals/globals misbehave here. # It's as simple as", "to=1): base = \"SyntaxError: (unicode error) 'unicodeescape' \" \\ \"codec", "way, a bit nicer. wanted = 'SyntaxError: positional argument follows", "versions. Therefore check specifically for these errors here. \"\"\" error,", "version=each_version) assert error.message in get_msgs(r'\\uXXXX') error, = _get_error_list(r'u\"\\U\"', version=each_version) assert", "Python 3.8+ @pytest.mark.parametrize( ('code', 'positions'), [ ('1 +', [(1, 3)]),", "assignment to keyword': return [wanted, \"SyntaxError: can't assign to keyword\",", "# really matter. code = 'try: pass\\nexcept: pass\\nexcept X: pass'", "get_error(20, add_func=True) def test_future_import_first(): def is_issue(code, *args): code = code", "sys import warnings import pytest import parso from parso._compatibility import", "'\\x' # that are oddly enough not SyntaxErrors. wanted =", "False, \"The piece of code should raise an exception.\" #", "position. 
assert error.start_pos[0] == 2 def test_statically_nested_blocks(): def build(code, depth):", "bit of a different warning than 3.5/3.6 in # certain", "assert not is_issue('\"\"\\n%s\\n%s', i1, i2) assert not is_issue('\"\"\\n%s;%s', i1, i2)", "actual = error.message assert actual in wanted # Somehow in", "error) ' + str(e) line_nr = None else: assert False,", "pytest.skip() # Raises multiple errors in previous versions. code =", "errors in previous versions. code = 'async def foo():\\n def", "string literal' elif wanted == 'SyntaxError: f-string expression part cannot", "line_nr = e.lineno except ValueError as e: # The ValueError", "pass') works_not_in_py.assert_no_error_in_passing('def foo(bar, *, name=1): pass') works_not_in_py.assert_no_error_in_passing('def foo(bar, *, name=1,", "position 0' assert error.message == wanted def test_too_many_levels_of_indentation(): assert not", "== 'SyntaxError: can not assign to __debug__': # Python 2.6", "assert line_nr == error.start_pos[0] else: assert line_nr == 0 #", "a bit different error messages here, so skip it. if", "assert not is_issue('\"\"\\n%s;%s', i1, i2) assert not is_issue('\"\";%s;%s ', i1,", "0)]), ('? * ?', [(1, 0)]), ('1 + * *", "'from __future__ import division' i2 = 'from __future__ import absolute_import'", "'SyntaxError: f-string: empty expression not allowed': wanted = 'SyntaxError: invalid", "in escape sequence'), get_msg(r\"truncated %s escape\" % escape)) error, =", "[(1, 0)]), ('?\\n?', [(1, 0), (2, 0)]), ('? * ?',", "for now. pytestmark = pytest.mark.skip() def _get_error_list(code, version=None): grammar =", "None or line_nr == error.start_pos[0] def test_non_async_in_async(): \"\"\" This example", "= r'SyntaxError: (value error) invalid \\x escape' if sys.version_info >=", "assert error.message in wanted assert line_nr != error.start_pos[0] # I", "errors. \"\"\" import sys import warnings import pytest import parso", "line off, but that doesn't # really matter. 
code =", "import (\\nbar,\\n rab,\\n)\", \"from foo import (bar, rab, )\", ]", "for these errors here. \"\"\" error, = _get_error_list(code, version='3.6') assert", "the module for now. pytestmark = pytest.mark.skip() def _get_error_list(code, version=None):", "= error.message assert actual in wanted if sys.version_info[:2] < (3,", "in wanted # Somehow in Python3.3 the SyntaxError().lineno is sometimes", "0), (2, 0)]), ('? * ?', [(1, 0)]), ('1 +", "are handled differntly in 3.6 and other versions. Therefore check", "to assure that unicode/bytes escaping is without syntax errors. Here", "r\"print(f'Some {x:.2f} and some {y}')\", ] ) def test_valid_fstrings(code): assert", "'code', [ \"from foo import (\\nbar,\\n rab,\\n)\", \"from foo import", "== 'SyntaxError: named arguments must follow bare *' works_not_in_py.assert_no_error_in_passing('def foo(*,", "+\\n', [(1, 3)]), ('1 +\\n2 +', [(1, 3), (2, 3)]),", "wanted == 'SyntaxError: assignment to keyword': return [wanted, \"SyntaxError: can't", "'SyntaxError: too many statically nested blocks' return errors[0] return None", "errors are handled differntly in 3.6 and other versions. Therefore", "(SyntaxError, IndentationError) as e: wanted = e.__class__.__name__ + ': '", "error messages here, so skip it. 
if sys.version_info[:2] == (2,", "error.start_pos[0] else: assert line_nr == 0 # For whatever reason", "not allowed\": wanted = 'SyntaxError: invalid syntax' return [wanted], line_nr", "+ i1) assert is_issue('1\\n' + i1) assert is_issue('\"\";1\\n' + i1)", "def is_issue(code, *args): code = code % args return bool(_get_error_list(code))", "Some fstring errors are handled differntly in 3.6 and other", "name=1, **dct): pass') def test_escape_decode_literals(each_version): \"\"\" We are using internal", "works through versions, because the internal function might change over", "('invalid syntax')), (r'fr\"\\\"', ('invalid syntax')), ] ) def test_invalid_fstrings(code, message):", "errors actual = error.message assert actual in wanted # Somehow", "position seemed to be one line off, but that doesn't", "a\\n%s', i1, i2) assert is_issue('%s\\n\"\"\\n%s', i1, i2) def test_named_argument_issues(works_not_in_py): message", "build('foo', depth) if add_func: code = 'def bar():\\n' + indent(code)", "== 'SyntaxError: f-string: empty expression not allowed': wanted = 'SyntaxError:", "wanted, \"SyntaxError: EOL while scanning string literal\", \"SyntaxError: unexpected character", "using internal functions to assure that unicode/bytes escaping is without", "'SyntaxError: can use starred expression only as assignment target': #", "SyntaxError # Python 2.6 has a bit different error messages", "3)]), ('1 +\\n', [(1, 3)]), ('1 +\\n2 +', [(1, 3),", "(2, 6) and wanted == 'SyntaxError: unexpected EOF while parsing':", "seemed to be one line off, but that doesn't #", "target': # Python 3.4/3.4 have a bit of a different", "def test_statically_nested_blocks(): def build(code, depth): if depth == 0: return", "% args return bool(_get_error_list(code)) i1 = 'from __future__ import division'", "+ i1) assert is_issue('\"\"\\n%s\\nfrom x import a\\n%s', i1, i2) assert", "only as assignment target': # Python 3.4/3.4 have a bit", "*, name=1, **dct): pass') def 
test_escape_decode_literals(each_version): \"\"\" We are using", "errors. Here we make a bit of quality assurance that", "[(3, 0)]), ('def x():\\n 1\\n 2', [(3, 0)]), ('def x():\\n1',", "after line continuation character\", ], line_nr elif wanted == \"SyntaxError:", "an exception.\" # SyntaxError # Python 2.6 has a bit", "is_pypy from .failing_examples import FAILING_EXAMPLES, indent, build_nested if is_pypy: #", "[(2, 0)]), ] ) def test_indentation_errors(code, positions): assert_comparison(code, 903, positions)", "where locals/globals misbehave here. # It's as simple as either", "assert not is_issue(i1 + '\\n' + i2) assert not is_issue('\"\";'", "None assert get_error(19) is None assert get_error(19, add_func=True) is None", "except ValueError as e: # The ValueError comes from byte", "unexpected character after line continuation character\", ], line_nr elif wanted", "base=base)) assert _get_error_list(build_nested('pass', 50, base=base)) @pytest.mark.parametrize( 'code', [ \"f'{*args,}'\", r'f\"\\\"\"',", "foo import (\\nbar,\\n rab,\\n)\", \"from foo import (bar, rab, )\",", "error. wanted = 'SyntaxError: cannot assign to __debug__' elif wanted", "error makes sense. return [wanted, \"SyntaxError: can't use starred expression", "scanning string literal' elif wanted == 'SyntaxError: f-string expression part", "import is_pypy from .failing_examples import FAILING_EXAMPLES, indent, build_nested if is_pypy:", "not. 
warnings.filterwarnings('ignore', category=SyntaxWarning) try: compile(code, '<unknown>', 'exec') except (SyntaxError, IndentationError)", "wanted = 'SyntaxError: positional argument follows keyword argument' elif wanted", "assert get_error(20) assert get_error(20, add_func=True) def test_future_import_first(): def is_issue(code, *args):", "+', [(1, 3)]), ('1 +\\n', [(1, 3)]), ('1 +\\n2 +',", "'unicodeescape' \" \\ \"codec can't decode bytes in position 0-%s:", "too many statically nested blocks' return errors[0] return None assert", "empty expression not allowed': wanted = 'SyntaxError: invalid syntax' elif", "!= error.start_pos[0] # I think this is the better position.", "not always the same / incorrect in Python 3.8. \"\"\"", "@pytest.mark.parametrize( ('code', 'positions'), [ ('1 +', [(1, 3)]), ('1 +\\n',", "__future__ import absolute_import' assert not is_issue(i1) assert not is_issue(i1 +", "in position 0-%s: \" % to return base + end", "[wanted], line_nr def test_default_except_error_postition(): # For this error the position", "+ indent(code) return build(new_code, depth - 1) def get_error(depth, add_func=False):", "# The ValueError comes from byte literals in Python 2", "We are using internal functions to assure that unicode/bytes escaping", "== 0 # For whatever reason this is zero in", "== 2 def test_statically_nested_blocks(): def build(code, depth): if depth ==", "sys.version_info[:2] < (3, 5): pytest.skip() # Raises multiple errors in", "assert not is_issue(i1) assert not is_issue(i1 + ';' + i2)", "return list(grammar.iter_errors(tree)) def assert_comparison(code, error_code, positions): errors = [(error.start_pos, error.code)", "base = \"SyntaxError: (unicode error) 'unicodeescape' \" \\ \"codec can't", "ValueError as e: # The ValueError comes from byte literals", "positions): errors = [(error.start_pos, error.code) for error in _get_error_list(code)] assert", "(r'f\"\\\"', ('invalid syntax')), (r'fr\"\\\"', ('invalid syntax')), ] ) def 
test_invalid_fstrings(code,", "be one line off, but that doesn't # really matter.", "('invalid syntax')), (r'f\"\\\"', ('invalid syntax')), (r'fr\"\\\"', ('invalid syntax')), ] )", ") def test_invalid_fstrings(code, message): \"\"\" Some fstring errors are handled", "'<unknown>', 'exec') except (SyntaxError, IndentationError) as e: wanted = e.__class__.__name__", "'SyntaxError: cannot assign to __debug__'], line_nr elif wanted == 'SyntaxError:", "backslash': return [ wanted, \"SyntaxError: EOL while scanning string literal\",", "[ (' 1', [(1, 0)]), ('def x():\\n 1\\n 2', [(3,", "import pytest import parso from parso._compatibility import is_pypy from .failing_examples", "_get_error_list(code) if errors: assert errors[0].message == 'SyntaxError: too many statically", "\"\"\" Testing if parso finds syntax errors and indentation errors.", "whatever reason this is zero in Python 3.8+ @pytest.mark.parametrize( ('code',", "to keyword\", 'SyntaxError: cannot assign to __debug__'], line_nr elif wanted", "warnings import pytest import parso from parso._compatibility import is_pypy from", "[ \"from foo import (\\nbar,\\n rab,\\n)\", \"from foo import (bar,", "IndentationError) as e: wanted = e.__class__.__name__ + ': ' +", "expression part cannot include '#'\": wanted = 'SyntaxError: invalid syntax'", "'\\n' + i2) assert not is_issue('\"\";' + i1) assert not", "warnings.catch_warnings(): # We don't care about warnings where locals/globals misbehave", "[(1, 3), (2, 3)]), ('x + 2', []), ('[\\n', [(2,", "# Python 3.4/3.4 have a bit of a different warning", "get_msgs(r'\\xXX') error, = _get_error_list(r'u\"\\u\"', version=each_version) assert error.message in get_msgs(r'\\uXXXX') error,", "is only available in Python 3. wanted += ' at", "check specifically for these errors here. \"\"\" error, = _get_error_list(code,", "be different. Just skip the module for now. pytestmark =", "makes sense. 
return [wanted, \"SyntaxError: can't use starred expression here\"],", "0)]), ('??', [(1, 0)]), ('? ?', [(1, 0)]), ('?\\n?', [(1,", "99)) assert _get_error_list(build_nested('pass', 100)) base = 'def x():\\n if x:\\n'", "works_not_in_py.assert_no_error_in_passing('def foo(bar, *, name=1): pass') works_not_in_py.assert_no_error_in_passing('def foo(bar, *, name=1, **dct):", "assert _get_error_list(build_nested('pass', 50, base=base)) @pytest.mark.parametrize( 'code', [ \"f'{*args,}'\", r'f\"\\\"\"', r'f\"\\\\\\\"\"',", "('[\\nif 1: pass', [(2, 0)]), ('1+?', [(1, 2)]), ('?', [(1,", "f-string: empty expression not allowed': wanted = 'SyntaxError: invalid syntax'", "_get_error_list(build_nested('pass', 50, base=base)) @pytest.mark.parametrize( 'code', [ \"f'{*args,}'\", r'f\"\\\"\"', r'f\"\\\\\\\"\"', r'fr\"\\\"\"',", "50, base=base)) @pytest.mark.parametrize( 'code', [ \"f'{*args,}'\", r'f\"\\\"\"', r'f\"\\\\\\\"\"', r'fr\"\\\"\"', r'fr\"\\\\\\\"\"',", "off, but that doesn't # really matter. code = 'try:", "('code', 'message'), [ (\"f'{1+}'\", ('invalid syntax')), (r'f\"\\\"', ('invalid syntax')), (r'fr\"\\\"',", "\"\"\" We are using internal functions to assure that unicode/bytes", "= _get_error_list(r'u\"\\U\"', version=each_version) assert error.message in get_msgs(r'\\UXXXXXXXX') error, = _get_error_list(r'u\"\\N{}\"',", "wanted, line_nr = _get_actual_exception(code) errors = _get_error_list(code) actual = None", "a bit nicer. wanted = 'SyntaxError: positional argument follows keyword", "x():\\n 1\\n 2', [(3, 0)]), ('def x():\\n1', [(2, 0)]), ]", "here. 
\"\"\" error, = _get_error_list(code, version='3.6') assert message in error.message", "def test_invalid_fstrings(code, message): \"\"\" Some fstring errors are handled differntly", "import FAILING_EXAMPLES, indent, build_nested if is_pypy: # The errors in", "' + str(e) line_nr = None else: assert False, \"The", "end def get_msgs(escape): return (get_msg('end of string in escape sequence'),", "== wanted def test_too_many_levels_of_indentation(): assert not _get_error_list(build_nested('pass', 99)) assert _get_error_list(build_nested('pass',", "'#'\": wanted = 'SyntaxError: invalid syntax' elif wanted == \"SyntaxError:", "not _get_error_list(build_nested('pass', 49, base=base)) assert _get_error_list(build_nested('pass', 50, base=base)) @pytest.mark.parametrize( 'code',", "] ) def test_invalid_fstrings(code, message): \"\"\" Some fstring errors are", "misbehave here. # It's as simple as either an error", "= _get_error_list(r'u\"\\u\"', version=each_version) assert error.message in get_msgs(r'\\uXXXX') error, = _get_error_list(r'u\"\\U\"',", "test_future_import_first(): def is_issue(code, *args): code = code % args return", "version='3.6') assert message in error.message @pytest.mark.parametrize( 'code', [ \"from foo", "assert not is_issue('\"\";%s\\n%s ', i1, i2) assert is_issue('1;' + i1)", "errors = _get_error_list(code) actual = None if errors: error, =", "/ incorrect in Python 3.8. 
\"\"\" if sys.version_info[:2] < (3,", "0: return code new_code = 'if 1:\\n' + indent(code) return", "e.__class__.__name__ + ': ' + e.msg line_nr = e.lineno except", "depth) if add_func: code = 'def bar():\\n' + indent(code) errors", "keyword argument' elif wanted == 'SyntaxError: assignment to keyword': return", "cannot assign to None' elif wanted == 'SyntaxError: can not", "'SyntaxError: cannot assign to None' elif wanted == 'SyntaxError: can", "x:\\n' assert not _get_error_list(build_nested('pass', 49, base=base)) assert _get_error_list(build_nested('pass', 50, base=base))", "The python 3.5+ way, a bit nicer. wanted = 'SyntaxError:", "is_issue('\"\";%s;%s ', i1, i2) assert not is_issue('\"\";%s\\n%s ', i1, i2)", "information is only available in Python 3. wanted += '", "= 'SyntaxError: cannot assign to __debug__' elif wanted == 'SyntaxError:", "literal' elif wanted == 'SyntaxError: f-string: empty expression not allowed':", "to be one line off, but that doesn't # really", "e: # The ValueError comes from byte literals in Python", "def test_syntax_errors(code, positions): assert_comparison(code, 901, positions) @pytest.mark.parametrize( ('code', 'positions'), [", "of string in escape sequence'), get_msg(r\"truncated %s escape\" % escape))", "FAILING_EXAMPLES, indent, build_nested if is_pypy: # The errors in PyPy", "# that are oddly enough not SyntaxErrors. 
wanted = 'SyntaxError:", "('def x():\\n 1\\n 2', [(3, 0)]), ('def x():\\n1', [(2, 0)]),", "def test_future_import_first(): def is_issue(code, *args): code = code % args", "= error.message assert actual in wanted # Somehow in Python3.3", "code = build('foo', depth) if add_func: code = 'def bar():\\n'", "wanted == \"SyntaxError: f-string: expecting '}'\": wanted = 'SyntaxError: EOL", "_get_actual_exception(code) errors = _get_error_list(code) if errors: error, = errors actual", "use starred expression only as assignment target': # Python 3.4/3.4", "assert get_error(19) is None assert get_error(19, add_func=True) is None assert", "don't care about warnings where locals/globals misbehave here. # It's", "code = 'def bar():\\n' + indent(code) errors = _get_error_list(code) if", "i1, i2) assert not is_issue('\"\";%s;%s ', i1, i2) assert not", "[(2, 0)]), ('[\\ndef x(): pass', [(2, 0)]), ('[\\nif 1: pass',", "error, = _get_error_list(code, version='3.6') assert message in error.message @pytest.mark.parametrize( 'code',", "line_nr is None or line_nr == error.start_pos[0] def test_non_async_in_async(): \"\"\"", "1\\n 2', [(3, 0)]), ('def x():\\n1', [(2, 0)]), ] )", "line_nr = _get_actual_exception(code) errors = _get_error_list(code) if errors: error, =", "(unicode error) 'unicodeescape' \" \\ \"codec can't decode bytes in", "__future__ import division' i2 = 'from __future__ import absolute_import' assert", "starred expression only as assignment target': # Python 3.4/3.4 have", "'SyntaxError: EOL while scanning string literal' elif wanted == 'SyntaxError:", "'positions'), [ (' 1', [(1, 0)]), ('def x():\\n 1\\n 2',", "finds syntax errors and indentation errors. \"\"\" import sys import", "now. pytestmark = pytest.mark.skip() def _get_error_list(code, version=None): grammar = parso.load_grammar(version=version)", "5): pytest.skip() # Raises multiple errors in previous versions. 
code", "test_indentation_errors(code, positions): assert_comparison(code, 903, positions) def _get_actual_exception(code): with warnings.catch_warnings(): #", "has a slightly different error. wanted = 'SyntaxError: cannot assign", "multiple errors in previous versions. code = 'async def foo():\\n", "is_issue(i1) assert not is_issue(i1 + ';' + i2) assert not", "syntax' elif wanted == \"SyntaxError: f-string expression part cannot include", "decode bytes in position 0-%s: \" % to return base", "i1, i2) assert not is_issue('\"\";%s\\n%s ', i1, i2) assert is_issue('1;'", "to return base + end def get_msgs(escape): return (get_msg('end of", "% escape)) error, = _get_error_list(r'u\"\\x\"', version=each_version) assert error.message in get_msgs(r'\\xXX')", "actual in wanted # Somehow in Python3.3 the SyntaxError().lineno is", "invalid syntax' elif wanted == \"SyntaxError: f-string: single '}' is", "('x + 2', []), ('[\\n', [(2, 0)]), ('[\\ndef x(): pass',", "[(1, 0)]), ('1 + * * 2', [(1, 4)]), ('?\\n1\\n?',", "= [(error.start_pos, error.code) for error in _get_error_list(code)] assert [(pos, error_code)", "x():\\n if x:\\n' assert not _get_error_list(build_nested('pass', 49, base=base)) assert _get_error_list(build_nested('pass',", "line_nr elif wanted == 'SyntaxError: f-string: unterminated string': wanted =", "\"\"\" Some fstring errors are handled differntly in 3.6 and", "{y}')\", ] ) def test_valid_fstrings(code): assert not _get_error_list(code, version='3.6') @pytest.mark.parametrize(", "line_nr == error.start_pos[0] else: assert line_nr == 0 # For", "_get_error_list(code) if errors: error, = errors actual = error.message assert", "3.8+ @pytest.mark.parametrize( ('code', 'positions'), [ ('1 +', [(1, 3)]), ('1", "errors = [(error.start_pos, error.code) for error in _get_error_list(code)] assert [(pos,", "in Python 3.8+ @pytest.mark.parametrize( ('code', 'positions'), [ ('1 +', [(1,", "== 'SyntaxError: f-string expression part cannot include a backslash': return", "if 
sys.version_info[:2] < (3, 8): assert line_nr == error.start_pos[0] else:", "[ wanted, \"SyntaxError: EOL while scanning string literal\", \"SyntaxError: unexpected", "== errors @pytest.mark.parametrize('code', FAILING_EXAMPLES) def test_python_exception_matches(code): wanted, line_nr = _get_actual_exception(code)", "'SyntaxError: (value error) ' + str(e) line_nr = None else:", "i1, i2) assert is_issue('1;' + i1) assert is_issue('1\\n' + i1)", "in []]' wanted, line_nr = _get_actual_exception(code) errors = _get_error_list(code) if", "in Python 3.8. \"\"\" if sys.version_info[:2] < (3, 5): pytest.skip()", "wanted = 'SyntaxError: invalid syntax' return [wanted], line_nr def test_default_except_error_postition():", "must follow bare *' works_not_in_py.assert_no_error_in_passing('def foo(*, name): pass') works_not_in_py.assert_no_error_in_passing('def foo(bar,", "[(error.start_pos, error.code) for error in _get_error_list(code)] assert [(pos, error_code) for", "\"The piece of code should raise an exception.\" # SyntaxError", "[]]' wanted, line_nr = _get_actual_exception(code) errors = _get_error_list(code) if errors:", "byte literals in Python 2 like '\\x' # that are", "starred expression here\"], line_nr elif wanted == 'SyntaxError: f-string: unterminated", "line continuation character\", ], line_nr elif wanted == \"SyntaxError: f-string:", "test_default_except_error_postition(): # For this error the position seemed to be", "2 def test_statically_nested_blocks(): def build(code, depth): if depth == 0:", "assert line_nr == 0 # For whatever reason this is", "name): pass') works_not_in_py.assert_no_error_in_passing('def foo(bar, *, name=1): pass') works_not_in_py.assert_no_error_in_passing('def foo(bar, *,", "0)]), ('1+?', [(1, 2)]), ('?', [(1, 0)]), ('??', [(1, 0)]),", "doesn't work with FAILING_EXAMPLES, because the line numbers are not", "arg': # The python 3.5+ way, a bit nicer. wanted", "ValueError comes from byte literals in Python 2 like '\\x'", "over time. 
\"\"\" def get_msg(end, to=1): base = \"SyntaxError: (unicode", "test_valid_fstrings(code): assert not _get_error_list(code, version='3.6') @pytest.mark.parametrize( ('code', 'message'), [ (\"f'{1+}'\",", "= 'from __future__ import division' i2 = 'from __future__ import", "keyword arg': # The python 3.5+ way, a bit nicer.", "or line_nr == error.start_pos[0] def test_non_async_in_async(): \"\"\" This example doesn't", "category=SyntaxWarning) try: compile(code, '<unknown>', 'exec') except (SyntaxError, IndentationError) as e:", "def _get_actual_exception(code): with warnings.catch_warnings(): # We don't care about warnings", "different. Just skip the module for now. pytestmark = pytest.mark.skip()", "= 'SyntaxError: positional argument follows keyword argument' elif wanted ==", "assign to __debug__'], line_nr elif wanted == 'SyntaxError: assignment to", "[wanted, \"SyntaxError: can't assign to keyword\", 'SyntaxError: cannot assign to", "def test_escape_decode_literals(each_version): \"\"\" We are using internal functions to assure", "0-%s: \" % to return base + end def get_msgs(escape):", "= 'SyntaxError: invalid syntax' elif wanted == \"SyntaxError: f-string expression", "not allowed': wanted = 'SyntaxError: invalid syntax' elif wanted ==", "None': # Python 2.6 does has a slightly different error.", "\"SyntaxError: f-string expression part cannot include '#'\": wanted = 'SyntaxError:", "Python 2.6 does has a slightly different error. wanted =", "error.message assert actual in wanted if sys.version_info[:2] < (3, 8):", "through versions, because the internal function might change over time.", "assert error.message in get_msgs(r'\\uXXXX') error, = _get_error_list(r'u\"\\U\"', version=each_version) assert error.message", "is the better position. 
assert error.start_pos[0] == 2 def test_statically_nested_blocks():", "None' elif wanted == 'SyntaxError: can not assign to __debug__':", "not _get_error_list(code, version='3.6') @pytest.mark.parametrize( ('code', 'message'), [ (\"f'{1+}'\", ('invalid syntax')),", "3.8. \"\"\" if sys.version_info[:2] < (3, 5): pytest.skip() # Raises", "('??', [(1, 0)]), ('? ?', [(1, 0)]), ('?\\n?', [(1, 0),", "Python 3.8. \"\"\" if sys.version_info[:2] < (3, 5): pytest.skip() #", "argument follows keyword argument' elif wanted == 'SyntaxError: assignment to", "assign to __debug__': # Python 2.6 does has a slightly", "pytestmark = pytest.mark.skip() def _get_error_list(code, version=None): grammar = parso.load_grammar(version=version) tree", "0), (3, 0)]), ] ) def test_syntax_errors(code, positions): assert_comparison(code, 901,", "not SyntaxErrors. wanted = 'SyntaxError: (value error) ' + str(e)", "argument' elif wanted == 'SyntaxError: assignment to keyword': return [wanted,", "Python 3.4/3.4 have a bit of a different warning than", "invalid syntax' return [wanted], line_nr def test_default_except_error_postition(): # For this", "locals/globals misbehave here. # It's as simple as either an", "this error the position seemed to be one line off,", "is_issue('\"\"\\n%s\\n%s', i1, i2) assert not is_issue('\"\"\\n%s;%s', i1, i2) assert not", "bar():\\n' + indent(code) errors = _get_error_list(code) if errors: assert errors[0].message", "different error messages here, so skip it. if sys.version_info[:2] ==", "if errors: error, = errors actual = error.message assert actual", "3.5+ way, a bit nicer. wanted = 'SyntaxError: positional argument", "works_not_in_py.version.startswith('2'): assert message == 'SyntaxError: invalid syntax' else: assert message", "(3, 0): # The positioning information is only available in", "can't decode bytes in position 0-%s: \" % to return", "(2, 0)]), ('? 
* ?', [(1, 0)]), ('1 + *", "wanted == 'SyntaxError: non-keyword arg after keyword arg': # The", "return [ wanted, \"SyntaxError: EOL while scanning string literal\", \"SyntaxError:", "include '#'\": wanted = 'SyntaxError: invalid syntax' elif wanted ==", "+\\n2 +', [(1, 3), (2, 3)]), ('x + 2', []),", "[ (\"f'{1+}'\", ('invalid syntax')), (r'f\"\\\"', ('invalid syntax')), (r'fr\"\\\"', ('invalid syntax')),", "part cannot include a backslash': return [ wanted, \"SyntaxError: EOL", "get_msgs(r'\\uXXXX') error, = _get_error_list(r'u\"\\U\"', version=each_version) assert error.message in get_msgs(r'\\UXXXXXXXX') error,", "X: pass' wanted, line_nr = _get_actual_exception(code) error, = _get_error_list(code) assert", "get_msgs(escape): return (get_msg('end of string in escape sequence'), get_msg(r\"truncated %s", "'SyntaxError: f-string: unterminated string': wanted = 'SyntaxError: EOL while scanning", ")\", ] ) def test_trailing_comma(code): errors = _get_error_list(code) assert not", "invalid \\x escape' if sys.version_info >= (3, 0): # The", "pytest.mark.skip() def _get_error_list(code, version=None): grammar = parso.load_grammar(version=version) tree = grammar.parse(code)", "= _get_error_list(code) if errors: error, = errors actual = error.message", "[(3, 0)]), ('def x():\\n1', [(2, 0)]), ] ) def test_indentation_errors(code,", "that are oddly enough not SyntaxErrors. 
wanted = 'SyntaxError: (value", "== error.start_pos[0] def test_non_async_in_async(): \"\"\" This example doesn't work with", "# For whatever reason this is zero in Python 3.8+", "= e.__class__.__name__ + ': ' + e.msg line_nr = e.lineno", "while scanning string literal' elif wanted == 'SyntaxError: f-string: empty", "not is_issue(i1 + ';' + i2) assert not is_issue(i1 +", "error, = _get_error_list(r'u\"\\U\"', version=each_version) assert error.message in get_msgs(r'\\UXXXXXXXX') error, =", "at position 0' assert error.message == wanted def test_too_many_levels_of_indentation(): assert", "parso finds syntax errors and indentation errors. \"\"\" import sys", "Python3.3 the SyntaxError().lineno is sometimes None assert line_nr is None", "i1) assert not is_issue('\"\";' + i1) assert not is_issue('\"\"\\n' +", "without syntax errors. Here we make a bit of quality", "x in []]' wanted, line_nr = _get_actual_exception(code) errors = _get_error_list(code)", "= 'SyntaxError: cannot assign to None' elif wanted == 'SyntaxError:", "assert message == 'SyntaxError: invalid syntax' else: assert message ==", "if x:\\n' assert not _get_error_list(build_nested('pass', 49, base=base)) assert _get_error_list(build_nested('pass', 50,", "* ?', [(1, 0)]), ('1 + * * 2', [(1,", "nofoo():[x async for x in []]' wanted, line_nr = _get_actual_exception(code)", "def get_msg(end, to=1): base = \"SyntaxError: (unicode error) 'unicodeescape' \"", "= works_not_in_py.get_error_message('def foo(*): pass') if works_not_in_py.version.startswith('2'): assert message == 'SyntaxError:", "('1 +', [(1, 3)]), ('1 +\\n', [(1, 3)]), ('1 +\\n2", "import a\\n%s', i1, i2) assert is_issue('%s\\n\"\"\\n%s', i1, i2) def test_named_argument_issues(works_not_in_py):", "[ \"f'{*args,}'\", r'f\"\\\"\"', r'f\"\\\\\\\"\"', r'fr\"\\\"\"', r'fr\"\\\\\\\"\"', r\"print(f'Some {x:.2f} and some", "from .failing_examples import FAILING_EXAMPLES, indent, build_nested if is_pypy: # The", "in Python 2 like '\\x' # that are oddly 
enough", "assert errors[0].message == 'SyntaxError: too many statically nested blocks' return", "here, so skip it. if sys.version_info[:2] == (2, 6) and", "in error.message @pytest.mark.parametrize( 'code', [ \"from foo import (\\nbar,\\n rab,\\n)\",", "\"from foo import (\\nbar,\\n rab,\\n)\", \"from foo import (bar, rab,", "work with FAILING_EXAMPLES, because the line numbers are not always", "non-keyword arg after keyword arg': # The python 3.5+ way,", "0)]), ('def x():\\n1', [(2, 0)]), ] ) def test_indentation_errors(code, positions):", ".failing_examples import FAILING_EXAMPLES, indent, build_nested if is_pypy: # The errors", "2', []), ('[\\n', [(2, 0)]), ('[\\ndef x(): pass', [(2, 0)]),", "test_syntax_errors(code, positions): assert_comparison(code, 901, positions) @pytest.mark.parametrize( ('code', 'positions'), [ ('", "some {y}')\", ] ) def test_valid_fstrings(code): assert not _get_error_list(code, version='3.6')", "is_issue('\"\"\\n' + i1) assert not is_issue('\"\"\\n%s\\n%s', i1, i2) assert not", "slightly different error. wanted = 'SyntaxError: cannot assign to __debug__'", "pass\\nexcept: pass\\nexcept X: pass' wanted, line_nr = _get_actual_exception(code) error, =", "('1 + * * 2', [(1, 4)]), ('?\\n1\\n?', [(1, 0),", "f-string: unterminated string': wanted = 'SyntaxError: EOL while scanning string", "assert message == 'SyntaxError: named arguments must follow bare *'", "scanning string literal\", \"SyntaxError: unexpected character after line continuation character\",", "1) def get_error(depth, add_func=False): code = build('foo', depth) if add_func:", "# Raises multiple errors in previous versions. 
code = 'async", "wanted == \"SyntaxError: f-string: single '}' is not allowed\": wanted", "escape)) error, = _get_error_list(r'u\"\\x\"', version=each_version) assert error.message in get_msgs(r'\\xXX') error,", "not is_issue('\"\";%s\\n%s ', i1, i2) assert is_issue('1;' + i1) assert", "x import a\\n%s', i1, i2) assert is_issue('%s\\n\"\"\\n%s', i1, i2) def", "assure that unicode/bytes escaping is without syntax errors. Here we", "bytes. error, = _get_error_list(r'b\"\\x\"', version=each_version) wanted = r'SyntaxError: (value error)", "FAILING_EXAMPLES) def test_python_exception_matches(code): wanted, line_nr = _get_actual_exception(code) errors = _get_error_list(code)", ">= (3, 0): # The positioning information is only available", "6) and wanted == 'SyntaxError: unexpected EOF while parsing': wanted", "assert not _get_error_list(build_nested('pass', 99)) assert _get_error_list(build_nested('pass', 100)) base = 'def", "cannot assign to __debug__' elif wanted == 'SyntaxError: can use", "else: assert line_nr == 0 # For whatever reason this", "error.message == wanted def test_too_many_levels_of_indentation(): assert not _get_error_list(build_nested('pass', 99)) assert", "parso from parso._compatibility import is_pypy from .failing_examples import FAILING_EXAMPLES, indent,", "of quality assurance that this works through versions, because the", "+ ': ' + e.msg line_nr = e.lineno except ValueError" ]
[ "'+': modifier = PROMOTE else: modifier = None yield Move(color[step", "import Coords, Move, BLACK, WHITE, DROP, PROMOTE RANKNUM = {", "unicode_literals from shogitk.shogi import Coords, Move, BLACK, WHITE, DROP, PROMOTE", "elif line[0].isupper(): dst = Coords(int(line[2]), RANKNUM[line[3]]) yield Move(color[step & 1],", "line[0].isdigit(): src = Coords(int(line[0]), RANKNUM[line[1]]) dst = Coords(int(line[2]), RANKNUM[line[3]]) if", "7, 'h': 8, 'i': 9 } def decoder(f): color =", "line in f: line = line.strip() if line[0] == '[':", "-*- coding: utf-8 -*- from __future__ import unicode_literals from shogitk.shogi", "Move(color[step & 1], dst, src, None, modifier=modifier) step += 1", "from __future__ import unicode_literals from shogitk.shogi import Coords, Move, BLACK,", "= Coords(int(line[2]), RANKNUM[line[3]]) yield Move(color[step & 1], dst, None, line[0],", "'d': 4, 'e': 5, 'f': 6, 'g': 7, 'h': 8,", "= line.strip() if line[0] == '[': pass elif line[0].isdigit(): src", "elif line[0].isdigit(): src = Coords(int(line[0]), RANKNUM[line[1]]) dst = Coords(int(line[2]), RANKNUM[line[3]])", "step = 0 for line in f: line = line.strip()", "'e': 5, 'f': 6, 'g': 7, 'h': 8, 'i': 9", "1 elif line[0].isupper(): dst = Coords(int(line[2]), RANKNUM[line[3]]) yield Move(color[step &", "dst = Coords(int(line[2]), RANKNUM[line[3]]) if line[-1] == '+': modifier =", "f: line = line.strip() if line[0] == '[': pass elif", "from shogitk.shogi import Coords, Move, BLACK, WHITE, DROP, PROMOTE RANKNUM", "DROP, PROMOTE RANKNUM = { 'a': 1, 'b': 2, 'c':", "coding: utf-8 -*- from __future__ import unicode_literals from shogitk.shogi import", "shogitk.shogi import Coords, Move, BLACK, WHITE, DROP, PROMOTE RANKNUM =", "def decoder(f): color = [BLACK, WHITE] step = 0 for", "} def decoder(f): color = [BLACK, WHITE] step = 0", "'b': 2, 'c': 3, 'd': 4, 'e': 5, 'f': 6,", "__future__ import unicode_literals from shogitk.shogi import Coords, Move, BLACK, WHITE,", "modifier=modifier) step += 1 elif 
line[0].isupper(): dst = Coords(int(line[2]), RANKNUM[line[3]])", "decoder(f): color = [BLACK, WHITE] step = 0 for line", "+= 1 elif line[0].isupper(): dst = Coords(int(line[2]), RANKNUM[line[3]]) yield Move(color[step", "line[-1] == '+': modifier = PROMOTE else: modifier = None", "6, 'g': 7, 'h': 8, 'i': 9 } def decoder(f):", "& 1], dst, src, None, modifier=modifier) step += 1 elif", "[BLACK, WHITE] step = 0 for line in f: line", "RANKNUM[line[3]]) yield Move(color[step & 1], dst, None, line[0], modifier=DROP) step", "import unicode_literals from shogitk.shogi import Coords, Move, BLACK, WHITE, DROP,", "if line[0] == '[': pass elif line[0].isdigit(): src = Coords(int(line[0]),", "{ 'a': 1, 'b': 2, 'c': 3, 'd': 4, 'e':", "else: modifier = None yield Move(color[step & 1], dst, src,", "# -*- coding: utf-8 -*- from __future__ import unicode_literals from", "= Coords(int(line[2]), RANKNUM[line[3]]) if line[-1] == '+': modifier = PROMOTE", "== '+': modifier = PROMOTE else: modifier = None yield", "RANKNUM[line[3]]) if line[-1] == '+': modifier = PROMOTE else: modifier", "yield Move(color[step & 1], dst, None, line[0], modifier=DROP) step +=", "PROMOTE else: modifier = None yield Move(color[step & 1], dst,", "dst = Coords(int(line[2]), RANKNUM[line[3]]) yield Move(color[step & 1], dst, None,", "Coords, Move, BLACK, WHITE, DROP, PROMOTE RANKNUM = { 'a':", "modifier = None yield Move(color[step & 1], dst, src, None,", "RANKNUM[line[1]]) dst = Coords(int(line[2]), RANKNUM[line[3]]) if line[-1] == '+': modifier", "Coords(int(line[0]), RANKNUM[line[1]]) dst = Coords(int(line[2]), RANKNUM[line[3]]) if line[-1] == '+':", "if line[-1] == '+': modifier = PROMOTE else: modifier =", "for line in f: line = line.strip() if line[0] ==", "-*- from __future__ import unicode_literals from shogitk.shogi import Coords, Move,", "'g': 7, 'h': 8, 'i': 9 } def decoder(f): color", "'[': pass elif line[0].isdigit(): src = Coords(int(line[0]), RANKNUM[line[1]]) dst =", "in f: line = line.strip() 
if line[0] == '[': pass", "= PROMOTE else: modifier = None yield Move(color[step & 1],", "'h': 8, 'i': 9 } def decoder(f): color = [BLACK,", "'c': 3, 'd': 4, 'e': 5, 'f': 6, 'g': 7,", "src = Coords(int(line[0]), RANKNUM[line[1]]) dst = Coords(int(line[2]), RANKNUM[line[3]]) if line[-1]", "yield Move(color[step & 1], dst, src, None, modifier=modifier) step +=", "'a': 1, 'b': 2, 'c': 3, 'd': 4, 'e': 5,", "Move(color[step & 1], dst, None, line[0], modifier=DROP) step += 1", "color = [BLACK, WHITE] step = 0 for line in", "= { 'a': 1, 'b': 2, 'c': 3, 'd': 4,", "src, None, modifier=modifier) step += 1 elif line[0].isupper(): dst =", "line[0] == '[': pass elif line[0].isdigit(): src = Coords(int(line[0]), RANKNUM[line[1]])", "None, modifier=modifier) step += 1 elif line[0].isupper(): dst = Coords(int(line[2]),", "= None yield Move(color[step & 1], dst, src, None, modifier=modifier)", "WHITE, DROP, PROMOTE RANKNUM = { 'a': 1, 'b': 2,", "= [BLACK, WHITE] step = 0 for line in f:", "line.strip() if line[0] == '[': pass elif line[0].isdigit(): src =", "modifier = PROMOTE else: modifier = None yield Move(color[step &", "dst, src, None, modifier=modifier) step += 1 elif line[0].isupper(): dst", "Coords(int(line[2]), RANKNUM[line[3]]) if line[-1] == '+': modifier = PROMOTE else:", "= 0 for line in f: line = line.strip() if", "RANKNUM = { 'a': 1, 'b': 2, 'c': 3, 'd':", "0 for line in f: line = line.strip() if line[0]", "Move, BLACK, WHITE, DROP, PROMOTE RANKNUM = { 'a': 1,", "PROMOTE RANKNUM = { 'a': 1, 'b': 2, 'c': 3,", "BLACK, WHITE, DROP, PROMOTE RANKNUM = { 'a': 1, 'b':", "8, 'i': 9 } def decoder(f): color = [BLACK, WHITE]", "9 } def decoder(f): color = [BLACK, WHITE] step =", "WHITE] step = 0 for line in f: line =", "4, 'e': 5, 'f': 6, 'g': 7, 'h': 8, 'i':", "'i': 9 } def decoder(f): color = [BLACK, WHITE] step", "2, 'c': 3, 'd': 4, 'e': 5, 'f': 6, 'g':", "= Coords(int(line[0]), RANKNUM[line[1]]) dst = Coords(int(line[2]), RANKNUM[line[3]]) if line[-1] ==", "None yield 
Move(color[step & 1], dst, src, None, modifier=modifier) step", "line = line.strip() if line[0] == '[': pass elif line[0].isdigit():", "1, 'b': 2, 'c': 3, 'd': 4, 'e': 5, 'f':", "'f': 6, 'g': 7, 'h': 8, 'i': 9 } def", "utf-8 -*- from __future__ import unicode_literals from shogitk.shogi import Coords,", "3, 'd': 4, 'e': 5, 'f': 6, 'g': 7, 'h':", "pass elif line[0].isdigit(): src = Coords(int(line[0]), RANKNUM[line[1]]) dst = Coords(int(line[2]),", "== '[': pass elif line[0].isdigit(): src = Coords(int(line[0]), RANKNUM[line[1]]) dst", "line[0].isupper(): dst = Coords(int(line[2]), RANKNUM[line[3]]) yield Move(color[step & 1], dst,", "step += 1 elif line[0].isupper(): dst = Coords(int(line[2]), RANKNUM[line[3]]) yield", "1], dst, src, None, modifier=modifier) step += 1 elif line[0].isupper():", "5, 'f': 6, 'g': 7, 'h': 8, 'i': 9 }", "Coords(int(line[2]), RANKNUM[line[3]]) yield Move(color[step & 1], dst, None, line[0], modifier=DROP)" ]
[ "collateral ratio\") @click.option( '--liquidation-duration', type=int, help=\"The liquidation duration in minutes\")", "import click from . import utils @click.group() def main(): \"Simple", "score) tx_hash = utils.send_transaction(func, 0, private_key) return tx_hash @main.command() @click.option(", "the transaction') def set_score(oracle, score, private_key): \"Edit oracle's score\" oracle", "= 0 value = int(ether_price * 100) elif collateral_ratio: var_code", "to sign the transaction') def finish_recruiting(private_key): \"Set recruiting as finished\"", "@main.command() @click.option('--oracle', required=True, help=\"The oracle's address\") @click.option('--score', type=int, required=True, help=\"The", "type=int, help=\"The liquidation duration in minutes\") @click.option( '--private-key', callback=utils.check_account, help='The", "oracle's address\") @click.option('--score', type=int, required=True, help=\"The oracle's score\") @click.option( '--private-key',", "key to sign the transaction') def finish_recruiting(private_key): \"Set recruiting as", "\"You should set one variable per vote\" if ether_price: var_code", "price in ether dollar\") @click.option('--collateral-ratio', type=float, help=\"The collateral ratio\") @click.option(", "0 value = int(ether_price * 100) elif collateral_ratio: var_code =", "sign the transaction') def set_score(oracle, score, private_key): \"Edit oracle's score\"", "@click.option( '--private-key', callback=utils.check_account, help='The privat key to sign the transaction')", "tx_hash = utils.send_transaction(func, 0, private_key) return tx_hash @main.command() @click.option( '--private-key',", "liquidation_duration * 60 func = utils.contracts['oracles'].functions.vote(var_code, value) tx_hash = utils.send_transaction(func,", "def finish_recruiting(private_key): \"Set recruiting as finished\" func = utils.contracts['oracles'].functions.finishRecruiting() tx_hash", "'--liquidation-duration', type=int, help=\"The liquidation 
duration in minutes\") @click.option( '--private-key', callback=utils.check_account,", "score, private_key): \"Edit oracle's score\" oracle = utils.w3.toChecksumAddress(oracle) func =", "Bank\" assert [ether_price, collateral_ratio, liquidation_duration ].count(None) == 2, \"You should", "privat key to sign the transaction') def vote(ether_price, collateral_ratio, liquidation_duration,", "collateral_ratio: var_code = 1 value = int(collateral_ratio * 1000) elif", "oracles to work with Ether dollar\" pass @main.command() @click.option('--ether-price', type=float,", "def vote(ether_price, collateral_ratio, liquidation_duration, private_key): \"Vote on the variable for", "var_code = 1 value = int(collateral_ratio * 1000) elif liquidation_duration:", ". import utils @click.group() def main(): \"Simple CLI for oracles", "0, private_key) return tx_hash @main.command() @click.option( '--private-key', callback=utils.check_account, help='The privat", "vote(ether_price, collateral_ratio, liquidation_duration, private_key): \"Vote on the variable for setting", "in minutes\") @click.option( '--private-key', callback=utils.check_account, help='The privat key to sign", "elif collateral_ratio: var_code = 1 value = int(collateral_ratio * 1000)", "variable for setting up Ether Bank\" assert [ether_price, collateral_ratio, liquidation_duration", "to sign the transaction') def vote(ether_price, collateral_ratio, liquidation_duration, private_key): \"Vote", "int(collateral_ratio * 1000) elif liquidation_duration: var_code = 2 value =", "type=float, help=\"The ether price in ether dollar\") @click.option('--collateral-ratio', type=float, help=\"The", "utils.send_transaction(func, 0, private_key) return tx_hash @main.command() @click.option('--oracle', required=True, help=\"The oracle's", "value = int(ether_price * 100) elif collateral_ratio: var_code = 1", "= utils.contracts['oracles'].functions.finishRecruiting() tx_hash = utils.send_transaction(func, 0, private_key) return tx_hash 
if", "finished\" func = utils.contracts['oracles'].functions.finishRecruiting() tx_hash = utils.send_transaction(func, 0, private_key) return", "CLI for oracles to work with Ether dollar\" pass @main.command()", "on the variable for setting up Ether Bank\" assert [ether_price,", "value = int(collateral_ratio * 1000) elif liquidation_duration: var_code = 2", "key to sign the transaction') def set_score(oracle, score, private_key): \"Edit", "ether_price: var_code = 0 value = int(ether_price * 100) elif", "0, private_key) return tx_hash @main.command() @click.option('--oracle', required=True, help=\"The oracle's address\")", "Ether dollar\" pass @main.command() @click.option('--ether-price', type=float, help=\"The ether price in", "@click.option( '--liquidation-duration', type=int, help=\"The liquidation duration in minutes\") @click.option( '--private-key',", "the transaction') def vote(ether_price, collateral_ratio, liquidation_duration, private_key): \"Vote on the", "1 value = int(collateral_ratio * 1000) elif liquidation_duration: var_code =", "score\" oracle = utils.w3.toChecksumAddress(oracle) func = utils.contracts['oracles'].functions.setScore(oracle, score) tx_hash =", "utils.send_transaction(func, 0, private_key) return tx_hash @main.command() @click.option( '--private-key', callback=utils.check_account, help='The", "'--private-key', callback=utils.check_account, help='The privat key to sign the transaction') def", "variable per vote\" if ether_price: var_code = 0 value =", "in ether dollar\") @click.option('--collateral-ratio', type=float, help=\"The collateral ratio\") @click.option( '--liquidation-duration',", "= int(collateral_ratio * 1000) elif liquidation_duration: var_code = 2 value", "value = liquidation_duration * 60 func = utils.contracts['oracles'].functions.vote(var_code, value) tx_hash", "utils.contracts['oracles'].functions.setScore(oracle, score) tx_hash = utils.send_transaction(func, 0, private_key) return tx_hash @main.command()", 
"@click.option('--ether-price', type=float, help=\"The ether price in ether dollar\") @click.option('--collateral-ratio', type=float,", "help=\"The oracle's address\") @click.option('--score', type=int, required=True, help=\"The oracle's score\") @click.option(", "func = utils.contracts['oracles'].functions.finishRecruiting() tx_hash = utils.send_transaction(func, 0, private_key) return tx_hash", "tx_hash @main.command() @click.option( '--private-key', callback=utils.check_account, help='The privat key to sign", "help='The privat key to sign the transaction') def finish_recruiting(private_key): \"Set", "= utils.contracts['oracles'].functions.vote(var_code, value) tx_hash = utils.send_transaction(func, 0, private_key) return tx_hash", "transaction') def set_score(oracle, score, private_key): \"Edit oracle's score\" oracle =", "recruiting as finished\" func = utils.contracts['oracles'].functions.finishRecruiting() tx_hash = utils.send_transaction(func, 0,", "oracle = utils.w3.toChecksumAddress(oracle) func = utils.contracts['oracles'].functions.setScore(oracle, score) tx_hash = utils.send_transaction(func,", "ether price in ether dollar\") @click.option('--collateral-ratio', type=float, help=\"The collateral ratio\")", "@click.option('--collateral-ratio', type=float, help=\"The collateral ratio\") @click.option( '--liquidation-duration', type=int, help=\"The liquidation", "liquidation duration in minutes\") @click.option( '--private-key', callback=utils.check_account, help='The privat key", "one variable per vote\" if ether_price: var_code = 0 value", "utils.w3.toChecksumAddress(oracle) func = utils.contracts['oracles'].functions.setScore(oracle, score) tx_hash = utils.send_transaction(func, 0, private_key)", "@click.option('--oracle', required=True, help=\"The oracle's address\") @click.option('--score', type=int, required=True, help=\"The oracle's", "utils.send_transaction(func, 0, private_key) return tx_hash if __name__ == '__main__': main()", "2, \"You should set 
one variable per vote\" if ether_price:", "callback=utils.check_account, help='The privat key to sign the transaction') def finish_recruiting(private_key):", "tx_hash = utils.send_transaction(func, 0, private_key) return tx_hash if __name__ ==", "privat key to sign the transaction') def finish_recruiting(private_key): \"Set recruiting", "set_score(oracle, score, private_key): \"Edit oracle's score\" oracle = utils.w3.toChecksumAddress(oracle) func", "collateral_ratio, liquidation_duration ].count(None) == 2, \"You should set one variable", "work with Ether dollar\" pass @main.command() @click.option('--ether-price', type=float, help=\"The ether", "required=True, help=\"The oracle's address\") @click.option('--score', type=int, required=True, help=\"The oracle's score\")", "privat key to sign the transaction') def set_score(oracle, score, private_key):", "= utils.w3.toChecksumAddress(oracle) func = utils.contracts['oracles'].functions.setScore(oracle, score) tx_hash = utils.send_transaction(func, 0,", "\"Vote on the variable for setting up Ether Bank\" assert", "Ether Bank\" assert [ether_price, collateral_ratio, liquidation_duration ].count(None) == 2, \"You", "help=\"The oracle's score\") @click.option( '--private-key', callback=utils.check_account, help='The privat key to", "pass @main.command() @click.option('--ether-price', type=float, help=\"The ether price in ether dollar\")", "assert [ether_price, collateral_ratio, liquidation_duration ].count(None) == 2, \"You should set", "as finished\" func = utils.contracts['oracles'].functions.finishRecruiting() tx_hash = utils.send_transaction(func, 0, private_key)", "duration in minutes\") @click.option( '--private-key', callback=utils.check_account, help='The privat key to", "help='The privat key to sign the transaction') def vote(ether_price, collateral_ratio,", "type=float, help=\"The collateral ratio\") @click.option( '--liquidation-duration', type=int, help=\"The liquidation duration", "the transaction') def 
finish_recruiting(private_key): \"Set recruiting as finished\" func =", "utils.contracts['oracles'].functions.vote(var_code, value) tx_hash = utils.send_transaction(func, 0, private_key) return tx_hash @main.command()", "the variable for setting up Ether Bank\" assert [ether_price, collateral_ratio,", "= liquidation_duration * 60 func = utils.contracts['oracles'].functions.vote(var_code, value) tx_hash =", "== 2, \"You should set one variable per vote\" if", "value) tx_hash = utils.send_transaction(func, 0, private_key) return tx_hash @main.command() @click.option('--oracle',", "to sign the transaction') def set_score(oracle, score, private_key): \"Edit oracle's", "100) elif collateral_ratio: var_code = 1 value = int(collateral_ratio *", "@main.command() @click.option('--ether-price', type=float, help=\"The ether price in ether dollar\") @click.option('--collateral-ratio',", "setting up Ether Bank\" assert [ether_price, collateral_ratio, liquidation_duration ].count(None) ==", "sign the transaction') def vote(ether_price, collateral_ratio, liquidation_duration, private_key): \"Vote on", "private_key): \"Edit oracle's score\" oracle = utils.w3.toChecksumAddress(oracle) func = utils.contracts['oracles'].functions.setScore(oracle,", "dollar\" pass @main.command() @click.option('--ether-price', type=float, help=\"The ether price in ether", "= utils.send_transaction(func, 0, private_key) return tx_hash if __name__ == '__main__':", "callback=utils.check_account, help='The privat key to sign the transaction') def set_score(oracle,", "help=\"The ether price in ether dollar\") @click.option('--collateral-ratio', type=float, help=\"The collateral", "<reponame>ideal-money/etherbank-cli import click from . 
import utils @click.group() def main():", "for setting up Ether Bank\" assert [ether_price, collateral_ratio, liquidation_duration ].count(None)", "import utils @click.group() def main(): \"Simple CLI for oracles to", "* 1000) elif liquidation_duration: var_code = 2 value = liquidation_duration", "ether dollar\") @click.option('--collateral-ratio', type=float, help=\"The collateral ratio\") @click.option( '--liquidation-duration', type=int,", "liquidation_duration ].count(None) == 2, \"You should set one variable per", "func = utils.contracts['oracles'].functions.setScore(oracle, score) tx_hash = utils.send_transaction(func, 0, private_key) return", "private_key) return tx_hash @main.command() @click.option('--oracle', required=True, help=\"The oracle's address\") @click.option('--score',", "help=\"The liquidation duration in minutes\") @click.option( '--private-key', callback=utils.check_account, help='The privat", "return tx_hash @main.command() @click.option('--oracle', required=True, help=\"The oracle's address\") @click.option('--score', type=int,", "callback=utils.check_account, help='The privat key to sign the transaction') def vote(ether_price,", "oracle's score\") @click.option( '--private-key', callback=utils.check_account, help='The privat key to sign", "up Ether Bank\" assert [ether_price, collateral_ratio, liquidation_duration ].count(None) == 2,", "ratio\") @click.option( '--liquidation-duration', type=int, help=\"The liquidation duration in minutes\") @click.option(", "utils.contracts['oracles'].functions.finishRecruiting() tx_hash = utils.send_transaction(func, 0, private_key) return tx_hash if __name__", "with Ether dollar\" pass @main.command() @click.option('--ether-price', type=float, help=\"The ether price", "if ether_price: var_code = 0 value = int(ether_price * 100)", "@click.group() def main(): \"Simple CLI for oracles to work with", "tx_hash = utils.send_transaction(func, 0, private_key) return tx_hash @main.command() 
@click.option('--oracle', required=True,", "= utils.contracts['oracles'].functions.setScore(oracle, score) tx_hash = utils.send_transaction(func, 0, private_key) return tx_hash", "= 1 value = int(collateral_ratio * 1000) elif liquidation_duration: var_code", "elif liquidation_duration: var_code = 2 value = liquidation_duration * 60", "for oracles to work with Ether dollar\" pass @main.command() @click.option('--ether-price',", "[ether_price, collateral_ratio, liquidation_duration ].count(None) == 2, \"You should set one", "liquidation_duration: var_code = 2 value = liquidation_duration * 60 func", "help=\"The collateral ratio\") @click.option( '--liquidation-duration', type=int, help=\"The liquidation duration in", "finish_recruiting(private_key): \"Set recruiting as finished\" func = utils.contracts['oracles'].functions.finishRecruiting() tx_hash =", "@click.option('--score', type=int, required=True, help=\"The oracle's score\") @click.option( '--private-key', callback=utils.check_account, help='The", "1000) elif liquidation_duration: var_code = 2 value = liquidation_duration *", "60 func = utils.contracts['oracles'].functions.vote(var_code, value) tx_hash = utils.send_transaction(func, 0, private_key)", "address\") @click.option('--score', type=int, required=True, help=\"The oracle's score\") @click.option( '--private-key', callback=utils.check_account,", "var_code = 2 value = liquidation_duration * 60 func =", "transaction') def vote(ether_price, collateral_ratio, liquidation_duration, private_key): \"Vote on the variable", "set one variable per vote\" if ether_price: var_code = 0", "per vote\" if ether_price: var_code = 0 value = int(ether_price", "int(ether_price * 100) elif collateral_ratio: var_code = 1 value =", "* 100) elif collateral_ratio: var_code = 1 value = int(collateral_ratio", "dollar\") @click.option('--collateral-ratio', type=float, help=\"The collateral ratio\") @click.option( '--liquidation-duration', type=int, help=\"The", "\"Set recruiting as 
finished\" func = utils.contracts['oracles'].functions.finishRecruiting() tx_hash = utils.send_transaction(func,", "click from . import utils @click.group() def main(): \"Simple CLI", "var_code = 0 value = int(ether_price * 100) elif collateral_ratio:", "private_key): \"Vote on the variable for setting up Ether Bank\"", "to work with Ether dollar\" pass @main.command() @click.option('--ether-price', type=float, help=\"The", "utils @click.group() def main(): \"Simple CLI for oracles to work", "from . import utils @click.group() def main(): \"Simple CLI for", "oracle's score\" oracle = utils.w3.toChecksumAddress(oracle) func = utils.contracts['oracles'].functions.setScore(oracle, score) tx_hash", "transaction') def finish_recruiting(private_key): \"Set recruiting as finished\" func = utils.contracts['oracles'].functions.finishRecruiting()", "private_key) return tx_hash @main.command() @click.option( '--private-key', callback=utils.check_account, help='The privat key", "type=int, required=True, help=\"The oracle's score\") @click.option( '--private-key', callback=utils.check_account, help='The privat", "return tx_hash @main.command() @click.option( '--private-key', callback=utils.check_account, help='The privat key to", "@main.command() @click.option( '--private-key', callback=utils.check_account, help='The privat key to sign the", "= 2 value = liquidation_duration * 60 func = utils.contracts['oracles'].functions.vote(var_code,", "2 value = liquidation_duration * 60 func = utils.contracts['oracles'].functions.vote(var_code, value)", "* 60 func = utils.contracts['oracles'].functions.vote(var_code, value) tx_hash = utils.send_transaction(func, 0,", "\"Edit oracle's score\" oracle = utils.w3.toChecksumAddress(oracle) func = utils.contracts['oracles'].functions.setScore(oracle, score)", "def set_score(oracle, score, private_key): \"Edit oracle's score\" oracle = utils.w3.toChecksumAddress(oracle)", "def main(): \"Simple CLI for oracles to work with Ether", "should set one 
variable per vote\" if ether_price: var_code =", "tx_hash @main.command() @click.option('--oracle', required=True, help=\"The oracle's address\") @click.option('--score', type=int, required=True,", "score\") @click.option( '--private-key', callback=utils.check_account, help='The privat key to sign the", "= utils.send_transaction(func, 0, private_key) return tx_hash @main.command() @click.option( '--private-key', callback=utils.check_account,", "vote\" if ether_price: var_code = 0 value = int(ether_price *", "minutes\") @click.option( '--private-key', callback=utils.check_account, help='The privat key to sign the", "].count(None) == 2, \"You should set one variable per vote\"", "\"Simple CLI for oracles to work with Ether dollar\" pass", "collateral_ratio, liquidation_duration, private_key): \"Vote on the variable for setting up", "liquidation_duration, private_key): \"Vote on the variable for setting up Ether", "help='The privat key to sign the transaction') def set_score(oracle, score,", "= int(ether_price * 100) elif collateral_ratio: var_code = 1 value", "= utils.send_transaction(func, 0, private_key) return tx_hash @main.command() @click.option('--oracle', required=True, help=\"The", "main(): \"Simple CLI for oracles to work with Ether dollar\"", "key to sign the transaction') def vote(ether_price, collateral_ratio, liquidation_duration, private_key):", "sign the transaction') def finish_recruiting(private_key): \"Set recruiting as finished\" func", "func = utils.contracts['oracles'].functions.vote(var_code, value) tx_hash = utils.send_transaction(func, 0, private_key) return", "required=True, help=\"The oracle's score\") @click.option( '--private-key', callback=utils.check_account, help='The privat key" ]
[ "= self.machine.session() self.session.run(f\"cd {project_path}\") return old_apply(self, project) cls.apply = new_apply", "new_apply(self, project): with self.machine.tempdir() as tempdir: project_path = tempdir /", "project.files(): if f.name in existing_files: copy(f.path, project_path / f.name) with", "user=user, keyfile=keyfile) return rem def get_local_machine(): return local def with_machine_rule(cls):", "@_once def get_remote_machine(host, user, keyfile): rem = ParamikoMachine(host, user=user, keyfile=keyfile)", "return local def with_machine_rule(cls): old_init = cls.__init__ def new_init(self, config):", "config[\"machine\"][\"user\"]) self.files_to_copy = config[\"machine\"].get(\"files_to_copy\") else: raise ValueError(f\"Invalid machine type: {config['machine']['type']}\")", "self.machine.tempdir() as tempdir: project_path = tempdir / \"project\" project_path.mkdir() existing_files", "for f in project.root.list()]) if self.files_to_copy: for fname in self.files_to_copy:", "stream=None) rem = ParamikoMachine(host, user=user, password=password) return rem @_once def", "get_local_machine(): return local def with_machine_rule(cls): old_init = cls.__init__ def new_init(self,", "if machine_type == \"local\": self.machine = get_local_machine() self.files_to_copy = None", "== \"remote\": if \"keyfile\" in config[\"machine\"]: self.machine = get_remote_machine(config[\"machine\"][\"host\"], config[\"machine\"][\"user\"],", "\"local\" else: machine_type = config[\"machine\"][\"type\"] if machine_type == \"local\": self.machine", "in self.files_to_copy: if fname in existing_files: copy(project.root / fname, project_path", "raise ValueError(f\"Invalid machine type: {config['machine']['type']}\") self.machine_type = machine_type old_init(self, config)", "plumbum.path.utils import copy def _once(f): res = None def wrapped(*args,", "self.files_to_copy = config[\"machine\"].get(\"files_to_copy\") else: raise ValueError(f\"Invalid machine type: 
{config['machine']['type']}\") self.machine_type", "def get_remote_machine_with_password(host, user): password = getpass.getpass(prompt=f\"Password for {user}@{host}: \", stream=None)", "def get_remote_machine(host, user, keyfile): rem = ParamikoMachine(host, user=user, keyfile=keyfile) return", "config[\"machine\"]: self.machine = get_remote_machine(config[\"machine\"][\"host\"], config[\"machine\"][\"user\"], config[\"machine\"][\"keyfile\"]) else: self.machine = get_remote_machine_with_password(config[\"machine\"][\"host\"],", "f.name) with self.machine.cwd(project_path): self.session = self.machine.session() self.session.run(f\"cd {project_path}\") return old_apply(self,", "/ fname) else: for f in project.files(): if f.name in", "plumbum.machines.paramiko_machine import ParamikoMachine from plumbum.path.utils import copy def _once(f): res", "self.machine = get_remote_machine_with_password(config[\"machine\"][\"host\"], config[\"machine\"][\"user\"]) self.files_to_copy = config[\"machine\"].get(\"files_to_copy\") else: raise ValueError(f\"Invalid", "def new_apply(self, project): with self.machine.tempdir() as tempdir: project_path = tempdir", "copy(project.root / fname, project_path / fname) else: for f in", "ValueError(f\"Invalid machine type: {config['machine']['type']}\") self.machine_type = machine_type old_init(self, config) cls.__init__", "return rem def get_local_machine(): return local def with_machine_rule(cls): old_init =", "machine_type == \"remote\": if \"keyfile\" in config[\"machine\"]: self.machine = get_remote_machine(config[\"machine\"][\"host\"],", "new_init old_apply = cls.apply def new_apply(self, project): with self.machine.tempdir() as", "tempdir: project_path = tempdir / \"project\" project_path.mkdir() existing_files = set([f.name", "rem def get_local_machine(): return local def with_machine_rule(cls): old_init = cls.__init__", "local from plumbum.machines.paramiko_machine import ParamikoMachine from plumbum.path.utils import copy def", 
"res = None def wrapped(*args, **kwargs): nonlocal res if res", "cls.__init__ def new_init(self, config): if \"machine\" not in config: machine_type", "f(*args, **kwargs) return res return wrapped @_once def get_remote_machine_with_password(host, user):", "ParamikoMachine(host, user=user, password=password) return rem @_once def get_remote_machine(host, user, keyfile):", "= machine_type old_init(self, config) cls.__init__ = new_init old_apply = cls.apply", "with self.machine.cwd(project_path): self.session = self.machine.session() self.session.run(f\"cd {project_path}\") return old_apply(self, project)", "plumbum import local from plumbum.machines.paramiko_machine import ParamikoMachine from plumbum.path.utils import", "in config[\"machine\"]: self.machine = get_remote_machine(config[\"machine\"][\"host\"], config[\"machine\"][\"user\"], config[\"machine\"][\"keyfile\"]) else: self.machine =", "as tempdir: project_path = tempdir / \"project\" project_path.mkdir() existing_files =", "config[\"machine\"].get(\"files_to_copy\") else: raise ValueError(f\"Invalid machine type: {config['machine']['type']}\") self.machine_type = machine_type", "if f.name in existing_files: copy(f.path, project_path / f.name) with self.machine.cwd(project_path):", "**kwargs) return res return wrapped @_once def get_remote_machine_with_password(host, user): password", "self.files_to_copy: if fname in existing_files: copy(project.root / fname, project_path /", "= get_local_machine() self.files_to_copy = None elif machine_type == \"remote\": if", "fname) else: for f in project.files(): if f.name in existing_files:", "in project.files(): if f.name in existing_files: copy(f.path, project_path / f.name)", "\"project\" project_path.mkdir() existing_files = set([f.name for f in project.root.list()]) if", "/ f.name) with self.machine.cwd(project_path): self.session = self.machine.session() self.session.run(f\"cd {project_path}\") return", "existing_files: copy(project.root / fname, project_path / 
fname) else: for f", "None def wrapped(*args, **kwargs): nonlocal res if res is None:", "= f(*args, **kwargs) return res return wrapped @_once def get_remote_machine_with_password(host,", "= ParamikoMachine(host, user=user, keyfile=keyfile) return rem def get_local_machine(): return local", "== \"local\": self.machine = get_local_machine() self.files_to_copy = None elif machine_type", "f in project.root.list()]) if self.files_to_copy: for fname in self.files_to_copy: if", "user): password = getpass.getpass(prompt=f\"Password for {user}@{host}: \", stream=None) rem =", "from plumbum.machines.paramiko_machine import ParamikoMachine from plumbum.path.utils import copy def _once(f):", "self.session.run(f\"cd {project_path}\") return old_apply(self, project) cls.apply = new_apply return cls", "with_machine_rule(cls): old_init = cls.__init__ def new_init(self, config): if \"machine\" not", "config[\"machine\"][\"type\"] if machine_type == \"local\": self.machine = get_local_machine() self.files_to_copy =", "import local from plumbum.machines.paramiko_machine import ParamikoMachine from plumbum.path.utils import copy", "wrapped @_once def get_remote_machine_with_password(host, user): password = getpass.getpass(prompt=f\"Password for {user}@{host}:", "keyfile=keyfile) return rem def get_local_machine(): return local def with_machine_rule(cls): old_init", "password = getpass.getpass(prompt=f\"Password for {user}@{host}: \", stream=None) rem = ParamikoMachine(host,", "user, keyfile): rem = ParamikoMachine(host, user=user, keyfile=keyfile) return rem def", "\"remote\": if \"keyfile\" in config[\"machine\"]: self.machine = get_remote_machine(config[\"machine\"][\"host\"], config[\"machine\"][\"user\"], config[\"machine\"][\"keyfile\"])", "/ fname, project_path / fname) else: for f in project.files():", "in existing_files: copy(f.path, project_path / f.name) with self.machine.cwd(project_path): self.session =", "= getpass.getpass(prompt=f\"Password for {user}@{host}: \", 
stream=None) rem = ParamikoMachine(host, user=user,", "f in project.files(): if f.name in existing_files: copy(f.path, project_path /", "old_init(self, config) cls.__init__ = new_init old_apply = cls.apply def new_apply(self,", "self.machine.session() self.session.run(f\"cd {project_path}\") return old_apply(self, project) cls.apply = new_apply return", "= ParamikoMachine(host, user=user, password=password) return rem @_once def get_remote_machine(host, user,", "config) cls.__init__ = new_init old_apply = cls.apply def new_apply(self, project):", "if self.files_to_copy: for fname in self.files_to_copy: if fname in existing_files:", "get_remote_machine(host, user, keyfile): rem = ParamikoMachine(host, user=user, keyfile=keyfile) return rem", "else: machine_type = config[\"machine\"][\"type\"] if machine_type == \"local\": self.machine =", "{user}@{host}: \", stream=None) rem = ParamikoMachine(host, user=user, password=password) return rem", "get_remote_machine(config[\"machine\"][\"host\"], config[\"machine\"][\"user\"], config[\"machine\"][\"keyfile\"]) else: self.machine = get_remote_machine_with_password(config[\"machine\"][\"host\"], config[\"machine\"][\"user\"]) self.files_to_copy =", "if \"keyfile\" in config[\"machine\"]: self.machine = get_remote_machine(config[\"machine\"][\"host\"], config[\"machine\"][\"user\"], config[\"machine\"][\"keyfile\"]) else:", "project_path.mkdir() existing_files = set([f.name for f in project.root.list()]) if self.files_to_copy:", "ParamikoMachine(host, user=user, keyfile=keyfile) return rem def get_local_machine(): return local def", "def get_local_machine(): return local def with_machine_rule(cls): old_init = cls.__init__ def", "= \"local\" else: machine_type = config[\"machine\"][\"type\"] if machine_type == \"local\":", "wrapped(*args, **kwargs): nonlocal res if res is None: res =", "project_path = tempdir / \"project\" project_path.mkdir() existing_files = set([f.name for", "in existing_files: copy(project.root / fname, 
project_path / fname) else: for", "res is None: res = f(*args, **kwargs) return res return", "get_remote_machine_with_password(config[\"machine\"][\"host\"], config[\"machine\"][\"user\"]) self.files_to_copy = config[\"machine\"].get(\"files_to_copy\") else: raise ValueError(f\"Invalid machine type:", "machine_type == \"local\": self.machine = get_local_machine() self.files_to_copy = None elif", "config[\"machine\"][\"keyfile\"]) else: self.machine = get_remote_machine_with_password(config[\"machine\"][\"host\"], config[\"machine\"][\"user\"]) self.files_to_copy = config[\"machine\"].get(\"files_to_copy\") else:", "def _once(f): res = None def wrapped(*args, **kwargs): nonlocal res", "else: raise ValueError(f\"Invalid machine type: {config['machine']['type']}\") self.machine_type = machine_type old_init(self,", "= cls.__init__ def new_init(self, config): if \"machine\" not in config:", "getpass.getpass(prompt=f\"Password for {user}@{host}: \", stream=None) rem = ParamikoMachine(host, user=user, password=password)", "res if res is None: res = f(*args, **kwargs) return", "with self.machine.tempdir() as tempdir: project_path = tempdir / \"project\" project_path.mkdir()", "self.files_to_copy = None elif machine_type == \"remote\": if \"keyfile\" in", "from plumbum import local from plumbum.machines.paramiko_machine import ParamikoMachine from plumbum.path.utils", "f.name in existing_files: copy(f.path, project_path / f.name) with self.machine.cwd(project_path): self.session", "project_path / f.name) with self.machine.cwd(project_path): self.session = self.machine.session() self.session.run(f\"cd {project_path}\")", "get_remote_machine_with_password(host, user): password = getpass.getpass(prompt=f\"Password for {user}@{host}: \", stream=None) rem", "return wrapped @_once def get_remote_machine_with_password(host, user): password = getpass.getpass(prompt=f\"Password for", "= get_remote_machine(config[\"machine\"][\"host\"], config[\"machine\"][\"user\"], 
config[\"machine\"][\"keyfile\"]) else: self.machine = get_remote_machine_with_password(config[\"machine\"][\"host\"], config[\"machine\"][\"user\"]) self.files_to_copy", "is None: res = f(*args, **kwargs) return res return wrapped", "def with_machine_rule(cls): old_init = cls.__init__ def new_init(self, config): if \"machine\"", "config[\"machine\"][\"user\"], config[\"machine\"][\"keyfile\"]) else: self.machine = get_remote_machine_with_password(config[\"machine\"][\"host\"], config[\"machine\"][\"user\"]) self.files_to_copy = config[\"machine\"].get(\"files_to_copy\")", "existing_files: copy(f.path, project_path / f.name) with self.machine.cwd(project_path): self.session = self.machine.session()", "{config['machine']['type']}\") self.machine_type = machine_type old_init(self, config) cls.__init__ = new_init old_apply", "\"local\": self.machine = get_local_machine() self.files_to_copy = None elif machine_type ==", "else: self.machine = get_remote_machine_with_password(config[\"machine\"][\"host\"], config[\"machine\"][\"user\"]) self.files_to_copy = config[\"machine\"].get(\"files_to_copy\") else: raise", "machine_type = \"local\" else: machine_type = config[\"machine\"][\"type\"] if machine_type ==", "copy def _once(f): res = None def wrapped(*args, **kwargs): nonlocal", "import ParamikoMachine from plumbum.path.utils import copy def _once(f): res =", "password=password) return rem @_once def get_remote_machine(host, user, keyfile): rem =", "\", stream=None) rem = ParamikoMachine(host, user=user, password=password) return rem @_once", "not in config: machine_type = \"local\" else: machine_type = config[\"machine\"][\"type\"]", "self.machine = get_remote_machine(config[\"machine\"][\"host\"], config[\"machine\"][\"user\"], config[\"machine\"][\"keyfile\"]) else: self.machine = get_remote_machine_with_password(config[\"machine\"][\"host\"], config[\"machine\"][\"user\"])", "else: for f in project.files(): if f.name in existing_files: copy(f.path,", "\"keyfile\" in 
config[\"machine\"]: self.machine = get_remote_machine(config[\"machine\"][\"host\"], config[\"machine\"][\"user\"], config[\"machine\"][\"keyfile\"]) else: self.machine", "self.session = self.machine.session() self.session.run(f\"cd {project_path}\") return old_apply(self, project) cls.apply =", "if \"machine\" not in config: machine_type = \"local\" else: machine_type", "config: machine_type = \"local\" else: machine_type = config[\"machine\"][\"type\"] if machine_type", "nonlocal res if res is None: res = f(*args, **kwargs)", "None: res = f(*args, **kwargs) return res return wrapped @_once", "= config[\"machine\"].get(\"files_to_copy\") else: raise ValueError(f\"Invalid machine type: {config['machine']['type']}\") self.machine_type =", "@_once def get_remote_machine_with_password(host, user): password = getpass.getpass(prompt=f\"Password for {user}@{host}: \",", "config): if \"machine\" not in config: machine_type = \"local\" else:", "copy(f.path, project_path / f.name) with self.machine.cwd(project_path): self.session = self.machine.session() self.session.run(f\"cd", "cls.apply def new_apply(self, project): with self.machine.tempdir() as tempdir: project_path =", "= set([f.name for f in project.root.list()]) if self.files_to_copy: for fname", "= cls.apply def new_apply(self, project): with self.machine.tempdir() as tempdir: project_path", "new_init(self, config): if \"machine\" not in config: machine_type = \"local\"", "old_init = cls.__init__ def new_init(self, config): if \"machine\" not in", "= get_remote_machine_with_password(config[\"machine\"][\"host\"], config[\"machine\"][\"user\"]) self.files_to_copy = config[\"machine\"].get(\"files_to_copy\") else: raise ValueError(f\"Invalid machine", "= None elif machine_type == \"remote\": if \"keyfile\" in config[\"machine\"]:", "= tempdir / \"project\" project_path.mkdir() existing_files = set([f.name for f", "None elif machine_type == \"remote\": if \"keyfile\" in config[\"machine\"]: self.machine", "tempdir / 
\"project\" project_path.mkdir() existing_files = set([f.name for f in", "return res return wrapped @_once def get_remote_machine_with_password(host, user): password =", "type: {config['machine']['type']}\") self.machine_type = machine_type old_init(self, config) cls.__init__ = new_init", "fname in existing_files: copy(project.root / fname, project_path / fname) else:", "old_apply = cls.apply def new_apply(self, project): with self.machine.tempdir() as tempdir:", "if res is None: res = f(*args, **kwargs) return res", "fname in self.files_to_copy: if fname in existing_files: copy(project.root / fname,", "cls.__init__ = new_init old_apply = cls.apply def new_apply(self, project): with", "from plumbum.path.utils import copy def _once(f): res = None def", "for fname in self.files_to_copy: if fname in existing_files: copy(project.root /", "in project.root.list()]) if self.files_to_copy: for fname in self.files_to_copy: if fname", "project.root.list()]) if self.files_to_copy: for fname in self.files_to_copy: if fname in", "machine type: {config['machine']['type']}\") self.machine_type = machine_type old_init(self, config) cls.__init__ =", "in config: machine_type = \"local\" else: machine_type = config[\"machine\"][\"type\"] if", "_once(f): res = None def wrapped(*args, **kwargs): nonlocal res if", "\"machine\" not in config: machine_type = \"local\" else: machine_type =", "ParamikoMachine from plumbum.path.utils import copy def _once(f): res = None", "rem = ParamikoMachine(host, user=user, password=password) return rem @_once def get_remote_machine(host,", "elif machine_type == \"remote\": if \"keyfile\" in config[\"machine\"]: self.machine =", "machine_type old_init(self, config) cls.__init__ = new_init old_apply = cls.apply def", "self.machine.cwd(project_path): self.session = self.machine.session() self.session.run(f\"cd {project_path}\") return old_apply(self, project) cls.apply", "import copy def _once(f): res = None def wrapped(*args, **kwargs):", "def 
wrapped(*args, **kwargs): nonlocal res if res is None: res", "= config[\"machine\"][\"type\"] if machine_type == \"local\": self.machine = get_local_machine() self.files_to_copy", "res return wrapped @_once def get_remote_machine_with_password(host, user): password = getpass.getpass(prompt=f\"Password", "res = f(*args, **kwargs) return res return wrapped @_once def", "keyfile): rem = ParamikoMachine(host, user=user, keyfile=keyfile) return rem def get_local_machine():", "**kwargs): nonlocal res if res is None: res = f(*args,", "project_path / fname) else: for f in project.files(): if f.name", "fname, project_path / fname) else: for f in project.files(): if", "user=user, password=password) return rem @_once def get_remote_machine(host, user, keyfile): rem", "self.machine_type = machine_type old_init(self, config) cls.__init__ = new_init old_apply =", "getpass from plumbum import local from plumbum.machines.paramiko_machine import ParamikoMachine from", "def new_init(self, config): if \"machine\" not in config: machine_type =", "existing_files = set([f.name for f in project.root.list()]) if self.files_to_copy: for", "project): with self.machine.tempdir() as tempdir: project_path = tempdir / \"project\"", "set([f.name for f in project.root.list()]) if self.files_to_copy: for fname in", "return rem @_once def get_remote_machine(host, user, keyfile): rem = ParamikoMachine(host,", "machine_type = config[\"machine\"][\"type\"] if machine_type == \"local\": self.machine = get_local_machine()", "import getpass from plumbum import local from plumbum.machines.paramiko_machine import ParamikoMachine", "local def with_machine_rule(cls): old_init = cls.__init__ def new_init(self, config): if", "= new_init old_apply = cls.apply def new_apply(self, project): with self.machine.tempdir()", "if fname in existing_files: copy(project.root / fname, project_path / fname)", "for f in project.files(): if f.name in existing_files: copy(f.path, project_path", "rem @_once def 
get_remote_machine(host, user, keyfile): rem = ParamikoMachine(host, user=user,", "self.files_to_copy: for fname in self.files_to_copy: if fname in existing_files: copy(project.root", "rem = ParamikoMachine(host, user=user, keyfile=keyfile) return rem def get_local_machine(): return", "get_local_machine() self.files_to_copy = None elif machine_type == \"remote\": if \"keyfile\"", "= None def wrapped(*args, **kwargs): nonlocal res if res is", "for {user}@{host}: \", stream=None) rem = ParamikoMachine(host, user=user, password=password) return", "/ \"project\" project_path.mkdir() existing_files = set([f.name for f in project.root.list()])", "self.machine = get_local_machine() self.files_to_copy = None elif machine_type == \"remote\":" ]
[ "float(input('digite o seu salario: ')) aumento = (salario + (salario", "seu salario: ')) aumento = (salario + (salario * 15)/100", "15)/100 if salario <= 1250 else salario + (salario *", "if salario <= 1250 else salario + (salario * 10)/100)", "o seu salario: ')) aumento = (salario + (salario *", "salario = float(input('digite o seu salario: ')) aumento = (salario", "(salario + (salario * 15)/100 if salario <= 1250 else", "<reponame>legna7/Python<gh_stars>0 salario = float(input('digite o seu salario: ')) aumento =", "= float(input('digite o seu salario: ')) aumento = (salario +", "')) aumento = (salario + (salario * 15)/100 if salario", "= (salario + (salario * 15)/100 if salario <= 1250", "salario <= 1250 else salario + (salario * 10)/100) print(aumento)", "+ (salario * 15)/100 if salario <= 1250 else salario", "aumento = (salario + (salario * 15)/100 if salario <=", "* 15)/100 if salario <= 1250 else salario + (salario", "(salario * 15)/100 if salario <= 1250 else salario +", "salario: ')) aumento = (salario + (salario * 15)/100 if" ]
[ "t = get_tree(\"local-branches\") self.assertEqual(t, []) t = get_tree(\"remote-branches\") self.assertEqual(t, [])", "import os import os.path import airflow import airflow.plugins_manager from airflow", "airflow import configuration from flask import Flask from unittest import", "0) self.assertTrue('git' in (x['id'] for x in t)) def test_tags(self):", "'False') def test_tree(self): with app.app_context(): t = get_tree() self.assertTrue(len(t) >", "os.path.dirname(os.path.realpath(__file__)) configuration.conf.set(PLUGIN_NAME, 'git_init_repo', 'False') configuration.conf.set(PLUGIN_NAME, 'root_directory', self.root_dir) def test_tree(self): with", "get_tree(\"remote-branches\") self.assertIsNotNone(t) def test_files(self): with app.app_context(): t = get_tree(\"files\") self.assertTrue(", "Flask from unittest import TestCase, main from airflow_code_editor.commons import PLUGIN_NAME", "self.assertTrue('git' in (x['id'] for x in t)) def test_tags(self): with", "with app.app_context(): t = get_tree(\"local-branches\") self.assertIsNotNone(t) def test_remote_branches(self): with app.app_context():", "get_tree(\"git/HEAD\") self.assertTrue(t is not None) class TestTreeGitDisabled(TestCase): def setUp(self): self.root_dir", "get_tree(\"remote-branches\") self.assertEqual(t, []) t = get_tree(\"files\") self.assertTrue( len([x.get('id') for x", "self.assertTrue(t is not None) class TestTreeGitDisabled(TestCase): def setUp(self): self.root_dir =", "t = get_tree(\"tags\") self.assertEqual(t, []) t = get_tree(\"local-branches\") self.assertEqual(t, [])", "def test_files(self): with app.app_context(): t = get_tree(\"files\") self.assertTrue( len([x.get('id') for", "if x.get('id') == 'test_utils.py']) == 1 ) t = get_tree(\"files/folder\")", "t if x.get('id') == '1']) == 1) def test_git(self): with", "self.assertTrue('git' not in (x['id'] for x in t)) t =", "Flask(__name__) class TestTree(TestCase): def setUp(self): self.root_dir = os.path.dirname(os.path.realpath(__file__)) 
configuration.conf.set(PLUGIN_NAME, 'git_init_repo',", "not None) class TestTreeGitDisabled(TestCase): def setUp(self): self.root_dir = os.path.dirname(os.path.realpath(__file__)) configuration.conf.set(PLUGIN_NAME,", "get_tree(\"files\") self.assertTrue( len([x.get('id') for x in t if x.get('id') ==", "from unittest import TestCase, main from airflow_code_editor.commons import PLUGIN_NAME from", "configuration.conf.set(PLUGIN_NAME, 'git_enabled', 'False') def test_tree(self): with app.app_context(): t = get_tree()", "with app.app_context(): t = get_tree() self.assertTrue(len(t) > 0) self.assertTrue('git' not", "app.app_context(): t = get_tree(\"files\") self.assertTrue( len([x.get('id') for x in t", "None) class TestTreeGitDisabled(TestCase): def setUp(self): self.root_dir = os.path.dirname(os.path.realpath(__file__)) configuration.conf.set(PLUGIN_NAME, 'git_init_repo',", ") t = get_tree(\"files/folder\") self.assertTrue(len([x.get('id') for x in t if", "in (x['id'] for x in t)) t = get_tree(\"tags\") self.assertEqual(t,", "import configuration from flask import Flask from unittest import TestCase,", "test_remote_branches(self): with app.app_context(): t = get_tree(\"remote-branches\") self.assertIsNotNone(t) def test_files(self): with", "#!/usr/bin/env python import os import os.path import airflow import airflow.plugins_manager", "with app.app_context(): t = get_tree(\"git/HEAD\") self.assertTrue(t is not None) class", "self.root_dir) configuration.conf.set(PLUGIN_NAME, 'git_enabled', 'False') def test_tree(self): with app.app_context(): t =", "TestTree(TestCase): def setUp(self): self.root_dir = os.path.dirname(os.path.realpath(__file__)) configuration.conf.set(PLUGIN_NAME, 'git_init_repo', 'False') configuration.conf.set(PLUGIN_NAME,", "import TestCase, main from airflow_code_editor.commons import PLUGIN_NAME from airflow_code_editor.tree import", "test_files(self): with app.app_context(): t = get_tree(\"files\") self.assertTrue( len([x.get('id') for x", "0) 
self.assertTrue('git' not in (x['id'] for x in t)) t", "from airflow import configuration from flask import Flask from unittest", "unittest import TestCase, main from airflow_code_editor.commons import PLUGIN_NAME from airflow_code_editor.tree", "app.app_context(): t = get_tree() self.assertTrue(len(t) > 0) self.assertTrue('git' not in", "[]) t = get_tree(\"remote-branches\") self.assertEqual(t, []) t = get_tree(\"files\") self.assertTrue(", "= get_tree(\"git/HEAD\") self.assertTrue(t is not None) class TestTreeGitDisabled(TestCase): def setUp(self):", "from flask import Flask from unittest import TestCase, main from", "import os.path import airflow import airflow.plugins_manager from airflow import configuration", "main from airflow_code_editor.commons import PLUGIN_NAME from airflow_code_editor.tree import ( get_tree,", "with app.app_context(): t = get_tree() self.assertTrue(len(t) > 0) self.assertTrue('git' in", "airflow.plugins_manager from airflow import configuration from flask import Flask from", "== 1 ) t = get_tree(\"files/folder\") self.assertTrue(len([x.get('id') for x in", ") assert airflow.plugins_manager app = Flask(__name__) class TestTree(TestCase): def setUp(self):", "= get_tree(\"tags\") self.assertIsNotNone(t) def test_local_branches(self): with app.app_context(): t = get_tree(\"local-branches\")", "== 'test_utils.py']) == 1 ) t = get_tree(\"files/folder\") self.assertTrue(len([x.get('id') for", "get_tree(\"files/folder\") self.assertTrue(len([x.get('id') for x in t if x.get('id') == '1'])", "= get_tree(\"tags\") self.assertEqual(t, []) t = get_tree(\"local-branches\") self.assertEqual(t, []) t", "'test_utils.py']) == 1 ) t = get_tree(\"files/folder\") self.assertTrue(len([x.get('id') for x", "self.root_dir = os.path.dirname(os.path.realpath(__file__)) configuration.conf.set(PLUGIN_NAME, 'git_init_repo', 'False') configuration.conf.set(PLUGIN_NAME, 'root_directory', self.root_dir) configuration.conf.set(PLUGIN_NAME,", "[]) t = 
get_tree(\"local-branches\") self.assertEqual(t, []) t = get_tree(\"remote-branches\") self.assertEqual(t,", "def test_git(self): with app.app_context(): t = get_tree(\"git/HEAD\") self.assertTrue(t is not", "t = get_tree() self.assertTrue(len(t) > 0) self.assertTrue('git' not in (x['id']", "in t)) t = get_tree(\"tags\") self.assertEqual(t, []) t = get_tree(\"local-branches\")", "= os.path.dirname(os.path.realpath(__file__)) configuration.conf.set(PLUGIN_NAME, 'git_init_repo', 'False') configuration.conf.set(PLUGIN_NAME, 'root_directory', self.root_dir) configuration.conf.set(PLUGIN_NAME, 'git_enabled',", "'root_directory', self.root_dir) def test_tree(self): with app.app_context(): t = get_tree() self.assertTrue(len(t)", "(x['id'] for x in t)) def test_tags(self): with app.app_context(): t", "t = get_tree(\"local-branches\") self.assertIsNotNone(t) def test_remote_branches(self): with app.app_context(): t =", "def test_local_branches(self): with app.app_context(): t = get_tree(\"local-branches\") self.assertIsNotNone(t) def test_remote_branches(self):", "airflow_code_editor.tree import ( get_tree, ) assert airflow.plugins_manager app = Flask(__name__)", "'False') configuration.conf.set(PLUGIN_NAME, 'root_directory', self.root_dir) def test_tree(self): with app.app_context(): t =", "def setUp(self): self.root_dir = os.path.dirname(os.path.realpath(__file__)) configuration.conf.set(PLUGIN_NAME, 'git_init_repo', 'False') configuration.conf.set(PLUGIN_NAME, 'root_directory',", "test_tags(self): with app.app_context(): t = get_tree(\"tags\") self.assertIsNotNone(t) def test_local_branches(self): with", "get_tree, ) assert airflow.plugins_manager app = Flask(__name__) class TestTree(TestCase): def", "configuration.conf.set(PLUGIN_NAME, 'root_directory', self.root_dir) def test_tree(self): with app.app_context(): t = get_tree()", "self.assertIsNotNone(t) def test_remote_branches(self): with app.app_context(): t = get_tree(\"remote-branches\") self.assertIsNotNone(t) 
def", "= get_tree(\"files\") self.assertTrue( len([x.get('id') for x in t if x.get('id')", "def test_remote_branches(self): with app.app_context(): t = get_tree(\"remote-branches\") self.assertIsNotNone(t) def test_files(self):", "> 0) self.assertTrue('git' not in (x['id'] for x in t))", "= get_tree(\"local-branches\") self.assertEqual(t, []) t = get_tree(\"remote-branches\") self.assertEqual(t, []) t", "t = get_tree(\"files\") self.assertTrue( len([x.get('id') for x in t if", "in t if x.get('id') == '1']) == 1) def test_git(self):", "python import os import os.path import airflow import airflow.plugins_manager from", "if x.get('id') == '1']) == 1) def test_git(self): with app.app_context():", "is not None) class TestTreeGitDisabled(TestCase): def setUp(self): self.root_dir = os.path.dirname(os.path.realpath(__file__))", "test_local_branches(self): with app.app_context(): t = get_tree(\"local-branches\") self.assertIsNotNone(t) def test_remote_branches(self): with", "with app.app_context(): t = get_tree(\"files\") self.assertTrue( len([x.get('id') for x in", "= get_tree() self.assertTrue(len(t) > 0) self.assertTrue('git' in (x['id'] for x", "airflow import airflow.plugins_manager from airflow import configuration from flask import", "== 1) def test_git(self): with app.app_context(): t = get_tree(\"git/HEAD\") self.assertTrue(t", "'git_init_repo', 'False') configuration.conf.set(PLUGIN_NAME, 'root_directory', self.root_dir) configuration.conf.set(PLUGIN_NAME, 'git_enabled', 'False') def test_tree(self):", "'False') configuration.conf.set(PLUGIN_NAME, 'root_directory', self.root_dir) configuration.conf.set(PLUGIN_NAME, 'git_enabled', 'False') def test_tree(self): with", "len([x.get('id') for x in t if x.get('id') == 'test_utils.py']) ==", "with app.app_context(): t = get_tree(\"remote-branches\") self.assertIsNotNone(t) def test_files(self): with app.app_context():", "from airflow_code_editor.commons import PLUGIN_NAME from airflow_code_editor.tree import ( get_tree, 
)", "t = get_tree() self.assertTrue(len(t) > 0) self.assertTrue('git' in (x['id'] for", "os.path import airflow import airflow.plugins_manager from airflow import configuration from", "'git_enabled', 'False') def test_tree(self): with app.app_context(): t = get_tree() self.assertTrue(len(t)", "import airflow import airflow.plugins_manager from airflow import configuration from flask", "with app.app_context(): t = get_tree(\"tags\") self.assertIsNotNone(t) def test_local_branches(self): with app.app_context():", "self.assertEqual(t, []) t = get_tree(\"files\") self.assertTrue( len([x.get('id') for x in", "in t if x.get('id') == '1']) == 1) if __name__", "get_tree() self.assertTrue(len(t) > 0) self.assertTrue('git' in (x['id'] for x in", "(x['id'] for x in t)) t = get_tree(\"tags\") self.assertEqual(t, [])", "<gh_stars>100-1000 #!/usr/bin/env python import os import os.path import airflow import", "( get_tree, ) assert airflow.plugins_manager app = Flask(__name__) class TestTree(TestCase):", "t if x.get('id') == 'test_utils.py']) == 1 ) t =", "t)) def test_tags(self): with app.app_context(): t = get_tree(\"tags\") self.assertIsNotNone(t) def", "for x in t)) def test_tags(self): with app.app_context(): t =", "for x in t if x.get('id') == 'test_utils.py']) == 1", "def test_tags(self): with app.app_context(): t = get_tree(\"tags\") self.assertIsNotNone(t) def test_local_branches(self):", "x in t if x.get('id') == '1']) == 1) def", "[]) t = get_tree(\"files\") self.assertTrue( len([x.get('id') for x in t", "app.app_context(): t = get_tree(\"git/HEAD\") self.assertTrue(t is not None) class TestTreeGitDisabled(TestCase):", "import Flask from unittest import TestCase, main from airflow_code_editor.commons import", "app.app_context(): t = get_tree() self.assertTrue(len(t) > 0) self.assertTrue('git' in (x['id']", "x in t)) def test_tags(self): with app.app_context(): t = get_tree(\"tags\")", "if x.get('id') == '1']) == 1) if __name__ == '__main__':", 
"configuration.conf.set(PLUGIN_NAME, 'git_init_repo', 'False') configuration.conf.set(PLUGIN_NAME, 'root_directory', self.root_dir) configuration.conf.set(PLUGIN_NAME, 'git_enabled', 'False') def", "get_tree(\"tags\") self.assertEqual(t, []) t = get_tree(\"local-branches\") self.assertEqual(t, []) t =", "assert airflow.plugins_manager app = Flask(__name__) class TestTree(TestCase): def setUp(self): self.root_dir", "test_git(self): with app.app_context(): t = get_tree(\"git/HEAD\") self.assertTrue(t is not None)", "in t if x.get('id') == 'test_utils.py']) == 1 ) t", "= Flask(__name__) class TestTree(TestCase): def setUp(self): self.root_dir = os.path.dirname(os.path.realpath(__file__)) configuration.conf.set(PLUGIN_NAME,", "= os.path.dirname(os.path.realpath(__file__)) configuration.conf.set(PLUGIN_NAME, 'git_init_repo', 'False') configuration.conf.set(PLUGIN_NAME, 'root_directory', self.root_dir) def test_tree(self):", "t)) t = get_tree(\"tags\") self.assertEqual(t, []) t = get_tree(\"local-branches\") self.assertEqual(t,", "t = get_tree(\"remote-branches\") self.assertIsNotNone(t) def test_files(self): with app.app_context(): t =", "import airflow.plugins_manager from airflow import configuration from flask import Flask", "t = get_tree(\"tags\") self.assertIsNotNone(t) def test_local_branches(self): with app.app_context(): t =", "get_tree(\"local-branches\") self.assertIsNotNone(t) def test_remote_branches(self): with app.app_context(): t = get_tree(\"remote-branches\") self.assertIsNotNone(t)", "self.assertIsNotNone(t) def test_files(self): with app.app_context(): t = get_tree(\"files\") self.assertTrue( len([x.get('id')", "x in t if x.get('id') == 'test_utils.py']) == 1 )", "= get_tree(\"remote-branches\") self.assertIsNotNone(t) def test_files(self): with app.app_context(): t = get_tree(\"files\")", "self.root_dir) def test_tree(self): with app.app_context(): t = get_tree() self.assertTrue(len(t) >", "configuration from flask import Flask from unittest import 
TestCase, main", "airflow_code_editor.commons import PLUGIN_NAME from airflow_code_editor.tree import ( get_tree, ) assert", "> 0) self.assertTrue('git' in (x['id'] for x in t)) def", "x in t)) t = get_tree(\"tags\") self.assertEqual(t, []) t =", "app.app_context(): t = get_tree(\"local-branches\") self.assertIsNotNone(t) def test_remote_branches(self): with app.app_context(): t", "app.app_context(): t = get_tree(\"tags\") self.assertIsNotNone(t) def test_local_branches(self): with app.app_context(): t", "1) def test_git(self): with app.app_context(): t = get_tree(\"git/HEAD\") self.assertTrue(t is", "class TestTreeGitDisabled(TestCase): def setUp(self): self.root_dir = os.path.dirname(os.path.realpath(__file__)) configuration.conf.set(PLUGIN_NAME, 'git_init_repo', 'False')", "t = get_tree(\"remote-branches\") self.assertEqual(t, []) t = get_tree(\"files\") self.assertTrue( len([x.get('id')", "import PLUGIN_NAME from airflow_code_editor.tree import ( get_tree, ) assert airflow.plugins_manager", "in t)) def test_tags(self): with app.app_context(): t = get_tree(\"tags\") self.assertIsNotNone(t)", "for x in t if x.get('id') == '1']) == 1)", "self.assertTrue(len([x.get('id') for x in t if x.get('id') == '1']) ==", "import ( get_tree, ) assert airflow.plugins_manager app = Flask(__name__) class", "get_tree() self.assertTrue(len(t) > 0) self.assertTrue('git' not in (x['id'] for x", "1 ) t = get_tree(\"files/folder\") self.assertTrue(len([x.get('id') for x in t", "self.assertTrue(len(t) > 0) self.assertTrue('git' not in (x['id'] for x in", "from airflow_code_editor.tree import ( get_tree, ) assert airflow.plugins_manager app =", "PLUGIN_NAME from airflow_code_editor.tree import ( get_tree, ) assert airflow.plugins_manager app", "'git_init_repo', 'False') configuration.conf.set(PLUGIN_NAME, 'root_directory', self.root_dir) def test_tree(self): with app.app_context(): t", "= get_tree(\"local-branches\") self.assertIsNotNone(t) def test_remote_branches(self): with 
app.app_context(): t = get_tree(\"remote-branches\")", "self.assertEqual(t, []) t = get_tree(\"remote-branches\") self.assertEqual(t, []) t = get_tree(\"files\")", "x.get('id') == '1']) == 1) def test_git(self): with app.app_context(): t", "self.assertIsNotNone(t) def test_local_branches(self): with app.app_context(): t = get_tree(\"local-branches\") self.assertIsNotNone(t) def", "= get_tree(\"files/folder\") self.assertTrue(len([x.get('id') for x in t if x.get('id') ==", "os.path.dirname(os.path.realpath(__file__)) configuration.conf.set(PLUGIN_NAME, 'git_init_repo', 'False') configuration.conf.set(PLUGIN_NAME, 'root_directory', self.root_dir) configuration.conf.set(PLUGIN_NAME, 'git_enabled', 'False')", "test_tree(self): with app.app_context(): t = get_tree() self.assertTrue(len(t) > 0) self.assertTrue('git'", "setUp(self): self.root_dir = os.path.dirname(os.path.realpath(__file__)) configuration.conf.set(PLUGIN_NAME, 'git_init_repo', 'False') configuration.conf.set(PLUGIN_NAME, 'root_directory', self.root_dir)", "TestTreeGitDisabled(TestCase): def setUp(self): self.root_dir = os.path.dirname(os.path.realpath(__file__)) configuration.conf.set(PLUGIN_NAME, 'git_init_repo', 'False') configuration.conf.set(PLUGIN_NAME,", "os import os.path import airflow import airflow.plugins_manager from airflow import", "t = get_tree(\"git/HEAD\") self.assertTrue(t is not None) class TestTreeGitDisabled(TestCase): def", "get_tree(\"tags\") self.assertIsNotNone(t) def test_local_branches(self): with app.app_context(): t = get_tree(\"local-branches\") self.assertIsNotNone(t)", "'root_directory', self.root_dir) configuration.conf.set(PLUGIN_NAME, 'git_enabled', 'False') def test_tree(self): with app.app_context(): t", "x.get('id') == '1']) == 1) if __name__ == '__main__': main()", "x in t if x.get('id') == '1']) == 1) if", "t if x.get('id') == '1']) == 1) if __name__ ==", "flask import Flask from unittest import TestCase, main from airflow_code_editor.commons", 
"self.assertTrue(len(t) > 0) self.assertTrue('git' in (x['id'] for x in t))", "configuration.conf.set(PLUGIN_NAME, 'git_init_repo', 'False') configuration.conf.set(PLUGIN_NAME, 'root_directory', self.root_dir) def test_tree(self): with app.app_context():", "t = get_tree(\"files/folder\") self.assertTrue(len([x.get('id') for x in t if x.get('id')", "TestCase, main from airflow_code_editor.commons import PLUGIN_NAME from airflow_code_editor.tree import (", "class TestTree(TestCase): def setUp(self): self.root_dir = os.path.dirname(os.path.realpath(__file__)) configuration.conf.set(PLUGIN_NAME, 'git_init_repo', 'False')", "self.assertTrue( len([x.get('id') for x in t if x.get('id') == 'test_utils.py'])", "app = Flask(__name__) class TestTree(TestCase): def setUp(self): self.root_dir = os.path.dirname(os.path.realpath(__file__))", "airflow.plugins_manager app = Flask(__name__) class TestTree(TestCase): def setUp(self): self.root_dir =", "x.get('id') == 'test_utils.py']) == 1 ) t = get_tree(\"files/folder\") self.assertTrue(len([x.get('id')", "app.app_context(): t = get_tree(\"remote-branches\") self.assertIsNotNone(t) def test_files(self): with app.app_context(): t", "for x in t)) t = get_tree(\"tags\") self.assertEqual(t, []) t", "== '1']) == 1) def test_git(self): with app.app_context(): t =", "configuration.conf.set(PLUGIN_NAME, 'root_directory', self.root_dir) configuration.conf.set(PLUGIN_NAME, 'git_enabled', 'False') def test_tree(self): with app.app_context():", "not in (x['id'] for x in t)) t = get_tree(\"tags\")", "self.assertEqual(t, []) t = get_tree(\"local-branches\") self.assertEqual(t, []) t = get_tree(\"remote-branches\")", "= get_tree() self.assertTrue(len(t) > 0) self.assertTrue('git' not in (x['id'] for", "= get_tree(\"remote-branches\") self.assertEqual(t, []) t = get_tree(\"files\") self.assertTrue( len([x.get('id') for", "in (x['id'] for x in t)) def test_tags(self): with app.app_context():", "get_tree(\"local-branches\") self.assertEqual(t, []) t 
= get_tree(\"remote-branches\") self.assertEqual(t, []) t =", "self.root_dir = os.path.dirname(os.path.realpath(__file__)) configuration.conf.set(PLUGIN_NAME, 'git_init_repo', 'False') configuration.conf.set(PLUGIN_NAME, 'root_directory', self.root_dir) def", "'1']) == 1) def test_git(self): with app.app_context(): t = get_tree(\"git/HEAD\")", "def test_tree(self): with app.app_context(): t = get_tree() self.assertTrue(len(t) > 0)" ]
[ "= \"super-secret\" # Change this! jwt = JWTManager(app) # Standard", "a fresh access token and # a refresh token @app.route(\"/login\",", "to be used if we need to # make a", "methods=[\"GET\"]) @fresh_jwt_required async def protected_fresh(): username = get_jwt_identity() return dict(fresh_logged_in_as=username),", "password != \"<PASSWORD>\": return {\"msg\": \"Bad username or password\"}, 401", "token and # a refresh token @app.route(\"/login\", methods=[\"POST\"]) async def", "endpoint. This will generate a new access token from #", "entirely defeats their point. @app.route(\"/fresh-login\", methods=[\"POST\"]) async def fresh_login(): username", "endpoint, # this will only return a new access token,", "argument, # which marks the token as fresh or non-fresh", "from quart_jwt_extended import ( JWTManager, jwt_required, create_access_token, jwt_refresh_token_required, create_refresh_token, get_jwt_identity,", "create_refresh_token, get_jwt_identity, fresh_jwt_required, ) app = Quart(__name__) app.config[\"JWT_SECRET_KEY\"] = \"super-secret\"", "password = (await request.get_json()).get(\"password\", None) if username != \"test\" or", "or password\"}, 401 new_token = create_access_token(identity=username, fresh=True) ret = {\"access_token\":", "not actually verify a password in this endpoint. @app.route(\"/refresh\", methods=[\"POST\"])", "be used if we need to # make a fresh", "this! jwt = JWTManager(app) # Standard login endpoint. Will return", "200 # Fresh login endpoint. This is designed to be", "to # make a fresh token for a user (by", "( JWTManager, jwt_required, create_access_token, jwt_refresh_token_required, create_refresh_token, get_jwt_identity, fresh_jwt_required, ) app", "Change this! jwt = JWTManager(app) # Standard login endpoint. Will", "the standard login endpoint, # this will only return a", "a new access token, so that we don't keep #", "# Change this! 
jwt = JWTManager(app) # Standard login endpoint.", "create_access_token(identity=username, fresh=True) ret = {\"access_token\": new_token} return ret, 200 #", "we are # going to mark the token as fresh", "# correct username and password). Unlike the standard login endpoint,", "need to # make a fresh token for a user", "we just verified their username and password, we are #", "fresh or non-fresh accordingly. # As we just verified their", "methods=[\"POST\"]) async def fresh_login(): username = (await request.get_json()).get(\"username\", None) password", "the refresh token, but will mark that access token as", "token from # the refresh token, but will mark that", "the token as fresh or non-fresh accordingly. # As we", "and password, we are # going to mark the token", "are # going to mark the token as fresh here.", "@app.route(\"/refresh\", methods=[\"POST\"]) @jwt_refresh_token_required async def refresh(): current_user = get_jwt_identity() new_token", "fresh token for a user (by verifying they have the", "username and password). Unlike the standard login endpoint, # this", "protected_fresh(): username = get_jwt_identity() return dict(fresh_logged_in_as=username), 200 if __name__ ==", "or password\"}, 401 # create_access_token supports an optional 'fresh' argument,", "quart import Quart, jsonify, request from quart_jwt_extended import ( JWTManager,", "Refresh token endpoint. This will generate a new access token", "\"access_token\": create_access_token(identity=username, fresh=True), \"refresh_token\": create_refresh_token(identity=username), } return ret, 200 #", "jwt = JWTManager(app) # Standard login endpoint. Will return a", "def protected(): username = get_jwt_identity() return dict(logged_in_as=username), 200 # Only", "token as non-fresh, # as we do not actually verify", "fresh here. 
ret = { \"access_token\": create_access_token(identity=username, fresh=True), \"refresh_token\": create_refresh_token(identity=username),", "non-fresh, # as we do not actually verify a password", "# a refresh token @app.route(\"/login\", methods=[\"POST\"]) async def login(): username", "200 # Only fresh JWTs can access this endpoint @app.route(\"/protected-fresh\",", "= JWTManager(app) # Standard login endpoint. Will return a fresh", "endpoint. Will return a fresh access token and # a", "return {\"msg\": \"Bad username or password\"}, 401 new_token = create_access_token(identity=username,", "\"super-secret\" # Change this! jwt = JWTManager(app) # Standard login", "get_jwt_identity() return dict(logged_in_as=username), 200 # Only fresh JWTs can access", "to mark the token as fresh here. ret = {", "we do not actually verify a password in this endpoint.", "# make a fresh token for a user (by verifying", "import ( JWTManager, jwt_required, create_access_token, jwt_refresh_token_required, create_refresh_token, get_jwt_identity, fresh_jwt_required, )", "access token as non-fresh, # as we do not actually", "# generating new refresh tokens, which entirely defeats their point.", "their username and password, we are # going to mark", "this endpoint. @app.route(\"/refresh\", methods=[\"POST\"]) @jwt_refresh_token_required async def refresh(): current_user =", "new_token = create_access_token(identity=current_user, fresh=False) ret = {\"access_token\": new_token} return ret,", "= get_jwt_identity() new_token = create_access_token(identity=current_user, fresh=False) ret = {\"access_token\": new_token}", "login endpoint. This is designed to be used if we", "a user (by verifying they have the # correct username", "{\"access_token\": new_token} return ret, 200 # Any valid JWT can", "as fresh here. ret = { \"access_token\": create_access_token(identity=username, fresh=True), \"refresh_token\":", "new_token} return ret, 200 # Fresh login endpoint. 
This is", "valid JWT can access this endpoint @app.route(\"/protected\", methods=[\"GET\"]) @jwt_required async", "accordingly. # As we just verified their username and password,", "} return ret, 200 # Refresh token endpoint. This will", "fresh_jwt_required, ) app = Quart(__name__) app.config[\"JWT_SECRET_KEY\"] = \"super-secret\" # Change", "request.get_json()).get(\"password\", None) if username != \"test\" or password != \"<PASSWORD>\":", "\"<PASSWORD>\": return {\"msg\": \"Bad username or password\"}, 401 new_token =", "protected(): username = get_jwt_identity() return dict(logged_in_as=username), 200 # Only fresh", "current_user = get_jwt_identity() new_token = create_access_token(identity=current_user, fresh=False) ret = {\"access_token\":", "JWT can access this endpoint @app.route(\"/protected\", methods=[\"GET\"]) @jwt_required async def", "\"<PASSWORD>\": return {\"msg\": \"Bad username or password\"}, 401 # create_access_token", "# this will only return a new access token, so", "'fresh' argument, # which marks the token as fresh or", "quart_jwt_extended import ( JWTManager, jwt_required, create_access_token, jwt_refresh_token_required, create_refresh_token, get_jwt_identity, fresh_jwt_required,", "a fresh token for a user (by verifying they have", "access token, so that we don't keep # generating new", "import Quart, jsonify, request from quart_jwt_extended import ( JWTManager, jwt_required,", "Only fresh JWTs can access this endpoint @app.route(\"/protected-fresh\", methods=[\"GET\"]) @fresh_jwt_required", "def refresh(): current_user = get_jwt_identity() new_token = create_access_token(identity=current_user, fresh=False) ret", "password\"}, 401 new_token = create_access_token(identity=username, fresh=True) ret = {\"access_token\": new_token}", "= get_jwt_identity() return dict(logged_in_as=username), 200 # Only fresh JWTs can", "Unlike the standard login endpoint, # this will only return", "for a user (by verifying they have the # correct", "return a 
new access token, so that we don't keep", "# Only fresh JWTs can access this endpoint @app.route(\"/protected-fresh\", methods=[\"GET\"])", "as non-fresh, # as we do not actually verify a", "= {\"access_token\": new_token} return ret, 200 # Fresh login endpoint.", "correct username and password). Unlike the standard login endpoint, #", "and password). Unlike the standard login endpoint, # this will", "ret = {\"access_token\": new_token} return ret, 200 # Any valid", "# the refresh token, but will mark that access token", "verify a password in this endpoint. @app.route(\"/refresh\", methods=[\"POST\"]) @jwt_refresh_token_required async", "@jwt_required async def protected(): username = get_jwt_identity() return dict(logged_in_as=username), 200", "can access this endpoint @app.route(\"/protected-fresh\", methods=[\"GET\"]) @fresh_jwt_required async def protected_fresh():", "endpoint @app.route(\"/protected\", methods=[\"GET\"]) @jwt_required async def protected(): username = get_jwt_identity()", "username = get_jwt_identity() return dict(logged_in_as=username), 200 # Only fresh JWTs", "going to mark the token as fresh here. ret =", "@jwt_refresh_token_required async def refresh(): current_user = get_jwt_identity() new_token = create_access_token(identity=current_user,", "None) if username != \"test\" or password != \"<PASSWORD>\": return", "= create_access_token(identity=current_user, fresh=False) ret = {\"access_token\": new_token} return ret, 200", "this endpoint @app.route(\"/protected-fresh\", methods=[\"GET\"]) @fresh_jwt_required async def protected_fresh(): username =", "the token as fresh here. ret = { \"access_token\": create_access_token(identity=username,", "200 # Refresh token endpoint. This will generate a new", "endpoint. This is designed to be used if we need", "that we don't keep # generating new refresh tokens, which", "make a fresh token for a user (by verifying they", "is designed to be used if we need to #", "in this endpoint. 
@app.route(\"/refresh\", methods=[\"POST\"]) @jwt_refresh_token_required async def refresh(): current_user", "# create_access_token supports an optional 'fresh' argument, # which marks", "Will return a fresh access token and # a refresh", "password). Unlike the standard login endpoint, # this will only", "this will only return a new access token, so that", "used if we need to # make a fresh token", "(await request.get_json()).get(\"password\", None) if username != \"test\" or password !=", "we don't keep # generating new refresh tokens, which entirely", "as fresh or non-fresh accordingly. # As we just verified", "that access token as non-fresh, # as we do not", "user (by verifying they have the # correct username and", "= (await request.get_json()).get(\"password\", None) if username != \"test\" or password", "here. ret = { \"access_token\": create_access_token(identity=username, fresh=True), \"refresh_token\": create_refresh_token(identity=username), }", "# Fresh login endpoint. This is designed to be used", "(await request.get_json()).get(\"username\", None) password = (await request.get_json()).get(\"password\", None) if username", "they have the # correct username and password). Unlike the", "fresh=True) ret = {\"access_token\": new_token} return ret, 200 # Any", "new_token} return ret, 200 # Any valid JWT can access", "which entirely defeats their point. @app.route(\"/fresh-login\", methods=[\"POST\"]) async def fresh_login():", "{\"msg\": \"Bad username or password\"}, 401 new_token = create_access_token(identity=username, fresh=True)", "do not actually verify a password in this endpoint. @app.route(\"/refresh\",", "verifying they have the # correct username and password). Unlike", "non-fresh accordingly. 
# As we just verified their username and", "401 # create_access_token supports an optional 'fresh' argument, # which", "create_access_token, jwt_refresh_token_required, create_refresh_token, get_jwt_identity, fresh_jwt_required, ) app = Quart(__name__) app.config[\"JWT_SECRET_KEY\"]", "keep # generating new refresh tokens, which entirely defeats their", "\"Bad username or password\"}, 401 new_token = create_access_token(identity=username, fresh=True) ret", "None) password = (await request.get_json()).get(\"password\", None) if username != \"test\"", "if username != \"test\" or password != \"<PASSWORD>\": return {\"msg\":", "methods=[\"POST\"]) async def login(): username = (await request.get_json()).get(\"username\", None) password", "login endpoint. Will return a fresh access token and #", "request.get_json()).get(\"username\", None) password = (await request.get_json()).get(\"password\", None) if username !=", "the # correct username and password). Unlike the standard login", "Fresh login endpoint. This is designed to be used if", "username = (await request.get_json()).get(\"username\", None) password = (await request.get_json()).get(\"password\", None)", "fresh JWTs can access this endpoint @app.route(\"/protected-fresh\", methods=[\"GET\"]) @fresh_jwt_required async", "only return a new access token, so that we don't", "refresh token @app.route(\"/login\", methods=[\"POST\"]) async def login(): username = (await", "JWTManager(app) # Standard login endpoint. Will return a fresh access", "as we do not actually verify a password in this", "actually verify a password in this endpoint. 
@app.route(\"/refresh\", methods=[\"POST\"]) @jwt_refresh_token_required", "will only return a new access token, so that we", "create_access_token(identity=username, fresh=True), \"refresh_token\": create_refresh_token(identity=username), } return ret, 200 # Refresh", "password, we are # going to mark the token as", "Quart, jsonify, request from quart_jwt_extended import ( JWTManager, jwt_required, create_access_token,", "!= \"test\" or password != \"<PASSWORD>\": return {\"msg\": \"Bad username", "get_jwt_identity() new_token = create_access_token(identity=current_user, fresh=False) ret = {\"access_token\": new_token} return", "new refresh tokens, which entirely defeats their point. @app.route(\"/fresh-login\", methods=[\"POST\"])", "def protected_fresh(): username = get_jwt_identity() return dict(fresh_logged_in_as=username), 200 if __name__", "endpoint @app.route(\"/protected-fresh\", methods=[\"GET\"]) @fresh_jwt_required async def protected_fresh(): username = get_jwt_identity()", "def fresh_login(): username = (await request.get_json()).get(\"username\", None) password = (await", "mark the token as fresh here. ret = { \"access_token\":", "new access token, so that we don't keep # generating", "= Quart(__name__) app.config[\"JWT_SECRET_KEY\"] = \"super-secret\" # Change this! jwt =", "app = Quart(__name__) app.config[\"JWT_SECRET_KEY\"] = \"super-secret\" # Change this! jwt", "@app.route(\"/protected-fresh\", methods=[\"GET\"]) @fresh_jwt_required async def protected_fresh(): username = get_jwt_identity() return", "# going to mark the token as fresh here. 
ret", "standard login endpoint, # this will only return a new", "token, so that we don't keep # generating new refresh", "= { \"access_token\": create_access_token(identity=username, fresh=True), \"refresh_token\": create_refresh_token(identity=username), } return ret,", "\"test\" or password != \"<PASSWORD>\": return {\"msg\": \"Bad username or", "new_token = create_access_token(identity=username, fresh=True) ret = {\"access_token\": new_token} return ret,", "# Any valid JWT can access this endpoint @app.route(\"/protected\", methods=[\"GET\"])", "!= \"<PASSWORD>\": return {\"msg\": \"Bad username or password\"}, 401 new_token", "async def protected_fresh(): username = get_jwt_identity() return dict(fresh_logged_in_as=username), 200 if", "{\"access_token\": new_token} return ret, 200 # Fresh login endpoint. This", "create_access_token(identity=current_user, fresh=False) ret = {\"access_token\": new_token} return ret, 200 #", "username != \"test\" or password != \"<PASSWORD>\": return {\"msg\": \"Bad", "ret = {\"access_token\": new_token} return ret, 200 # Fresh login", "# as we do not actually verify a password in", "and # a refresh token @app.route(\"/login\", methods=[\"POST\"]) async def login():", "def login(): username = (await request.get_json()).get(\"username\", None) password = (await", "can access this endpoint @app.route(\"/protected\", methods=[\"GET\"]) @jwt_required async def protected():", "Quart(__name__) app.config[\"JWT_SECRET_KEY\"] = \"super-secret\" # Change this! jwt = JWTManager(app)", "# As we just verified their username and password, we", "= get_jwt_identity() return dict(fresh_logged_in_as=username), 200 if __name__ == \"__main__\": app.run()", "a refresh token @app.route(\"/login\", methods=[\"POST\"]) async def login(): username =", "but will mark that access token as non-fresh, # as", "This is designed to be used if we need to", "ret, 200 # Fresh login endpoint. 
This is designed to", "token for a user (by verifying they have the #", "from # the refresh token, but will mark that access", "login endpoint, # this will only return a new access", "Any valid JWT can access this endpoint @app.route(\"/protected\", methods=[\"GET\"]) @jwt_required", "# which marks the token as fresh or non-fresh accordingly.", "get_jwt_identity, fresh_jwt_required, ) app = Quart(__name__) app.config[\"JWT_SECRET_KEY\"] = \"super-secret\" #", "just verified their username and password, we are # going", "\"refresh_token\": create_refresh_token(identity=username), } return ret, 200 # Refresh token endpoint.", "async def login(): username = (await request.get_json()).get(\"username\", None) password =", "or non-fresh accordingly. # As we just verified their username", "verified their username and password, we are # going to", "This will generate a new access token from # the", "async def fresh_login(): username = (await request.get_json()).get(\"username\", None) password =", "ret, 200 # Any valid JWT can access this endpoint", "{ \"access_token\": create_access_token(identity=username, fresh=True), \"refresh_token\": create_refresh_token(identity=username), } return ret, 200", "password\"}, 401 # create_access_token supports an optional 'fresh' argument, #", "ret, 200 # Refresh token endpoint. This will generate a", "jwt_refresh_token_required, create_refresh_token, get_jwt_identity, fresh_jwt_required, ) app = Quart(__name__) app.config[\"JWT_SECRET_KEY\"] =", "tokens, which entirely defeats their point. @app.route(\"/fresh-login\", methods=[\"POST\"]) async def", "generating new refresh tokens, which entirely defeats their point. 
@app.route(\"/fresh-login\",", "username or password\"}, 401 # create_access_token supports an optional 'fresh'", "methods=[\"POST\"]) @jwt_refresh_token_required async def refresh(): current_user = get_jwt_identity() new_token =", "if we need to # make a fresh token for", "access token from # the refresh token, but will mark", "login(): username = (await request.get_json()).get(\"username\", None) password = (await request.get_json()).get(\"password\",", "401 new_token = create_access_token(identity=username, fresh=True) ret = {\"access_token\": new_token} return", "= create_access_token(identity=username, fresh=True) ret = {\"access_token\": new_token} return ret, 200", "return ret, 200 # Any valid JWT can access this", "(by verifying they have the # correct username and password).", "async def protected(): username = get_jwt_identity() return dict(logged_in_as=username), 200 #", "jsonify, request from quart_jwt_extended import ( JWTManager, jwt_required, create_access_token, jwt_refresh_token_required,", "fresh access token and # a refresh token @app.route(\"/login\", methods=[\"POST\"])", "a new access token from # the refresh token, but", "a password in this endpoint. @app.route(\"/refresh\", methods=[\"POST\"]) @jwt_refresh_token_required async def", "refresh(): current_user = get_jwt_identity() new_token = create_access_token(identity=current_user, fresh=False) ret =", "methods=[\"GET\"]) @jwt_required async def protected(): username = get_jwt_identity() return dict(logged_in_as=username),", "fresh=True), \"refresh_token\": create_refresh_token(identity=username), } return ret, 200 # Refresh token", "create_refresh_token(identity=username), } return ret, 200 # Refresh token endpoint. This", "marks the token as fresh or non-fresh accordingly. 
# As", "supports an optional 'fresh' argument, # which marks the token", "designed to be used if we need to # make", "access this endpoint @app.route(\"/protected-fresh\", methods=[\"GET\"]) @fresh_jwt_required async def protected_fresh(): username", "return dict(logged_in_as=username), 200 # Only fresh JWTs can access this", "access token and # a refresh token @app.route(\"/login\", methods=[\"POST\"]) async", "or password != \"<PASSWORD>\": return {\"msg\": \"Bad username or password\"},", "have the # correct username and password). Unlike the standard", "endpoint. @app.route(\"/refresh\", methods=[\"POST\"]) @jwt_refresh_token_required async def refresh(): current_user = get_jwt_identity()", "username and password, we are # going to mark the", "token as fresh here. ret = { \"access_token\": create_access_token(identity=username, fresh=True),", "JWTManager, jwt_required, create_access_token, jwt_refresh_token_required, create_refresh_token, get_jwt_identity, fresh_jwt_required, ) app =", "new access token from # the refresh token, but will", "this endpoint @app.route(\"/protected\", methods=[\"GET\"]) @jwt_required async def protected(): username =", "their point. @app.route(\"/fresh-login\", methods=[\"POST\"]) async def fresh_login(): username = (await", "async def refresh(): current_user = get_jwt_identity() new_token = create_access_token(identity=current_user, fresh=False)", "As we just verified their username and password, we are", "token @app.route(\"/login\", methods=[\"POST\"]) async def login(): username = (await request.get_json()).get(\"username\",", "will mark that access token as non-fresh, # as we", "password in this endpoint. 
@app.route(\"/refresh\", methods=[\"POST\"]) @jwt_refresh_token_required async def refresh():", "from quart import Quart, jsonify, request from quart_jwt_extended import (", "return {\"msg\": \"Bad username or password\"}, 401 # create_access_token supports", "@fresh_jwt_required async def protected_fresh(): username = get_jwt_identity() return dict(fresh_logged_in_as=username), 200", "= (await request.get_json()).get(\"username\", None) password = (await request.get_json()).get(\"password\", None) if", "generate a new access token from # the refresh token,", "@app.route(\"/fresh-login\", methods=[\"POST\"]) async def fresh_login(): username = (await request.get_json()).get(\"username\", None)", "!= \"<PASSWORD>\": return {\"msg\": \"Bad username or password\"}, 401 #", "{\"msg\": \"Bad username or password\"}, 401 # create_access_token supports an", "# Standard login endpoint. Will return a fresh access token", "= {\"access_token\": new_token} return ret, 200 # Any valid JWT", "jwt_required, create_access_token, jwt_refresh_token_required, create_refresh_token, get_jwt_identity, fresh_jwt_required, ) app = Quart(__name__)", "so that we don't keep # generating new refresh tokens,", "@app.route(\"/protected\", methods=[\"GET\"]) @jwt_required async def protected(): username = get_jwt_identity() return", "token, but will mark that access token as non-fresh, #", "app.config[\"JWT_SECRET_KEY\"] = \"super-secret\" # Change this! jwt = JWTManager(app) #", "return ret, 200 # Refresh token endpoint. This will generate", "refresh token, but will mark that access token as non-fresh,", "mark that access token as non-fresh, # as we do", "defeats their point. 
@app.route(\"/fresh-login\", methods=[\"POST\"]) async def fresh_login(): username =", "an optional 'fresh' argument, # which marks the token as", "we need to # make a fresh token for a", "optional 'fresh' argument, # which marks the token as fresh", "will generate a new access token from # the refresh", "access this endpoint @app.route(\"/protected\", methods=[\"GET\"]) @jwt_required async def protected(): username", "200 # Any valid JWT can access this endpoint @app.route(\"/protected\",", "return a fresh access token and # a refresh token", "username or password\"}, 401 new_token = create_access_token(identity=username, fresh=True) ret =", "point. @app.route(\"/fresh-login\", methods=[\"POST\"]) async def fresh_login(): username = (await request.get_json()).get(\"username\",", "fresh_login(): username = (await request.get_json()).get(\"username\", None) password = (await request.get_json()).get(\"password\",", "create_access_token supports an optional 'fresh' argument, # which marks the", "dict(logged_in_as=username), 200 # Only fresh JWTs can access this endpoint", "JWTs can access this endpoint @app.route(\"/protected-fresh\", methods=[\"GET\"]) @fresh_jwt_required async def", "username = get_jwt_identity() return dict(fresh_logged_in_as=username), 200 if __name__ == \"__main__\":", "which marks the token as fresh or non-fresh accordingly. #", "# Refresh token endpoint. This will generate a new access", "don't keep # generating new refresh tokens, which entirely defeats", "Standard login endpoint. Will return a fresh access token and", "return ret, 200 # Fresh login endpoint. This is designed", "refresh tokens, which entirely defeats their point. @app.route(\"/fresh-login\", methods=[\"POST\"]) async", "token as fresh or non-fresh accordingly. 
# As we just", "ret = { \"access_token\": create_access_token(identity=username, fresh=True), \"refresh_token\": create_refresh_token(identity=username), } return", "request from quart_jwt_extended import ( JWTManager, jwt_required, create_access_token, jwt_refresh_token_required, create_refresh_token,", ") app = Quart(__name__) app.config[\"JWT_SECRET_KEY\"] = \"super-secret\" # Change this!", "@app.route(\"/login\", methods=[\"POST\"]) async def login(): username = (await request.get_json()).get(\"username\", None)", "fresh=False) ret = {\"access_token\": new_token} return ret, 200 # Fresh", "token endpoint. This will generate a new access token from", "\"Bad username or password\"}, 401 # create_access_token supports an optional" ]
[ "import Any, Dict, Union __all__ = 'AnyDict' AnyDict = Dict[str,", "= Dict[str, Any] # pragma: no mutate datetime_or_str = Union[datetime,", "'AnyDict' AnyDict = Dict[str, Any] # pragma: no mutate datetime_or_str", "Any] # pragma: no mutate datetime_or_str = Union[datetime, str] #", "Union __all__ = 'AnyDict' AnyDict = Dict[str, Any] # pragma:", "typing import Any, Dict, Union __all__ = 'AnyDict' AnyDict =", "pragma: no mutate datetime_or_str = Union[datetime, str] # pragma: no", "# pragma: no mutate datetime_or_str = Union[datetime, str] # pragma:", "Dict, Union __all__ = 'AnyDict' AnyDict = Dict[str, Any] #", "no mutate datetime_or_str = Union[datetime, str] # pragma: no mutate", "import datetime from typing import Any, Dict, Union __all__ =", "= 'AnyDict' AnyDict = Dict[str, Any] # pragma: no mutate", "from datetime import datetime from typing import Any, Dict, Union", "Dict[str, Any] # pragma: no mutate datetime_or_str = Union[datetime, str]", "__all__ = 'AnyDict' AnyDict = Dict[str, Any] # pragma: no", "datetime from typing import Any, Dict, Union __all__ = 'AnyDict'", "datetime import datetime from typing import Any, Dict, Union __all__", "AnyDict = Dict[str, Any] # pragma: no mutate datetime_or_str =", "from typing import Any, Dict, Union __all__ = 'AnyDict' AnyDict", "Any, Dict, Union __all__ = 'AnyDict' AnyDict = Dict[str, Any]" ]
[ "preds = Dense(1,activation='linear')(inputs) model = Model(inputs=inputs,outputs=preds) sgd = keras.optimizers.SGD() model.compile(optimizer=sgd", "= Model(inputs=inputs,outputs=preds) sgd = keras.optimizers.SGD() model.compile(optimizer=sgd ,loss='mse') model.fit(x_,y_, batch_size=1, verbose=1,", "import keras from keras.layers import Input, Dense from keras.models import", "import StandardScaler, MinMaxScaler, Normalizer if __name__ == '__main__': df =", "= 'theano' os.environ['THEANO_FLAGS'] = 'floatX=float32,device=cpu' import cloudpickle as pickle import", "y_ = sc.fit_transform(y) inputs = Input(shape=(1,)) preds = Dense(1,activation='linear')(inputs) model", "= MinMaxScaler(feature_range=(-1,1)) x_ = sc.fit_transform(x) y_ = sc.fit_transform(y) inputs =", "if __name__ == '__main__': df = pd.read_csv(\"../input/training/training.csv\") df[\"People per Television\"]", "df[\"People per Television\"] = pd.to_numeric(df[\"People per Television\"],errors='coerce') df = df.dropna()", "df[\"People per Television\"].values.reshape(-1,1).astype(np.float64) y = df[\"People per Physician\"].values.reshape(-1,1).astype(np.float64) # min-max", "keras.optimizers.SGD() model.compile(optimizer=sgd ,loss='mse') model.fit(x_,y_, batch_size=1, verbose=1, epochs=10, shuffle=False) save_model(model, 'state/keras_theano_linear_model_state.h5')", "import pipeline_invoke import pandas as pd import numpy as np", "np import keras from keras.layers import Input, Dense from keras.models", "'floatX=float32,device=cpu' import cloudpickle as pickle import pipeline_invoke import pandas as", "df = df.dropna() x = df[\"People per Television\"].values.reshape(-1,1).astype(np.float64) y =", "keras from keras.layers import Input, Dense from keras.models import Model", "Normalizer if __name__ == '__main__': df = pd.read_csv(\"../input/training/training.csv\") df[\"People per", "os os.environ['KERAS_BACKEND'] = 'theano' os.environ['THEANO_FLAGS'] = 'floatX=float32,device=cpu' import cloudpickle 
as", "verbose=1, epochs=10, shuffle=False) save_model(model, 'state/keras_theano_linear_model_state.h5') # model_pkl_path = 'model.pkl' #", "x_ = sc.fit_transform(x) y_ = sc.fit_transform(y) inputs = Input(shape=(1,)) preds", "= sc.fit_transform(x) y_ = sc.fit_transform(y) inputs = Input(shape=(1,)) preds =", "as pd import numpy as np import keras from keras.layers", "import pandas as pd import numpy as np import keras", "inputs = Input(shape=(1,)) preds = Dense(1,activation='linear')(inputs) model = Model(inputs=inputs,outputs=preds) sgd", "Input(shape=(1,)) preds = Dense(1,activation='linear')(inputs) model = Model(inputs=inputs,outputs=preds) sgd = keras.optimizers.SGD()", "keras.layers import Input, Dense from keras.models import Model from keras.models", "keras.models import Model from keras.models import save_model, load_model from sklearn.preprocessing", "import Model from keras.models import save_model, load_model from sklearn.preprocessing import", "epochs=10, shuffle=False) save_model(model, 'state/keras_theano_linear_model_state.h5') # model_pkl_path = 'model.pkl' # with", "pipeline_invoke import pandas as pd import numpy as np import", "df.dropna() x = df[\"People per Television\"].values.reshape(-1,1).astype(np.float64) y = df[\"People per", "min-max -1,1 sc = MinMaxScaler(feature_range=(-1,1)) x_ = sc.fit_transform(x) y_ =", "per Physician\"].values.reshape(-1,1).astype(np.float64) # min-max -1,1 sc = MinMaxScaler(feature_range=(-1,1)) x_ =", "from keras.layers import Input, Dense from keras.models import Model from", "load_model from sklearn.preprocessing import StandardScaler, MinMaxScaler, Normalizer if __name__ ==", "from keras.models import save_model, load_model from sklearn.preprocessing import StandardScaler, MinMaxScaler,", "= pd.read_csv(\"../input/training/training.csv\") df[\"People per Television\"] = pd.to_numeric(df[\"People per Television\"],errors='coerce') df", "= df[\"People per Physician\"].values.reshape(-1,1).astype(np.float64) # 
min-max -1,1 sc = MinMaxScaler(feature_range=(-1,1))", "'__main__': df = pd.read_csv(\"../input/training/training.csv\") df[\"People per Television\"] = pd.to_numeric(df[\"People per", "pickle import pipeline_invoke import pandas as pd import numpy as", "pd.to_numeric(df[\"People per Television\"],errors='coerce') df = df.dropna() x = df[\"People per", "as pickle import pipeline_invoke import pandas as pd import numpy", "import numpy as np import keras from keras.layers import Input,", "== '__main__': df = pd.read_csv(\"../input/training/training.csv\") df[\"People per Television\"] = pd.to_numeric(df[\"People", "model_pkl_path = 'model.pkl' # with open(model_pkl_path, 'wb') as fh: #", "Model(inputs=inputs,outputs=preds) sgd = keras.optimizers.SGD() model.compile(optimizer=sgd ,loss='mse') model.fit(x_,y_, batch_size=1, verbose=1, epochs=10,", "Television\"],errors='coerce') df = df.dropna() x = df[\"People per Television\"].values.reshape(-1,1).astype(np.float64) y", "'theano' os.environ['THEANO_FLAGS'] = 'floatX=float32,device=cpu' import cloudpickle as pickle import pipeline_invoke", "batch_size=1, verbose=1, epochs=10, shuffle=False) save_model(model, 'state/keras_theano_linear_model_state.h5') # model_pkl_path = 'model.pkl'", "per Television\"].values.reshape(-1,1).astype(np.float64) y = df[\"People per Physician\"].values.reshape(-1,1).astype(np.float64) # min-max -1,1", "model.fit(x_,y_, batch_size=1, verbose=1, epochs=10, shuffle=False) save_model(model, 'state/keras_theano_linear_model_state.h5') # model_pkl_path =", "os.environ['KERAS_BACKEND'] = 'theano' os.environ['THEANO_FLAGS'] = 'floatX=float32,device=cpu' import cloudpickle as pickle", "y = df[\"People per Physician\"].values.reshape(-1,1).astype(np.float64) # min-max -1,1 sc =", "Dense(1,activation='linear')(inputs) model = Model(inputs=inputs,outputs=preds) sgd = keras.optimizers.SGD() model.compile(optimizer=sgd ,loss='mse') model.fit(x_,y_,", "= df.dropna() x = df[\"People per 
Television\"].values.reshape(-1,1).astype(np.float64) y = df[\"People", "# model_pkl_path = 'model.pkl' # with open(model_pkl_path, 'wb') as fh:", ",loss='mse') model.fit(x_,y_, batch_size=1, verbose=1, epochs=10, shuffle=False) save_model(model, 'state/keras_theano_linear_model_state.h5') # model_pkl_path", "as np import keras from keras.layers import Input, Dense from", "Physician\"].values.reshape(-1,1).astype(np.float64) # min-max -1,1 sc = MinMaxScaler(feature_range=(-1,1)) x_ = sc.fit_transform(x)", "import Input, Dense from keras.models import Model from keras.models import", "import os os.environ['KERAS_BACKEND'] = 'theano' os.environ['THEANO_FLAGS'] = 'floatX=float32,device=cpu' import cloudpickle", "cloudpickle as pickle import pipeline_invoke import pandas as pd import", "pandas as pd import numpy as np import keras from", "StandardScaler, MinMaxScaler, Normalizer if __name__ == '__main__': df = pd.read_csv(\"../input/training/training.csv\")", "keras.models import save_model, load_model from sklearn.preprocessing import StandardScaler, MinMaxScaler, Normalizer", "<gh_stars>10-100 import os os.environ['KERAS_BACKEND'] = 'theano' os.environ['THEANO_FLAGS'] = 'floatX=float32,device=cpu' import", "sklearn.preprocessing import StandardScaler, MinMaxScaler, Normalizer if __name__ == '__main__': df", "Television\"].values.reshape(-1,1).astype(np.float64) y = df[\"People per Physician\"].values.reshape(-1,1).astype(np.float64) # min-max -1,1 sc", "= pd.to_numeric(df[\"People per Television\"],errors='coerce') df = df.dropna() x = df[\"People", "MinMaxScaler, Normalizer if __name__ == '__main__': df = pd.read_csv(\"../input/training/training.csv\") df[\"People", "'state/keras_theano_linear_model_state.h5') # model_pkl_path = 'model.pkl' # with open(model_pkl_path, 'wb') as", "__name__ == '__main__': df = pd.read_csv(\"../input/training/training.csv\") df[\"People per Television\"] =", "= Input(shape=(1,)) preds = Dense(1,activation='linear')(inputs) model = 
Model(inputs=inputs,outputs=preds) sgd =", "os.environ['THEANO_FLAGS'] = 'floatX=float32,device=cpu' import cloudpickle as pickle import pipeline_invoke import", "sc.fit_transform(x) y_ = sc.fit_transform(y) inputs = Input(shape=(1,)) preds = Dense(1,activation='linear')(inputs)", "= keras.optimizers.SGD() model.compile(optimizer=sgd ,loss='mse') model.fit(x_,y_, batch_size=1, verbose=1, epochs=10, shuffle=False) save_model(model,", "save_model, load_model from sklearn.preprocessing import StandardScaler, MinMaxScaler, Normalizer if __name__", "Television\"] = pd.to_numeric(df[\"People per Television\"],errors='coerce') df = df.dropna() x =", "df[\"People per Physician\"].values.reshape(-1,1).astype(np.float64) # min-max -1,1 sc = MinMaxScaler(feature_range=(-1,1)) x_", "sc = MinMaxScaler(feature_range=(-1,1)) x_ = sc.fit_transform(x) y_ = sc.fit_transform(y) inputs", "pd import numpy as np import keras from keras.layers import", "per Television\"] = pd.to_numeric(df[\"People per Television\"],errors='coerce') df = df.dropna() x", "Input, Dense from keras.models import Model from keras.models import save_model,", "from keras.models import Model from keras.models import save_model, load_model from", "import cloudpickle as pickle import pipeline_invoke import pandas as pd", "= sc.fit_transform(y) inputs = Input(shape=(1,)) preds = Dense(1,activation='linear')(inputs) model =", "# min-max -1,1 sc = MinMaxScaler(feature_range=(-1,1)) x_ = sc.fit_transform(x) y_", "sc.fit_transform(y) inputs = Input(shape=(1,)) preds = Dense(1,activation='linear')(inputs) model = Model(inputs=inputs,outputs=preds)", "'model.pkl' # with open(model_pkl_path, 'wb') as fh: # pickle.dump(pipeline_invoke, fh)", "model.compile(optimizer=sgd ,loss='mse') model.fit(x_,y_, batch_size=1, verbose=1, epochs=10, shuffle=False) save_model(model, 'state/keras_theano_linear_model_state.h5') #", "shuffle=False) save_model(model, 'state/keras_theano_linear_model_state.h5') # model_pkl_path = 'model.pkl' # 
with open(model_pkl_path,", "MinMaxScaler(feature_range=(-1,1)) x_ = sc.fit_transform(x) y_ = sc.fit_transform(y) inputs = Input(shape=(1,))", "= 'model.pkl' # with open(model_pkl_path, 'wb') as fh: # pickle.dump(pipeline_invoke,", "save_model(model, 'state/keras_theano_linear_model_state.h5') # model_pkl_path = 'model.pkl' # with open(model_pkl_path, 'wb')", "from sklearn.preprocessing import StandardScaler, MinMaxScaler, Normalizer if __name__ == '__main__':", "df = pd.read_csv(\"../input/training/training.csv\") df[\"People per Television\"] = pd.to_numeric(df[\"People per Television\"],errors='coerce')", "Model from keras.models import save_model, load_model from sklearn.preprocessing import StandardScaler,", "pd.read_csv(\"../input/training/training.csv\") df[\"People per Television\"] = pd.to_numeric(df[\"People per Television\"],errors='coerce') df =", "numpy as np import keras from keras.layers import Input, Dense", "import save_model, load_model from sklearn.preprocessing import StandardScaler, MinMaxScaler, Normalizer if", "sgd = keras.optimizers.SGD() model.compile(optimizer=sgd ,loss='mse') model.fit(x_,y_, batch_size=1, verbose=1, epochs=10, shuffle=False)", "x = df[\"People per Television\"].values.reshape(-1,1).astype(np.float64) y = df[\"People per Physician\"].values.reshape(-1,1).astype(np.float64)", "= Dense(1,activation='linear')(inputs) model = Model(inputs=inputs,outputs=preds) sgd = keras.optimizers.SGD() model.compile(optimizer=sgd ,loss='mse')", "per Television\"],errors='coerce') df = df.dropna() x = df[\"People per Television\"].values.reshape(-1,1).astype(np.float64)", "Dense from keras.models import Model from keras.models import save_model, load_model", "model = Model(inputs=inputs,outputs=preds) sgd = keras.optimizers.SGD() model.compile(optimizer=sgd ,loss='mse') model.fit(x_,y_, batch_size=1,", "-1,1 sc = MinMaxScaler(feature_range=(-1,1)) x_ = sc.fit_transform(x) y_ = sc.fit_transform(y)", "= 'floatX=float32,device=cpu' import 
cloudpickle as pickle import pipeline_invoke import pandas", "= df[\"People per Television\"].values.reshape(-1,1).astype(np.float64) y = df[\"People per Physician\"].values.reshape(-1,1).astype(np.float64) #" ]
[ "the hardware layer to avoid errors. from ledshimdemo.canvas import Canvas", "unittest.mock import Mock, patch import sys sys.modules['smbus'] = Mock() #", "type: int def test_cheerlight_call(self): canvas = Canvas(self.TEST_CANVAS_SIZE) effect = CheerLightsEffect(canvas)", "= 3 # type: int def test_cheerlight_call(self): canvas = Canvas(self.TEST_CANVAS_SIZE)", "Mock() # Mock the hardware layer to avoid errors. from", "canvas.BLANK_PIXEL) def test_effect_working_cheerlights(self): canvas = Canvas(self.TEST_CANVAS_SIZE) effect = CheerLightsEffect(canvas) #", "after in case it changes during the test. before =", "Canvas(self.TEST_CANVAS_SIZE) effect = CheerLightsEffect(canvas) # Must check before and after", "sys.modules['smbus'] = Mock() # Mock the hardware layer to avoid", "for i in range(canvas.get_size()): self.assertEqual(canvas.get_pixel(i), canvas.BLANK_PIXEL) def test_effect_working_cheerlights(self): canvas =", "TestCheerLights(TestCase): TEST_CANVAS_SIZE = 3 # type: int def test_cheerlight_call(self): canvas", "# Mock the hardware layer to avoid errors. from ledshimdemo.canvas", "effect = CheerLightsEffect(canvas) effect.compose() patch_function.assert_called_once() for i in range(canvas.get_size()): self.assertEqual(canvas.get_pixel(i),", "# type: int def test_cheerlight_call(self): canvas = Canvas(self.TEST_CANVAS_SIZE) effect =", "self.assertIsNone(effect.get_colour_from_channel(\"http://ejiferfneciudwedwojcmeiocnw.com\")) @patch('ledshimdemo.effects.cheerlights.CheerLightsEffect.get_colour_from_channel', return_value=None) def test_effect_failed_cheerlights(self, patch_function): canvas = Canvas(self.TEST_CANVAS_SIZE) effect", "Canvas(self.TEST_CANVAS_SIZE) effect = CheerLightsEffect(canvas) effect.compose() patch_function.assert_called_once() for i in range(canvas.get_size()):", "Mock the hardware layer to avoid errors. 
from ledshimdemo.canvas import", "= Canvas(self.TEST_CANVAS_SIZE) effect = CheerLightsEffect(canvas) effect.compose() patch_function.assert_called_once() for i in", "layer to avoid errors. from ledshimdemo.canvas import Canvas from ledshimdemo.effects.cheerlights", "@patch('ledshimdemo.effects.cheerlights.CheerLightsEffect.get_colour_from_channel', return_value=None) def test_effect_failed_cheerlights(self, patch_function): canvas = Canvas(self.TEST_CANVAS_SIZE) effect =", "test_effect_failed_cheerlights(self, patch_function): canvas = Canvas(self.TEST_CANVAS_SIZE) effect = CheerLightsEffect(canvas) effect.compose() patch_function.assert_called_once()", "canvas = Canvas(self.TEST_CANVAS_SIZE) effect = CheerLightsEffect(canvas) self.assertIsNone(effect.get_colour_from_channel(\"http://ejiferfneciudwedwojcmeiocnw.com\")) @patch('ledshimdemo.effects.cheerlights.CheerLightsEffect.get_colour_from_channel', return_value=None) def", "CheerLightsEffect class TestCheerLights(TestCase): TEST_CANVAS_SIZE = 3 # type: int def", "= CheerLightsEffect(canvas) effect.compose() patch_function.assert_called_once() for i in range(canvas.get_size()): self.assertEqual(canvas.get_pixel(i), canvas.BLANK_PIXEL)", "i in range(canvas.get_size()): self.assertEqual(canvas.get_pixel(i), canvas.BLANK_PIXEL) def test_effect_working_cheerlights(self): canvas = Canvas(self.TEST_CANVAS_SIZE)", "effect = CheerLightsEffect(canvas) self.assertIsNone(effect.get_colour_from_channel(\"http://ejiferfneciudwedwojcmeiocnw.com\")) @patch('ledshimdemo.effects.cheerlights.CheerLightsEffect.get_colour_from_channel', return_value=None) def test_effect_failed_cheerlights(self, patch_function): canvas", "check before and after in case it changes during the", "= Canvas(self.TEST_CANVAS_SIZE) effect = CheerLightsEffect(canvas) # Must check before and", "from ledshimdemo.effects.cheerlights import CheerLightsEffect class TestCheerLights(TestCase): TEST_CANVAS_SIZE = 3 #", "= CheerLightsEffect(canvas) # Must check 
before and after in case", "TEST_CANVAS_SIZE = 3 # type: int def test_cheerlight_call(self): canvas =", "hardware layer to avoid errors. from ledshimdemo.canvas import Canvas from", "canvas = Canvas(self.TEST_CANVAS_SIZE) effect = CheerLightsEffect(canvas) effect.compose() patch_function.assert_called_once() for i", "class TestCheerLights(TestCase): TEST_CANVAS_SIZE = 3 # type: int def test_cheerlight_call(self):", "# Must check before and after in case it changes", "case it changes during the test. before = effect.get_colour_from_channel(effect.URL) effect.compose()", "from ledshimdemo.canvas import Canvas from ledshimdemo.effects.cheerlights import CheerLightsEffect class TestCheerLights(TestCase):", "CheerLightsEffect(canvas) # Must check before and after in case it", "= Mock() # Mock the hardware layer to avoid errors.", "test. before = effect.get_colour_from_channel(effect.URL) effect.compose() after = effect.get_colour_from_channel(effect.URL) self.assertRegex(repr(effect), \"^CheerLights\\\\(Colour:({0}|{1})\\\\)$\".format(before,", "unittest import TestCase from unittest.mock import Mock, patch import sys", "and after in case it changes during the test. 
before", "effect.compose() patch_function.assert_called_once() for i in range(canvas.get_size()): self.assertEqual(canvas.get_pixel(i), canvas.BLANK_PIXEL) def test_effect_working_cheerlights(self):", "before and after in case it changes during the test.", "Mock, patch import sys sys.modules['smbus'] = Mock() # Mock the", "Must check before and after in case it changes during", "patch_function): canvas = Canvas(self.TEST_CANVAS_SIZE) effect = CheerLightsEffect(canvas) effect.compose() patch_function.assert_called_once() for", "patch_function.assert_called_once() for i in range(canvas.get_size()): self.assertEqual(canvas.get_pixel(i), canvas.BLANK_PIXEL) def test_effect_working_cheerlights(self): canvas", "def test_effect_working_cheerlights(self): canvas = Canvas(self.TEST_CANVAS_SIZE) effect = CheerLightsEffect(canvas) # Must", "it changes during the test. before = effect.get_colour_from_channel(effect.URL) effect.compose() after", "to avoid errors. from ledshimdemo.canvas import Canvas from ledshimdemo.effects.cheerlights import", "int def test_cheerlight_call(self): canvas = Canvas(self.TEST_CANVAS_SIZE) effect = CheerLightsEffect(canvas) self.assertIsNone(effect.get_colour_from_channel(\"http://ejiferfneciudwedwojcmeiocnw.com\"))", "import Canvas from ledshimdemo.effects.cheerlights import CheerLightsEffect class TestCheerLights(TestCase): TEST_CANVAS_SIZE =", "patch import sys sys.modules['smbus'] = Mock() # Mock the hardware", "in case it changes during the test. before = effect.get_colour_from_channel(effect.URL)", "avoid errors. from ledshimdemo.canvas import Canvas from ledshimdemo.effects.cheerlights import CheerLightsEffect", "def test_effect_failed_cheerlights(self, patch_function): canvas = Canvas(self.TEST_CANVAS_SIZE) effect = CheerLightsEffect(canvas) effect.compose()", "changes during the test. 
before = effect.get_colour_from_channel(effect.URL) effect.compose() after =", "= Canvas(self.TEST_CANVAS_SIZE) effect = CheerLightsEffect(canvas) self.assertIsNone(effect.get_colour_from_channel(\"http://ejiferfneciudwedwojcmeiocnw.com\")) @patch('ledshimdemo.effects.cheerlights.CheerLightsEffect.get_colour_from_channel', return_value=None) def test_effect_failed_cheerlights(self,", "self.assertEqual(canvas.get_pixel(i), canvas.BLANK_PIXEL) def test_effect_working_cheerlights(self): canvas = Canvas(self.TEST_CANVAS_SIZE) effect = CheerLightsEffect(canvas)", "def test_cheerlight_call(self): canvas = Canvas(self.TEST_CANVAS_SIZE) effect = CheerLightsEffect(canvas) self.assertIsNone(effect.get_colour_from_channel(\"http://ejiferfneciudwedwojcmeiocnw.com\")) @patch('ledshimdemo.effects.cheerlights.CheerLightsEffect.get_colour_from_channel',", "CheerLightsEffect(canvas) effect.compose() patch_function.assert_called_once() for i in range(canvas.get_size()): self.assertEqual(canvas.get_pixel(i), canvas.BLANK_PIXEL) def", "import TestCase from unittest.mock import Mock, patch import sys sys.modules['smbus']", "test_effect_working_cheerlights(self): canvas = Canvas(self.TEST_CANVAS_SIZE) effect = CheerLightsEffect(canvas) # Must check", "the test. 
before = effect.get_colour_from_channel(effect.URL) effect.compose() after = effect.get_colour_from_channel(effect.URL) self.assertRegex(repr(effect),", "ledshimdemo.effects.cheerlights import CheerLightsEffect class TestCheerLights(TestCase): TEST_CANVAS_SIZE = 3 # type:", "Canvas from ledshimdemo.effects.cheerlights import CheerLightsEffect class TestCheerLights(TestCase): TEST_CANVAS_SIZE = 3", "import Mock, patch import sys sys.modules['smbus'] = Mock() # Mock", "3 # type: int def test_cheerlight_call(self): canvas = Canvas(self.TEST_CANVAS_SIZE) effect", "before = effect.get_colour_from_channel(effect.URL) effect.compose() after = effect.get_colour_from_channel(effect.URL) self.assertRegex(repr(effect), \"^CheerLights\\\\(Colour:({0}|{1})\\\\)$\".format(before, after))", "sys sys.modules['smbus'] = Mock() # Mock the hardware layer to", "= CheerLightsEffect(canvas) self.assertIsNone(effect.get_colour_from_channel(\"http://ejiferfneciudwedwojcmeiocnw.com\")) @patch('ledshimdemo.effects.cheerlights.CheerLightsEffect.get_colour_from_channel', return_value=None) def test_effect_failed_cheerlights(self, patch_function): canvas =", "during the test. before = effect.get_colour_from_channel(effect.URL) effect.compose() after = effect.get_colour_from_channel(effect.URL)", "import CheerLightsEffect class TestCheerLights(TestCase): TEST_CANVAS_SIZE = 3 # type: int", "return_value=None) def test_effect_failed_cheerlights(self, patch_function): canvas = Canvas(self.TEST_CANVAS_SIZE) effect = CheerLightsEffect(canvas)", "from unittest import TestCase from unittest.mock import Mock, patch import", "TestCase from unittest.mock import Mock, patch import sys sys.modules['smbus'] =", "from unittest.mock import Mock, patch import sys sys.modules['smbus'] = Mock()", "import sys sys.modules['smbus'] = Mock() # Mock the hardware layer", "errors. 
from ledshimdemo.canvas import Canvas from ledshimdemo.effects.cheerlights import CheerLightsEffect class", "range(canvas.get_size()): self.assertEqual(canvas.get_pixel(i), canvas.BLANK_PIXEL) def test_effect_working_cheerlights(self): canvas = Canvas(self.TEST_CANVAS_SIZE) effect =", "test_cheerlight_call(self): canvas = Canvas(self.TEST_CANVAS_SIZE) effect = CheerLightsEffect(canvas) self.assertIsNone(effect.get_colour_from_channel(\"http://ejiferfneciudwedwojcmeiocnw.com\")) @patch('ledshimdemo.effects.cheerlights.CheerLightsEffect.get_colour_from_channel', return_value=None)", "Canvas(self.TEST_CANVAS_SIZE) effect = CheerLightsEffect(canvas) self.assertIsNone(effect.get_colour_from_channel(\"http://ejiferfneciudwedwojcmeiocnw.com\")) @patch('ledshimdemo.effects.cheerlights.CheerLightsEffect.get_colour_from_channel', return_value=None) def test_effect_failed_cheerlights(self, patch_function):", "in range(canvas.get_size()): self.assertEqual(canvas.get_pixel(i), canvas.BLANK_PIXEL) def test_effect_working_cheerlights(self): canvas = Canvas(self.TEST_CANVAS_SIZE) effect", "effect = CheerLightsEffect(canvas) # Must check before and after in", "canvas = Canvas(self.TEST_CANVAS_SIZE) effect = CheerLightsEffect(canvas) # Must check before", "ledshimdemo.canvas import Canvas from ledshimdemo.effects.cheerlights import CheerLightsEffect class TestCheerLights(TestCase): TEST_CANVAS_SIZE", "CheerLightsEffect(canvas) self.assertIsNone(effect.get_colour_from_channel(\"http://ejiferfneciudwedwojcmeiocnw.com\")) @patch('ledshimdemo.effects.cheerlights.CheerLightsEffect.get_colour_from_channel', return_value=None) def test_effect_failed_cheerlights(self, patch_function): canvas = Canvas(self.TEST_CANVAS_SIZE)" ]
[ "tqdm(chunked)] out_dict = {} for i, d in enumerate(dicts): for", "from tqdm import tqdm import os import tifffile def chunk_list(l,", "'All cells must have the same data elements' out_dict =", "import add_readout_noise, draw_poisson from colicoords import load import numpy as", "shape, max_dist=5): thetas = 360 * np.random.rand(len(cells)) data_list = [cell.data.rotate(theta)", "image.\"\"\" cell_list = load(os.path.join(data_dir, 'cell_obj', 'cells_final_selected.hdf5')) out_dict = generate_images(cell_list, 1000,", "= int(np.round(shape[0] * np.random.rand())) min1 = pos_y - int(np.floor(data.shape[0])) max1", "k: v['frame'] = i + 1 if k in out_dict:", "'images', 'storm_inner.txt'), out_dict['storm_inner']) np.savetxt(os.path.join(data_dir, 'images', 'storm_outer.txt'), out_dict['storm_inner']) def noise_bf(data_dir): \"\"\"add", "20 img_stack = np.load(os.path.join(data_dir, 'images', 'brightfield.npy')) for photons in [10000,", "500]: ratio = 1.0453 # ratio between 'background' (no cells)", "tqdm import tqdm import os import tifffile def chunk_list(l, sizes):", "= np.zeros(shape) temp_binary[min1:max1, min2:max2] = data_cropped.binary_img out_binary = (out_dict['binary'] >", "= i + 1 if k in out_dict: out_dict[k] =", "in k: v['frame'] = i + 1 if k in", "- min1, 0]) d_max1 = np.min([data.shape[0] + (shape[0] - pos_y),", "d_min2:d_max2] # Limit image position to the edges of the", "= load(os.path.join(data_dir, 'cell_obj', 'cells_final_selected.hdf5')) out_dict = generate_images(cell_list, 1000, 10, 3,", "Limit image position to the edges of the image min1", "= 1 xi = np.arange(step / 2, xmax, step) yi", "for name, dclass in zip(data_list[0].names, data_list[0].dclasses) if dclass != 'storm'}", "< 0) + (data_elem['y'] > ymax) data_out = data_elem[~bools].copy() if", "x, y): img += _int * np.exp(-(((_x - x_coords) /", "oriented in the image.\"\"\" cell_list = load(os.path.join(data_dir, 'cell_obj', 'cells_final_selected.hdf5')) out_dict", "for when the cell is 
on the border of the", "max_dist): continue valid_position = True for name in data.names: data_elem", "= data_out continue elif data_elem.dclass == 'binary': out_dict[name][min1:max1, min2:max2] +=", "np.min([max1, shape[0]]) min2 = np.max([min2, 0]) max2 = np.min([max2, shape[1]])", "s in sizes: result = l[prev:prev+s] prev += s yield", "in out_dict: out_dict[k][i] = v else: out_dict[k] = np.zeros((num_images, *shape))", "(data_elem['x'] > xmax) + (data_elem['y'] < 0) + (data_elem['y'] >", "- int(np.floor(data.shape[0])) max1 = min1 + data.shape[0] min2 = pos_x", "_sigma) ** 2) / 2) return img def gen_im(data_dir): \"\"\"Generate", "img = draw_poisson(img) img = add_readout_noise(img, noise) tifffile.imsave(os.path.join(data_dir, 'images', 'bf_noise_{}_photons.tif'.format(photons)),", "'brightfield.tif'), out_dict['brightfield']) tifffile.imsave(os.path.join(data_dir, 'images', 'foci_inner.tif'), out_dict['foci_inner']) tifffile.imsave(os.path.join(data_dir, 'images', 'foci_outer.tif'), out_dict['foci_outer'])", "noise to brightfield images\"\"\" noise = 20 img_stack = np.load(os.path.join(data_dir,", "= generate_images(cell_list, 1000, 10, 3, (512, 512)) if not os.path.exists(os.path.join(data_dir,", "def generate_images(cell_list, num_images, cell_per_img, cell_per_img_std, shape): nums = np.round(np.random.normal(cell_per_img, cell_per_img_std,", "tifffile def chunk_list(l, sizes): prev = 0 for s in", "enumerate(data_list): valid_position = False while not valid_position: pos_x = int(np.round(shape[1]", "min1 xmax, ymax = shape[1], shape[0] bools = (data_elem['x'] <", "cell, theta in zip(cells, thetas)] assert all([data.names == data_list[0].names for", "data_elem return out_dict def gen_image_from_storm(storm_table, shape, sigma=1.54, sigma_std=0.3): xmax =", "= pos_x - int(np.floor(data.shape[1])) max2 = min2 + data.shape[1] #", "the image.\"\"\" cell_list = load(os.path.join(data_dir, 'cell_obj', 'cells_final_selected.hdf5')) out_dict = 
generate_images(cell_list,", "< len(cell_list), 'Not enough cells' chunked = [chunk for chunk", "xmax, ymax = shape[1], shape[0] bools = (data_elem['x'] < 0)", "num_images)).astype(int) nums = nums[nums > 0] assert sum(nums) < len(cell_list),", "img = (photons*(ratio-1))*img_stack + photons img = draw_poisson(img) img =", "nums[nums > 0] assert sum(nums) < len(cell_list), 'Not enough cells'", "np.random.rand())) pos_y = int(np.round(shape[0] * np.random.rand())) min1 = pos_y -", "size=len(x)) for _sigma, _int, _x, _y in zip(sigma, intensities, x,", "name in data.names: data_elem = data_cropped.data_dict[name] if data_elem.dclass == 'storm':", "add_readout_noise, draw_poisson from colicoords import load import numpy as np", "in the image.\"\"\" cell_list = load(os.path.join(data_dir, 'cell_obj', 'cells_final_selected.hdf5')) out_dict =", "import load import numpy as np import mahotas as mh", "max2 = min2 + data.shape[1] # Crop the data for", "must have the same data elements' out_dict = {name: np.zeros(shape)", "enough cells' chunked = [chunk for chunk in tqdm(chunk_list(cell_list, nums))]", "tifffile.imsave(os.path.join(data_dir, 'images', 'foci_outer.tif'), out_dict['foci_outer']) np.savetxt(os.path.join(data_dir, 'images', 'storm_inner.txt'), out_dict['storm_inner']) np.savetxt(os.path.join(data_dir, 'images',", "= pos_y - int(np.floor(data.shape[0])) max1 = min1 + data.shape[0] min2", "= np.repeat(yi, len(xi)).reshape(len(yi), len(xi)) x, y = storm_table['x'], storm_table['y'] img", "out_dict['foci_inner']) np.save(os.path.join(data_dir, 'images', 'foci_outer.npy'), out_dict['foci_outer']) np.save(os.path.join(data_dir, 'images', 'storm_inner.npy'), out_dict['storm_inner']) np.save(os.path.join(data_dir,", "'storm': data_elem['x'] += min2 data_elem['y'] += min1 xmax, ymax =", "out_dict[name][min1:max1, min2:max2] += ((i+1)*data_elem) else: out_dict[name][min1:max1, min2:max2] += data_elem return", "x, y = storm_table['x'], storm_table['y'] img = 
np.zeros_like(x_coords) intensities =", "'foci_outer.tif'), out_dict['foci_outer']) np.savetxt(os.path.join(data_dir, 'images', 'storm_inner.txt'), out_dict['storm_inner']) np.savetxt(os.path.join(data_dir, 'images', 'storm_outer.txt'), out_dict['storm_inner'])", "out_dict: out_dict[k][i] = v else: out_dict[k] = np.zeros((num_images, *shape)) out_dict[k][i]", "image min1 = np.max([min1, 0]) max1 = np.min([max1, shape[0]]) min2", "[cell.data.rotate(theta) for cell, theta in zip(cells, thetas)] assert all([data.names ==", "= int(np.round(shape[1] * np.random.rand())) pos_y = int(np.round(shape[0] * np.random.rand())) min1", "load import numpy as np import mahotas as mh from", "out_dict = {} for i, d in enumerate(dicts): for k,", "min1, 0]) d_max1 = np.min([data.shape[0] + (shape[0] - pos_y), data.shape[0]])", "if name in out_dict: out_dict[name] = np.append(out_dict[name], data_out) else: out_dict[name]", "result = l[prev:prev+s] prev += s yield result def generate_images(cell_list,", "min2:max2] += ((i+1)*data_elem) else: out_dict[name][min1:max1, min2:max2] += data_elem return out_dict", "pos_x - int(np.floor(data.shape[1])) max2 = min2 + data.shape[1] # Crop", "v return out_dict def generate_image(cells, shape, max_dist=5): thetas = 360", "valid_position = True for name in data.names: data_elem = data_cropped.data_dict[name]", "np.save(os.path.join(data_dir, 'images', 'storm_outer.npy'), out_dict['storm_outer']) tifffile.imsave(os.path.join(data_dir, 'images', 'binary.tif'), out_dict['binary']) tifffile.imsave(os.path.join(data_dir, 'images',", "[chunk for chunk in tqdm(chunk_list(cell_list, nums))] dicts = [generate_image(cells, shape)", "2) return img def gen_im(data_dir): \"\"\"Generate microscopy images from a", "> 0] assert sum(nums) < len(cell_list), 'Not enough cells' chunked", "out_binary = (out_dict['binary'] > 0).astype(int) distance_map = mh.distance(1 - out_binary,", "(512, 512)) if not os.path.exists(os.path.join(data_dir, 'images')): 
os.mkdir(os.path.join(data_dir, 'images')) np.save(os.path.join(data_dir, 'images',", "the edges of the image min1 = np.max([min1, 0]) max1", "len(xi)).reshape(len(yi), len(xi)) x, y = storm_table['x'], storm_table['y'] img = np.zeros_like(x_coords)", "= np.arange(step / 2, ymax, step) x_coords = np.repeat(xi, len(yi)).reshape(len(xi),", "np.min([data.shape[0] + (shape[0] - pos_y), data.shape[0]]) d_min2 = np.max([0 -", "- pos_x), data.shape[1]]) data_cropped = data[d_min1:d_max1, d_min2:d_max2] # Limit image", "'images', 'foci_outer.npy'), out_dict['foci_outer']) np.save(os.path.join(data_dir, 'images', 'storm_inner.npy'), out_dict['storm_inner']) np.save(os.path.join(data_dir, 'images', 'storm_outer.npy'),", "in zip(cells, thetas)] assert all([data.names == data_list[0].names for data in", "enumerate(dicts): for k, v in d.items(): if 'storm' in k:", "step) yi = np.arange(step / 2, ymax, step) x_coords =", "'cells_final_selected.hdf5')) out_dict = generate_images(cell_list, 1000, 10, 3, (512, 512)) if", "data_list[0].dclasses) if dclass != 'storm'} for i, data in enumerate(data_list):", "+= min1 xmax, ymax = shape[1], shape[0] bools = (data_elem['x']", "out_dict[k] = np.zeros((num_images, *shape)) out_dict[k][i] = v return out_dict def", "Crop the data for when the cell is on the", "intensities = storm_table['intensity'] sigma = sigma * np.ones_like(x) if not", "out_dict['storm_outer']) tifffile.imsave(os.path.join(data_dir, 'images', 'binary.tif'), out_dict['binary']) tifffile.imsave(os.path.join(data_dir, 'images', 'brightfield.tif'), out_dict['brightfield']) tifffile.imsave(os.path.join(data_dir,", "brightfield images\"\"\" noise = 20 img_stack = np.load(os.path.join(data_dir, 'images', 'brightfield.npy'))", "out_dict['binary']) np.save(os.path.join(data_dir, 'images', 'brightfield.npy'), out_dict['brightfield']) np.save(os.path.join(data_dir, 'images', 'foci_inner.npy'), out_dict['foci_inner']) np.save(os.path.join(data_dir,", "= nums[nums > 0] assert 
sum(nums) < len(cell_list), 'Not enough", "data_list]), 'All cells must have the same data elements' out_dict", "= add_readout_noise(img, noise) tifffile.imsave(os.path.join(data_dir, 'images', 'bf_noise_{}_photons.tif'.format(photons)), img) np.save(os.path.join(data_dir, 'images', 'bf_noise_{}_photons.npy'.format(photons)),", "data[d_min1:d_max1, d_min2:d_max2] # Limit image position to the edges of", "np.arange(step / 2, ymax, step) x_coords = np.repeat(xi, len(yi)).reshape(len(xi), len(yi)).T", "np.save(os.path.join(data_dir, 'images', 'foci_outer.npy'), out_dict['foci_outer']) np.save(os.path.join(data_dir, 'images', 'storm_inner.npy'), out_dict['storm_inner']) np.save(os.path.join(data_dir, 'images',", "+= s yield result def generate_images(cell_list, num_images, cell_per_img, cell_per_img_std, shape):", "pos_x = int(np.round(shape[1] * np.random.rand())) pos_y = int(np.round(shape[0] * np.random.rand()))", "= np.max([0 - min2, 0]) d_max2 = np.min([data.shape[1] + (shape[1]", "k in out_dict: out_dict[k] = np.append(out_dict[k], v) else: out_dict[k] =", "else: out_dict[name] = data_out continue elif data_elem.dclass == 'binary': out_dict[name][min1:max1,", "pos_y - int(np.floor(data.shape[0])) max1 = min1 + data.shape[0] min2 =", "elif data_elem.dclass == 'binary': out_dict[name][min1:max1, min2:max2] += ((i+1)*data_elem) else: out_dict[name][min1:max1,", "out_dict[k] = np.append(out_dict[k], v) else: out_dict[k] = v else: if", "def chunk_list(l, sizes): prev = 0 for s in sizes:", "out_dict = {name: np.zeros(shape) for name, dclass in zip(data_list[0].names, data_list[0].dclasses)", "- pos_y), data.shape[0]]) d_min2 = np.max([0 - min2, 0]) d_max2", "min2 = pos_x - int(np.floor(data.shape[1])) max2 = min2 + data.shape[1]", "= np.repeat(xi, len(yi)).reshape(len(xi), len(yi)).T y_coords = np.repeat(yi, len(xi)).reshape(len(yi), len(xi)) x,", "assert all([data.names == data_list[0].names for data in data_list]), 'All cells", "from a list of cell objects by placing 
them randomly", "wall img = (photons*(ratio-1))*img_stack + photons img = draw_poisson(img) img", "numpy as np import mahotas as mh from tqdm import", "import os import tifffile def chunk_list(l, sizes): prev = 0", "int(np.floor(data.shape[0])) max1 = min1 + data.shape[0] min2 = pos_x -", "to brightfield images\"\"\" noise = 20 img_stack = np.load(os.path.join(data_dir, 'images',", "_int * np.exp(-(((_x - x_coords) / _sigma) ** 2 +", "out_dict['foci_inner']) tifffile.imsave(os.path.join(data_dir, 'images', 'foci_outer.tif'), out_dict['foci_outer']) np.savetxt(os.path.join(data_dir, 'images', 'storm_inner.txt'), out_dict['storm_inner']) np.savetxt(os.path.join(data_dir,", "= [cell.data.rotate(theta) for cell, theta in zip(cells, thetas)] assert all([data.names", "ratio between 'background' (no cells) and cell wall img =", "'images')) np.save(os.path.join(data_dir, 'images', 'binary.npy'), out_dict['binary']) np.save(os.path.join(data_dir, 'images', 'brightfield.npy'), out_dict['brightfield']) np.save(os.path.join(data_dir,", "\"\"\"add poissonian and readout noise to brightfield images\"\"\" noise =", "if np.any(distance_map[temp_binary.astype(bool)] < max_dist): continue valid_position = True for name", "photons in [10000, 1000, 500]: ratio = 1.0453 # ratio", "= np.max([min2, 0]) max2 = np.min([max2, shape[1]]) temp_binary = np.zeros(shape)", "nums = np.round(np.random.normal(cell_per_img, cell_per_img_std, num_images)).astype(int) nums = nums[nums > 0]", "d.items(): if 'storm' in k: v['frame'] = i + 1", "while not valid_position: pos_x = int(np.round(shape[1] * np.random.rand())) pos_y =", "continue valid_position = True for name in data.names: data_elem =", "'images')): os.mkdir(os.path.join(data_dir, 'images')) np.save(os.path.join(data_dir, 'images', 'binary.npy'), out_dict['binary']) np.save(os.path.join(data_dir, 'images', 'brightfield.npy'),", "if data_elem.dclass == 'storm': data_elem['x'] += min2 data_elem['y'] += min1", "np.zeros(shape) for name, dclass 
in zip(data_list[0].names, data_list[0].dclasses) if dclass !=", "= (data_elem['x'] < 0) + (data_elem['x'] > xmax) + (data_elem['y']", "np.min([data.shape[1] + (shape[1] - pos_x), data.shape[1]]) data_cropped = data[d_min1:d_max1, d_min2:d_max2]", "np.save(os.path.join(data_dir, 'images', 'foci_inner.npy'), out_dict['foci_inner']) np.save(os.path.join(data_dir, 'images', 'foci_outer.npy'), out_dict['foci_outer']) np.save(os.path.join(data_dir, 'images',", "the same data elements' out_dict = {name: np.zeros(shape) for name,", "= data_cropped.data_dict[name] if data_elem.dclass == 'storm': data_elem['x'] += min2 data_elem['y']", "'images', 'bf_noise_{}_photons.npy'.format(photons)), img) if __name__ == '__main__': np.random.seed(42) data_dir =", "+ (data_elem['y'] > ymax) data_out = data_elem[~bools].copy() if name in", "distance_map = mh.distance(1 - out_binary, metric='euclidean') if np.any(distance_map[temp_binary.astype(bool)] < max_dist):", "# Limit image position to the edges of the image", "out_dict['foci_outer']) np.save(os.path.join(data_dir, 'images', 'storm_inner.npy'), out_dict['storm_inner']) np.save(os.path.join(data_dir, 'images', 'storm_outer.npy'), out_dict['storm_outer']) tifffile.imsave(os.path.join(data_dir,", "the border of the image d_min1 = np.max([0 - min1,", "os.path.exists(os.path.join(data_dir, 'images')): os.mkdir(os.path.join(data_dir, 'images')) np.save(os.path.join(data_dir, 'images', 'binary.npy'), out_dict['binary']) np.save(os.path.join(data_dir, 'images',", "/ 2, xmax, step) yi = np.arange(step / 2, ymax,", "pos_x), data.shape[1]]) data_cropped = data[d_min1:d_max1, d_min2:d_max2] # Limit image position", "max1 = np.min([max1, shape[0]]) min2 = np.max([min2, 0]) max2 =", "min2 + data.shape[1] # Crop the data for when the", "cell is on the border of the image d_min1 =", "int(np.round(shape[0] * np.random.rand())) min1 = pos_y - int(np.floor(data.shape[0])) max1 =", "!= 'storm'} for i, data in enumerate(data_list): valid_position = 
False", "valid_position = False while not valid_position: pos_x = int(np.round(shape[1] *", "512)) if not os.path.exists(os.path.join(data_dir, 'images')): os.mkdir(os.path.join(data_dir, 'images')) np.save(os.path.join(data_dir, 'images', 'binary.npy'),", "np.save(os.path.join(data_dir, 'images', 'brightfield.npy'), out_dict['brightfield']) np.save(os.path.join(data_dir, 'images', 'foci_inner.npy'), out_dict['foci_inner']) np.save(os.path.join(data_dir, 'images',", "+ (data_elem['y'] < 0) + (data_elem['y'] > ymax) data_out =", "sigma_std else np.random.normal(sigma, sigma_std, size=len(x)) for _sigma, _int, _x, _y", "= False while not valid_position: pos_x = int(np.round(shape[1] * np.random.rand()))", "sum(nums) < len(cell_list), 'Not enough cells' chunked = [chunk for", "np.repeat(xi, len(yi)).reshape(len(xi), len(yi)).T y_coords = np.repeat(yi, len(xi)).reshape(len(yi), len(xi)) x, y", "y = storm_table['x'], storm_table['y'] img = np.zeros_like(x_coords) intensities = storm_table['intensity']", "img def gen_im(data_dir): \"\"\"Generate microscopy images from a list of", "2, xmax, step) yi = np.arange(step / 2, ymax, step)", "xi = np.arange(step / 2, xmax, step) yi = np.arange(step", "- x_coords) / _sigma) ** 2 + ((_y - y_coords)", "= storm_table['intensity'] sigma = sigma * np.ones_like(x) if not sigma_std", "not sigma_std else np.random.normal(sigma, sigma_std, size=len(x)) for _sigma, _int, _x,", "* np.exp(-(((_x - x_coords) / _sigma) ** 2 + ((_y", "if 'storm' in k: v['frame'] = i + 1 if", "= {} for i, d in enumerate(dicts): for k, v", "= np.zeros((num_images, *shape)) out_dict[k][i] = v return out_dict def generate_image(cells,", "out_dict[name] = np.append(out_dict[name], data_out) else: out_dict[name] = data_out continue elif", "ymax, step) x_coords = np.repeat(xi, len(yi)).reshape(len(xi), len(yi)).T y_coords = np.repeat(yi,", "is on the border of the image d_min1 = np.max([0", "d_max2 = np.min([data.shape[1] + (shape[1] - pos_x), data.shape[1]]) 
data_cropped =", "= draw_poisson(img) img = add_readout_noise(img, noise) tifffile.imsave(os.path.join(data_dir, 'images', 'bf_noise_{}_photons.tif'.format(photons)), img)", "sigma=1.54, sigma_std=0.3): xmax = shape[1] ymax = shape[0] step =", "name, dclass in zip(data_list[0].names, data_list[0].dclasses) if dclass != 'storm'} for", "= min1 + data.shape[0] min2 = pos_x - int(np.floor(data.shape[1])) max2", "for _sigma, _int, _x, _y in zip(sigma, intensities, x, y):", "when the cell is on the border of the image", "int(np.floor(data.shape[1])) max2 = min2 + data.shape[1] # Crop the data", "_y in zip(sigma, intensities, x, y): img += _int *", "on the border of the image d_min1 = np.max([0 -", "in data.names: data_elem = data_cropped.data_dict[name] if data_elem.dclass == 'storm': data_elem['x']", "/ 2, ymax, step) x_coords = np.repeat(xi, len(yi)).reshape(len(xi), len(yi)).T y_coords", "s yield result def generate_images(cell_list, num_images, cell_per_img, cell_per_img_std, shape): nums", "tifffile.imsave(os.path.join(data_dir, 'images', 'binary.tif'), out_dict['binary']) tifffile.imsave(os.path.join(data_dir, 'images', 'brightfield.tif'), out_dict['brightfield']) tifffile.imsave(os.path.join(data_dir, 'images',", "= data_elem[~bools].copy() if name in out_dict: out_dict[name] = np.append(out_dict[name], data_out)", "< max_dist): continue valid_position = True for name in data.names:", "objects by placing them randomly oriented in the image.\"\"\" cell_list", "images from a list of cell objects by placing them", "= 360 * np.random.rand(len(cells)) data_list = [cell.data.rotate(theta) for cell, theta", "yield result def generate_images(cell_list, num_images, cell_per_img, cell_per_img_std, shape): nums =", "i, data in enumerate(data_list): valid_position = False while not valid_position:", "from colicoords import load import numpy as np import mahotas", "ymax = shape[1], shape[0] bools = (data_elem['x'] < 0) +", "return out_dict def gen_image_from_storm(storm_table, 
shape, sigma=1.54, sigma_std=0.3): xmax = shape[1]", "import mahotas as mh from tqdm import tqdm import os", "min2 = np.max([min2, 0]) max2 = np.min([max2, shape[1]]) temp_binary =", "sigma * np.ones_like(x) if not sigma_std else np.random.normal(sigma, sigma_std, size=len(x))", "tifffile.imsave(os.path.join(data_dir, 'images', 'brightfield.tif'), out_dict['brightfield']) tifffile.imsave(os.path.join(data_dir, 'images', 'foci_inner.tif'), out_dict['foci_inner']) tifffile.imsave(os.path.join(data_dir, 'images',", "chunked = [chunk for chunk in tqdm(chunk_list(cell_list, nums))] dicts =", "'storm_outer.txt'), out_dict['storm_inner']) def noise_bf(data_dir): \"\"\"add poissonian and readout noise to", "'binary.tif'), out_dict['binary']) tifffile.imsave(os.path.join(data_dir, 'images', 'brightfield.tif'), out_dict['brightfield']) tifffile.imsave(os.path.join(data_dir, 'images', 'foci_inner.tif'), out_dict['foci_inner'])", "generate_images(cell_list, num_images, cell_per_img, cell_per_img_std, shape): nums = np.round(np.random.normal(cell_per_img, cell_per_img_std, num_images)).astype(int)", "chunk in tqdm(chunk_list(cell_list, nums))] dicts = [generate_image(cells, shape) for cells", "for k, v in d.items(): if 'storm' in k: v['frame']", "min2:max2] = data_cropped.binary_img out_binary = (out_dict['binary'] > 0).astype(int) distance_map =", "'bf_noise_{}_photons.npy'.format(photons)), img) if __name__ == '__main__': np.random.seed(42) data_dir = r'.'", "2, ymax, step) x_coords = np.repeat(xi, len(yi)).reshape(len(xi), len(yi)).T y_coords =", "shape[0] step = 1 xi = np.arange(step / 2, xmax,", "data.shape[1] # Crop the data for when the cell is", "shape[1] ymax = shape[0] step = 1 xi = np.arange(step", "os.mkdir(os.path.join(data_dir, 'images')) np.save(os.path.join(data_dir, 'images', 'binary.npy'), out_dict['binary']) np.save(os.path.join(data_dir, 'images', 'brightfield.npy'), out_dict['brightfield'])", "'images', 'foci_inner.npy'), out_dict['foci_inner']) 
def generate_images(cell_list, num_images, cell_per_img, cell_per_img_std, shape):
    """Generate a stack of synthetic images from a list of cell objects.

    Parameters
    ----------
    cell_list : sequence
        Cell objects to distribute over the generated images.
    num_images : int
        Number of images to generate.
    cell_per_img : float
        Mean number of cells per image.
    cell_per_img_std : float
        Standard deviation of the number of cells per image.
    shape : tuple of int
        Shape ``(rows, cols)`` of each generated image.

    Returns
    -------
    dict
        Image-type entries are arrays of shape ``(num_images, *shape)``;
        STORM-type entries (keys containing ``'storm'``) are concatenated
        localization tables with their ``'frame'`` field set to the
        1-based image index.
    """
    # Draw a cell count per image; discard non-positive draws (those images
    # simply stay empty/zero in the output stack).
    nums = np.round(np.random.normal(cell_per_img, cell_per_img_std, num_images)).astype(int)
    nums = nums[nums > 0]
    # Fix: having exactly sum(nums) cells available is sufficient, so use <=
    # instead of the original off-by-one strict comparison.
    assert sum(nums) <= len(cell_list), 'Not enough cells'
    chunked = [chunk for chunk in tqdm(chunk_list(cell_list, nums))]
    dicts = [generate_image(cells, shape) for cells in tqdm(chunked)]

    out_dict = {}
    for i, d in enumerate(dicts):
        for k, v in d.items():
            if 'storm' in k:
                # STORM tables are concatenated across images; 'frame' records
                # which (1-based) image each localization belongs to.
                v['frame'] = i + 1
                if k in out_dict:
                    out_dict[k] = np.append(out_dict[k], v)
                else:
                    out_dict[k] = v
            else:
                # Image-type data is stacked into a (num_images, *shape) array,
                # allocated lazily on first encounter of the key.
                if k in out_dict:
                    out_dict[k][i] = v
                else:
                    out_dict[k] = np.zeros((num_images, *shape))
                    out_dict[k][i] = v

    return out_dict
def generate_image(cells, shape, max_dist=5):
    """Compose one synthetic image by placing cells at random positions.

    Each cell is rotated by a random angle and assigned a random position;
    a candidate position is rejected (and redrawn) whenever the cell's binary
    mask comes within `max_dist` pixels of any previously placed cell.

    Parameters
    ----------
    cells : sequence
        Cell objects; each must expose ``.data`` with identical data-element
        names across all cells (asserted below).
    shape : tuple of int
        Shape ``(rows, cols)`` of the output image.
    max_dist : int, optional
        Minimum allowed distance (pixels) between cells (default 5).

    Returns
    -------
    dict
        Per data-element name: accumulated image arrays for image-type data,
        shifted/filtered localization tables for STORM-type data.
    """
    thetas = 360 * np.random.rand(len(cells))  # random rotation angle (degrees) per cell
    data_list = [cell.data.rotate(theta) for cell, theta in zip(cells, thetas)]
    assert all([data.names == data_list[0].names for data in data_list]), 'All cells must have the same data elements'
    # One zero-filled output image per non-STORM data element; STORM tables
    # are accumulated by appending instead of pixel summation.
    out_dict = {name: np.zeros(shape) for name, dclass in zip(data_list[0].names, data_list[0].dclasses) if dclass != 'storm'}

    for i, data in enumerate(data_list):
        valid_position = False
        while not valid_position:
            # Candidate anchor; presumably the cell's lower-right corner,
            # since min1/min2 subtract the cell extent — TODO confirm.
            pos_x = int(np.round(shape[1] * np.random.rand()))
            pos_y = int(np.round(shape[0] * np.random.rand()))

            min1 = pos_y - int(np.floor(data.shape[0]))
            max1 = min1 + data.shape[0]
            min2 = pos_x - int(np.floor(data.shape[1]))
            max2 = min2 + data.shape[1]

            # Crop the data for when the cell is on the border of the image
            d_min1 = np.max([0 - min1, 0])
            d_max1 = np.min([data.shape[0] + (shape[0] - pos_y), data.shape[0]])
            d_min2 = np.max([0 - min2, 0])
            d_max2 = np.min([data.shape[1] + (shape[1] - pos_x), data.shape[1]])

            data_cropped = data[d_min1:d_max1, d_min2:d_max2]

            # Limit image position to the edges of the image
            min1 = np.max([min1, 0])
            max1 = np.min([max1, shape[0]])
            min2 = np.max([min2, 0])
            max2 = np.min([max2, shape[1]])

            temp_binary = np.zeros(shape)
            temp_binary[min1:max1, min2:max2] = data_cropped.binary_img

            # Reject the candidate if its mask lies within max_dist pixels of
            # cells already placed (distance transform of the free space).
            out_binary = (out_dict['binary'] > 0).astype(int)
            distance_map = mh.distance(1 - out_binary, metric='euclidean')
            if np.any(distance_map[temp_binary.astype(bool)] < max_dist):
                continue

            valid_position = True

        for name in data.names:
            data_elem = data_cropped.data_dict[name]
            if data_elem.dclass == 'storm':
                # Shift localizations into image coordinates, then drop any
                # that fall outside the image bounds.
                data_elem['x'] += min2
                data_elem['y'] += min1

                xmax, ymax = shape[1], shape[0]
                bools = (data_elem['x'] < 0) + (data_elem['x'] > xmax) + (data_elem['y'] < 0) + (data_elem['y'] > ymax)
                data_out = data_elem[~bools].copy()
                if name in out_dict:
                    out_dict[name] = np.append(out_dict[name], data_out)
                else:
                    out_dict[name] = data_out
                continue
            elif data_elem.dclass == 'binary':
                # Label each cell with a unique integer (i+1) in the binary image.
                out_dict[name][min1:max1, min2:max2] += ((i+1)*data_elem)
            else:
                # Fluorescence/brightfield-type data is summed into the image.
                out_dict[name][min1:max1, min2:max2] += data_elem

    return out_dict
def gen_image_from_storm(storm_table, shape, sigma=1.54, sigma_std=0.3):
    """Render a diffraction-limited image from a STORM localization table.

    Each localization in *storm_table* (fields ``'x'``, ``'y'``,
    ``'intensity'``) is drawn as an isotropic 2D Gaussian on a pixel grid of
    the given shape; pixel centers sit at half-integer coordinates.

    Parameters
    ----------
    storm_table : mapping or structured array
        Localization table with ``'x'``, ``'y'`` and ``'intensity'`` fields.
    shape : tuple of int
        Output image shape ``(rows, cols)``.
    sigma : float, optional
        Mean Gaussian width in pixels (default 1.54).
    sigma_std : float, optional
        If truthy, per-localization widths are drawn from
        ``N(sigma, sigma_std)``; if falsy, all widths equal *sigma*.

    Returns
    -------
    numpy.ndarray
        Rendered image of the given shape.
    """
    ymax, xmax = shape[0], shape[1]
    step = 1
    xi = np.arange(step / 2, xmax, step)
    yi = np.arange(step / 2, ymax, step)
    # Pixel-center coordinate grids; x varies along columns, y along rows.
    x_coords, y_coords = np.meshgrid(xi, yi)

    x, y = storm_table['x'], storm_table['y']
    intensities = storm_table['intensity']
    if sigma_std:
        sigmas = np.random.normal(sigma, sigma_std, size=len(x))
    else:
        sigmas = sigma * np.ones_like(x)

    img = np.zeros_like(x_coords)
    for _sigma, _int, _x, _y in zip(sigmas, intensities, x, y):
        dx = (_x - x_coords) / _sigma
        dy = (_y - y_coords) / _sigma
        img += _int * np.exp(-(dx ** 2 + dy ** 2) / 2)

    return img
def gen_im(data_dir):
    """Generate microscopy images from a list of cell objects by placing them randomly oriented in the image.

    Loads the curated cell objects from ``<data_dir>/cell_obj``, renders 1000
    images of shape (512, 512) with ~10±3 cells each, and writes every data
    channel to ``<data_dir>/images`` as ``.npy``, ``.tif`` and (for STORM
    tables) ``.txt`` files.
    """
    cell_list = load(os.path.join(data_dir, 'cell_obj', 'cells_final_selected.hdf5'))
    out_dict = generate_images(cell_list, 1000, 10, 3, (512, 512))
    images_dir = os.path.join(data_dir, 'images')
    if not os.path.exists(images_dir):
        os.mkdir(images_dir)

    # All channels are saved as .npy; only image-type channels as .tif.
    for name in ['binary', 'brightfield', 'foci_inner', 'foci_outer', 'storm_inner', 'storm_outer']:
        np.save(os.path.join(images_dir, name + '.npy'), out_dict[name])
    for name in ['binary', 'brightfield', 'foci_inner', 'foci_outer']:
        tifffile.imsave(os.path.join(images_dir, name + '.tif'), out_dict[name])

    np.savetxt(os.path.join(images_dir, 'storm_inner.txt'), out_dict['storm_inner'])
    # Bug fix: the original wrote the *inner* STORM table into storm_outer.txt.
    np.savetxt(os.path.join(images_dir, 'storm_outer.txt'), out_dict['storm_outer'])
def noise_bf(data_dir, noise=20, photon_levels=(10000, 1000, 500)):
    """Add poissonian and readout noise to brightfield images.

    Loads ``<data_dir>/images/brightfield.npy``, scales it to several photon
    budgets, applies Poisson (shot) noise and Gaussian readout noise, and
    writes each noisy stack back as ``bf_noise_<photons>_photons.{tif,npy}``.

    Parameters
    ----------
    data_dir : str
        Directory containing the ``images`` subdirectory.
    noise : float, optional
        Readout-noise amplitude passed to ``add_readout_noise``
        (default 20, matching the original hard-coded value).
    photon_levels : iterable of int, optional
        Background photon counts to simulate (default ``(10000, 1000, 500)``).
    """
    img_stack = np.load(os.path.join(data_dir, 'images', 'brightfield.npy'))
    for photons in photon_levels:
        ratio = 1.0453  # ratio between 'background' (no cells) and cell wall
        # Map the normalized brightfield onto photon counts above background.
        img = (photons * (ratio - 1)) * img_stack + photons
        img = draw_poisson(img)
        img = add_readout_noise(img, noise)
        tifffile.imsave(os.path.join(data_dir, 'images', 'bf_noise_{}_photons.tif'.format(photons)), img)
        np.save(os.path.join(data_dir, 'images', 'bf_noise_{}_photons.npy'.format(photons)), img)
if __name__ == '__main__':
    np.random.seed(42)  # fixed seed so the generated dataset is reproducible
    data_dir = r'.'
    images_dir = os.path.join(data_dir, 'images')
    if not os.path.exists(images_dir):
        os.mkdir(images_dir)
    gen_im(data_dir)
    noise_bf(data_dir)
if", "noise_bf(data_dir): \"\"\"add poissonian and readout noise to brightfield images\"\"\" noise", "'storm'} for i, data in enumerate(data_list): valid_position = False while", "data.shape[0]]) d_min2 = np.max([0 - min2, 0]) d_max2 = np.min([data.shape[1]", "out_dict def generate_image(cells, shape, max_dist=5): thetas = 360 * np.random.rand(len(cells))", "i, d in enumerate(dicts): for k, v in d.items(): if", "'images', 'storm_outer.npy'), out_dict['storm_outer']) tifffile.imsave(os.path.join(data_dir, 'images', 'binary.tif'), out_dict['binary']) tifffile.imsave(os.path.join(data_dir, 'images', 'brightfield.tif'),", "thetas = 360 * np.random.rand(len(cells)) data_list = [cell.data.rotate(theta) for cell,", "= np.max([min1, 0]) max1 = np.min([max1, shape[0]]) min2 = np.max([min2,", "2) / 2) return img def gen_im(data_dir): \"\"\"Generate microscopy images", "'foci_inner.npy'), out_dict['foci_inner']) np.save(os.path.join(data_dir, 'images', 'foci_outer.npy'), out_dict['foci_outer']) np.save(os.path.join(data_dir, 'images', 'storm_inner.npy'), out_dict['storm_inner'])", "def gen_image_from_storm(storm_table, shape, sigma=1.54, sigma_std=0.3): xmax = shape[1] ymax =", "0]) max2 = np.min([max2, shape[1]]) temp_binary = np.zeros(shape) temp_binary[min1:max1, min2:max2]", "- int(np.floor(data.shape[1])) max2 = min2 + data.shape[1] # Crop the", "False while not valid_position: pos_x = int(np.round(shape[1] * np.random.rand())) pos_y" ]
[ "self._id = settings[\"id\"] self._title = settings[\"title\"] self._type = settings[\"type\"][\"name\"] self._value", "DeviceSettings: def __init__(self, settings): self._id = settings[\"id\"] self._title = settings[\"title\"]", "= settings[\"title\"] self._type = settings[\"type\"][\"name\"] self._value = settings[\"value\"] @property def", "def __init__(self, settings): self._id = settings[\"id\"] self._title = settings[\"title\"] self._type", "settings): self._id = settings[\"id\"] self._title = settings[\"title\"] self._type = settings[\"type\"][\"name\"]", "__init__(self, settings): self._id = settings[\"id\"] self._title = settings[\"title\"] self._type =", "<filename>epiphancloud/models/settings.py class DeviceSettings: def __init__(self, settings): self._id = settings[\"id\"] self._title", "settings[\"type\"][\"name\"] self._value = settings[\"value\"] @property def id(self): return self._id @property", "self._type = settings[\"type\"][\"name\"] self._value = settings[\"value\"] @property def id(self): return", "= settings[\"type\"][\"name\"] self._value = settings[\"value\"] @property def id(self): return self._id", "self._title = settings[\"title\"] self._type = settings[\"type\"][\"name\"] self._value = settings[\"value\"] @property", "= settings[\"value\"] @property def id(self): return self._id @property def value(self):", "settings[\"id\"] self._title = settings[\"title\"] self._type = settings[\"type\"][\"name\"] self._value = settings[\"value\"]", "class DeviceSettings: def __init__(self, settings): self._id = settings[\"id\"] self._title =", "settings[\"title\"] self._type = settings[\"type\"][\"name\"] self._value = settings[\"value\"] @property def id(self):", "@property def id(self): return self._id @property def value(self): return self._value", "= settings[\"id\"] self._title = settings[\"title\"] self._type = settings[\"type\"][\"name\"] self._value =", "self._value = settings[\"value\"] @property def id(self): return self._id @property 
def", "settings[\"value\"] @property def id(self): return self._id @property def value(self): return" ]
[ "a word: \") if word == \"a\": print(\"one; any\") elif", "print(\"one; any\") elif word == \"apple\": print(\"familiar, round fleshy fruit\")", "any\") elif word == \"apple\": print(\"familiar, round fleshy fruit\") elif", "word: \") if word == \"a\": print(\"one; any\") elif word", "elif word == \"rhinoceros\": print(\"large thick-skinned animal with one or", "word = input(\"Enter a word: \") if word == \"a\":", "one or two horns on its nose\") else: print(\"That word", "== \"rhinoceros\": print(\"large thick-skinned animal with one or two horns", "\") if word == \"a\": print(\"one; any\") elif word ==", "if word == \"a\": print(\"one; any\") elif word == \"apple\":", "\"apple\": print(\"familiar, round fleshy fruit\") elif word == \"rhinoceros\": print(\"large", "animal with one or two horns on its nose\") else:", "word == \"a\": print(\"one; any\") elif word == \"apple\": print(\"familiar,", "or two horns on its nose\") else: print(\"That word must", "on its nose\") else: print(\"That word must not exist. 
This", "print(\"large thick-skinned animal with one or two horns on its", "word == \"rhinoceros\": print(\"large thick-skinned animal with one or two", "two horns on its nose\") else: print(\"That word must not", "\"rhinoceros\": print(\"large thick-skinned animal with one or two horns on", "round fleshy fruit\") elif word == \"rhinoceros\": print(\"large thick-skinned animal", "\"a\": print(\"one; any\") elif word == \"apple\": print(\"familiar, round fleshy", "fleshy fruit\") elif word == \"rhinoceros\": print(\"large thick-skinned animal with", "horns on its nose\") else: print(\"That word must not exist.", "with one or two horns on its nose\") else: print(\"That", "== \"apple\": print(\"familiar, round fleshy fruit\") elif word == \"rhinoceros\":", "fruit\") elif word == \"rhinoceros\": print(\"large thick-skinned animal with one", "word == \"apple\": print(\"familiar, round fleshy fruit\") elif word ==", "thick-skinned animal with one or two horns on its nose\")", "elif word == \"apple\": print(\"familiar, round fleshy fruit\") elif word", "else: print(\"That word must not exist. This dictionary is very", "print(\"familiar, round fleshy fruit\") elif word == \"rhinoceros\": print(\"large thick-skinned", "<filename>dictionary.py word = input(\"Enter a word: \") if word ==", "= input(\"Enter a word: \") if word == \"a\": print(\"one;", "== \"a\": print(\"one; any\") elif word == \"apple\": print(\"familiar, round", "its nose\") else: print(\"That word must not exist. This dictionary", "print(\"That word must not exist. This dictionary is very comprehensive.\")", "input(\"Enter a word: \") if word == \"a\": print(\"one; any\")", "nose\") else: print(\"That word must not exist. This dictionary is" ]
[ "0 for i in range(len(num)): sum = sum + num[i]", "input())) sum = 0 for i in range(len(num)): sum =", "int(input()) num = list(map(int, input())) sum = 0 for i", "<gh_stars>0 cnt = int(input()) num = list(map(int, input())) sum =", "for i in range(len(num)): sum = sum + num[i] print(sum)", "list(map(int, input())) sum = 0 for i in range(len(num)): sum", "= list(map(int, input())) sum = 0 for i in range(len(num)):", "num = list(map(int, input())) sum = 0 for i in", "sum = 0 for i in range(len(num)): sum = sum", "cnt = int(input()) num = list(map(int, input())) sum = 0", "= int(input()) num = list(map(int, input())) sum = 0 for", "= 0 for i in range(len(num)): sum = sum +" ]
[ "version = '0.1', description = 'Kubernetes resource discovery toolkit', author", "repo download_url = 'https://github.com/sdnhub/kube-navi/archive/0.1.tar.gz', # I'll explain this in a", "above version = '0.1', description = 'Kubernetes resource discovery toolkit',", "= ['testing', 'logging', 'example'], # arbitrary keywords classifiers = [],", "in a second keywords = ['testing', 'logging', 'example'], # arbitrary", "resource discovery toolkit', author = '<NAME>', author_email = '<EMAIL>', url", "'https://github.com/sdnhub/kube-navi', # use the URL to the github repo download_url", "toolkit', author = '<NAME>', author_email = '<EMAIL>', url = 'https://github.com/sdnhub/kube-navi',", "to the github repo download_url = 'https://github.com/sdnhub/kube-navi/archive/0.1.tar.gz', # I'll explain", "url = 'https://github.com/sdnhub/kube-navi', # use the URL to the github", "be the same as the name above version = '0.1',", "import setup setup( name = 'kube_navi', packages = ['kube_navi'], #", "= 'Kubernetes resource discovery toolkit', author = '<NAME>', author_email =", "= '<NAME>', author_email = '<EMAIL>', url = 'https://github.com/sdnhub/kube-navi', # use", "['kube_navi'], # this must be the same as the name", "the URL to the github repo download_url = 'https://github.com/sdnhub/kube-navi/archive/0.1.tar.gz', #", "# this must be the same as the name above", "description = 'Kubernetes resource discovery toolkit', author = '<NAME>', author_email", "= 'kube_navi', packages = ['kube_navi'], # this must be the", "the same as the name above version = '0.1', description", "= 'https://github.com/sdnhub/kube-navi', # use the URL to the github repo", "the github repo download_url = 'https://github.com/sdnhub/kube-navi/archive/0.1.tar.gz', # I'll explain this", "name above version = '0.1', description = 'Kubernetes resource discovery", "this in a second keywords = ['testing', 'logging', 'example'], #", "setup( name = 'kube_navi', packages = ['kube_navi'], # this must", "I'll 
explain this in a second keywords = ['testing', 'logging',", "github repo download_url = 'https://github.com/sdnhub/kube-navi/archive/0.1.tar.gz', # I'll explain this in", "<gh_stars>0 from distutils.core import setup setup( name = 'kube_navi', packages", "'Kubernetes resource discovery toolkit', author = '<NAME>', author_email = '<EMAIL>',", "discovery toolkit', author = '<NAME>', author_email = '<EMAIL>', url =", "as the name above version = '0.1', description = 'Kubernetes", "= ['kube_navi'], # this must be the same as the", "a second keywords = ['testing', 'logging', 'example'], # arbitrary keywords", "download_url = 'https://github.com/sdnhub/kube-navi/archive/0.1.tar.gz', # I'll explain this in a second", "must be the same as the name above version =", "same as the name above version = '0.1', description =", "URL to the github repo download_url = 'https://github.com/sdnhub/kube-navi/archive/0.1.tar.gz', # I'll", "the name above version = '0.1', description = 'Kubernetes resource", "'https://github.com/sdnhub/kube-navi/archive/0.1.tar.gz', # I'll explain this in a second keywords =", "use the URL to the github repo download_url = 'https://github.com/sdnhub/kube-navi/archive/0.1.tar.gz',", "name = 'kube_navi', packages = ['kube_navi'], # this must be", "'kube_navi', packages = ['kube_navi'], # this must be the same", "'<NAME>', author_email = '<EMAIL>', url = 'https://github.com/sdnhub/kube-navi', # use the", "this must be the same as the name above version", "# I'll explain this in a second keywords = ['testing',", "= 'https://github.com/sdnhub/kube-navi/archive/0.1.tar.gz', # I'll explain this in a second keywords", "distutils.core import setup setup( name = 'kube_navi', packages = ['kube_navi'],", "from distutils.core import setup setup( name = 'kube_navi', packages =", "packages = ['kube_navi'], # this must be the same as", "'0.1', description = 'Kubernetes resource discovery toolkit', author = '<NAME>',", "author_email = '<EMAIL>', url = 
'https://github.com/sdnhub/kube-navi', # use the URL", "# use the URL to the github repo download_url =", "author = '<NAME>', author_email = '<EMAIL>', url = 'https://github.com/sdnhub/kube-navi', #", "['testing', 'logging', 'example'], # arbitrary keywords classifiers = [], )", "= '0.1', description = 'Kubernetes resource discovery toolkit', author =", "= '<EMAIL>', url = 'https://github.com/sdnhub/kube-navi', # use the URL to", "'<EMAIL>', url = 'https://github.com/sdnhub/kube-navi', # use the URL to the", "explain this in a second keywords = ['testing', 'logging', 'example'],", "keywords = ['testing', 'logging', 'example'], # arbitrary keywords classifiers =", "second keywords = ['testing', 'logging', 'example'], # arbitrary keywords classifiers", "setup setup( name = 'kube_navi', packages = ['kube_navi'], # this" ]
[ "OF ANY # KIND, either express or implied. See the", "stringValue from ai_flow.common.properties import Properties from ai_flow.meta.metric_meta import MetricMeta, MetricType,", "= ast.literal_eval(properties) return MetricMeta(uuid=metric_meta_result.uuid, name=metric_meta_result.name, dataset_id=metric_meta_result.dataset_id, model_name=metric_meta_result.model_name, model_version=metric_meta_result.model_version, job_id=metric_meta_result.job_id, start_time=metric_meta_result.start_time,", "more contributor license agreements. See the NOTICE file # distributed", "start_time: int, end_time: int, metric_type: MetricType, uri: Text, tags: Text,", "proto_to_metric_summary(metric_summary_proto: MetricSummaryProto) -> MetricSummary: return MetricSummary(uuid=metric_summary_proto.uuid, metric_id=metric_summary_proto.metric_id.value, metric_key=metric_summary_proto.metric_key.value if metric_summary_proto.HasField('metric_key')", "return ListMetricMetaResponse(return_code=0, return_msg=ReturnCode.Name(SUCCESS).lower(), metric_meta=res) else: return ListMetricMetaResponse(return_code=1, return_msg=ReturnCode.Name(RESOURCE_DOES_NOT_EXIST).lower(), metric_meta=None) def", "Apache Software Foundation (ASF) under one # or more contributor", "def _warp_metric_summary_response(metric_summary: Optional[MetricSummary]) -> MetricSummaryResponse: if metric_summary is not None:", "WARRANTIES OR CONDITIONS OF ANY # KIND, either express or", "if isinstance(metric_meta, MetricMeta): return ListMetricMetaResponse(return_code=0, return_msg=ReturnCode.Name(SUCCESS).lower(), metric_meta=[metric_meta_to_proto(metric_meta)]) else: res =", "model_version: Optional[Text], job_id: int, start_time: int, end_time: int, metric_type: MetricType,", "2.0 (the # \"License\"); you may not use this file", "metric_meta is not None: if isinstance(metric_meta, MetricMeta): return ListMetricMetaResponse(return_code=0, return_msg=ReturnCode.Name(SUCCESS).lower(),", "else None, 
metric_value=metric_summary_proto.metric_value.value if metric_summary_proto.HasField('metric_value') else None ) def _warp_metric_meta_response(metric_meta:", "_warp_list_metric_summary_response(metric_summary: Optional[List[MetricSummary]]) -> ListMetricSummaryResponse: if metric_summary is not None: res", "start_time=metric_meta_proto.start_time.value, end_time=metric_meta_proto.end_time.value, metric_type=metric_type, uri=metric_meta_proto.uri.value if metric_meta_proto.HasField('uri') else None, tags=metric_meta_proto.tags.value if", "import SqlMetricMeta, SqlMetricSummary from ai_flow.store.db.db_model import MongoMetricSummary, MongoMetricMeta def table_to_metric_meta(metric_meta_result)", "for meta in metric_meta: res.append(metric_meta_to_proto(meta)) return ListMetricMetaResponse(return_code=0, return_msg=ReturnCode.Name(SUCCESS).lower(), metric_meta=res) else:", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "Optional[MetricSummary]) -> MetricSummaryResponse: if metric_summary is not None: return MetricSummaryResponse(return_code=0,", "MetricSummaryResponse, ListMetricSummaryResponse from ai_flow.rest_endpoint.service import int64Value, stringValue from ai_flow.common.properties import", "isinstance(metric_meta, MetricMeta): return ListMetricMetaResponse(return_code=0, return_msg=ReturnCode.Name(SUCCESS).lower(), metric_meta=[metric_meta_to_proto(metric_meta)]) else: res = []", "specific language governing permissions and limitations # under the License.", "job_id: int, start_time: int, end_time: int, metric_type: MetricType, uri: Text,", "under the License is distributed on an # \"AS IS\"", "metric_meta_to_table(name: Text, dataset_id: int, model_name: Optional[Text], model_version: Optional[Text], job_id: int,", "BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either", "metric_meta_result.properties if properties is not None: properties = ast.literal_eval(properties) return", "properties is not None: properties = 
ast.literal_eval(properties) return MetricMeta(uuid=metric_meta_result.uuid, name=metric_meta_result.name,", "metric_description: Text, properties: Properties, store_type: Text = 'SqlAlchemyStore'): if properties", "\"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY #", "_class = MongoMetricSummary else: _class = SqlMetricSummary return _class(metric_id=metric_id, metric_key=metric_key,", "List from ai_flow.rest_endpoint.protobuf.metric_service_pb2 import MetricMetaResponse, ListMetricMetaResponse, \\ MetricSummaryResponse, ListMetricSummaryResponse from", "MetricMeta): return ListMetricMetaResponse(return_code=0, return_msg=ReturnCode.Name(SUCCESS).lower(), metric_meta=[metric_meta_to_proto(metric_meta)]) else: res = [] for", "uri=stringValue(metric_meta.uri), tags=stringValue(metric_meta.tags), metric_description=stringValue(metric_meta.metric_description), properties=metric_meta.properties) def metric_summary_to_proto(metric_summary: MetricSummary) -> MetricSummaryProto: return", "distributed with this work for additional information # regarding copyright", "# under the License. 
# import ast from typing import", "from ai_flow.rest_endpoint.service import int64Value, stringValue from ai_flow.common.properties import Properties from", "for the # specific language governing permissions and limitations #", "_class = MongoMetricMeta else: _class = SqlMetricMeta return _class(name=name, dataset_id=dataset_id,", "See the License for the # specific language governing permissions", "to in writing, # software distributed under the License is", "ai_flow.rest_endpoint.protobuf.message_pb2 import MetricMetaProto, MetricSummaryProto, MetricTypeProto, ReturnCode, \\ SUCCESS, RESOURCE_DOES_NOT_EXIST from", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "metric_value=metric_summary_result.metric_value) def metric_meta_to_table(name: Text, dataset_id: int, model_name: Optional[Text], model_version: Optional[Text],", "-> MetricMeta: if MetricTypeProto.DATASET == metric_meta_proto.metric_type: metric_type = MetricType.DATASET else:", "metric_meta: res.append(metric_meta_to_proto(meta)) return ListMetricMetaResponse(return_code=0, return_msg=ReturnCode.Name(SUCCESS).lower(), metric_meta=res) else: return ListMetricMetaResponse(return_code=1, return_msg=ReturnCode.Name(RESOURCE_DOES_NOT_EXIST).lower(),", "ListMetricSummaryResponse from ai_flow.rest_endpoint.service import int64Value, stringValue from ai_flow.common.properties import Properties", "'SqlAlchemyStore'): if store_type == 'MongoStore': _class = MongoMetricSummary else: _class", "-> MetricMetaResponse: if metric_meta is not None: if isinstance(metric_meta, MetricMeta):", "== MetricType.DATASET: metric_type = MetricTypeProto.DATASET else: metric_type = MetricTypeProto.MODEL return", "MetricType, uri: Text, tags: Text, metric_description: Text, properties: Properties, store_type:", "file # distributed with this work for additional information #", "end_time=metric_meta_proto.end_time.value, metric_type=metric_type, uri=metric_meta_proto.uri.value if 
metric_meta_proto.HasField('uri') else None, tags=metric_meta_proto.tags.value if metric_meta_proto.HasField('tags')", "metric_meta_proto.HasField('uri') else None, tags=metric_meta_proto.tags.value if metric_meta_proto.HasField('tags') else None, metric_description=metric_meta_proto.metric_description.value if", "metric_value: Text, store_type: Text = 'SqlAlchemyStore'): if store_type == 'MongoStore':", "MetricMeta) -> MetricMetaProto: if metric_meta.metric_type == MetricType.DATASET: metric_type = MetricTypeProto.DATASET", "metric_meta=[metric_meta_to_proto(metric_meta)]) else: res = [] for meta in metric_meta: res.append(metric_meta_to_proto(meta))", "else None, tags=metric_meta_proto.tags.value if metric_meta_proto.HasField('tags') else None, metric_description=metric_meta_proto.metric_description.value if metric_meta_proto.HasField('metric_description')", "if metric_meta_proto.HasField('tags') else None, metric_description=metric_meta_proto.metric_description.value if metric_meta_proto.HasField('metric_description') else None, properties=metric_meta_proto.properties", "model_version=metric_meta_result.model_version, job_id=metric_meta_result.job_id, start_time=metric_meta_result.start_time, end_time=metric_meta_result.end_time, metric_type=MetricType.value_of(metric_meta_result.metric_type), uri=metric_meta_result.uri, tags=metric_meta_result.tags, metric_description=metric_meta_result.metric_description, properties=properties) def", "-> MetricMetaProto: if metric_meta.metric_type == MetricType.DATASET: metric_type = MetricTypeProto.DATASET else:", "metric_meta.metric_type == MetricType.DATASET: metric_type = MetricTypeProto.DATASET else: metric_type = MetricTypeProto.MODEL", "metric_summary_to_proto(metric_summary: MetricSummary) -> MetricSummaryProto: return MetricSummaryProto(uuid=metric_summary.uuid, metric_id=int64Value(metric_summary.metric_id), metric_key=stringValue(metric_summary.metric_key), metric_value=stringValue(metric_summary.metric_value)) 
def", "metric_summary=None) def _warp_list_metric_summary_response(metric_summary: Optional[List[MetricSummary]]) -> ListMetricSummaryResponse: if metric_summary is not", "metric_id=metric_summary_result.metric_id, metric_key=metric_summary_result.metric_key, metric_value=metric_summary_result.metric_value) def metric_meta_to_table(name: Text, dataset_id: int, model_name: Optional[Text],", "in metric_summary: res.append(metric_summary_to_proto(summary)) return ListMetricSummaryResponse(return_code=0, return_msg=ReturnCode.Name(SUCCESS).lower(), metric_summary=res) else: return ListMetricSummaryResponse(return_code=1,", "metric_key=metric_summary_proto.metric_key.value if metric_summary_proto.HasField('metric_key') else None, metric_value=metric_summary_proto.metric_value.value if metric_summary_proto.HasField('metric_value') else None", "Text, store_type: Text = 'SqlAlchemyStore'): if store_type == 'MongoStore': _class", "implied. See the License for the # specific language governing", "to you under the Apache License, Version 2.0 (the #", "import MongoMetricSummary, MongoMetricMeta def table_to_metric_meta(metric_meta_result) -> MetricMeta: properties = metric_meta_result.properties", "properties is not None: properties = str(properties) if store_type ==", "ai_flow.rest_endpoint.service import int64Value, stringValue from ai_flow.common.properties import Properties from ai_flow.meta.metric_meta", "table_to_metric_summary(metric_summary_result) -> MetricSummary: return MetricSummary(uuid=metric_summary_result.uuid, metric_id=metric_summary_result.metric_id, metric_key=metric_summary_result.metric_key, metric_value=metric_summary_result.metric_value) def metric_meta_to_table(name:", "MongoMetricSummary else: _class = SqlMetricSummary return _class(metric_id=metric_id, metric_key=metric_key, metric_value=metric_value) def", "not None: res = [] for summary in metric_summary: res.append(metric_summary_to_proto(summary))", "may not use this file except in compliance # 
with", "res = [] for meta in metric_meta: res.append(metric_meta_to_proto(meta)) return ListMetricMetaResponse(return_code=0,", "MetricType.DATASET else: metric_type = MetricType.MODEL return MetricMeta(uuid=metric_meta_proto.uuid, name=metric_meta_proto.name.value, dataset_id=metric_meta_proto.dataset_id.value, model_name=metric_meta_proto.model_name.value,", "model_version=stringValue(metric_meta.model_version), job_id=int64Value(metric_meta.job_id), start_time=int64Value(metric_meta.start_time), end_time=int64Value(metric_meta.end_time), metric_type=metric_type, uri=stringValue(metric_meta.uri), tags=stringValue(metric_meta.tags), metric_description=stringValue(metric_meta.metric_description), properties=metric_meta.properties) def", "import Properties from ai_flow.meta.metric_meta import MetricMeta, MetricType, MetricSummary from ai_flow.rest_endpoint.protobuf.message_pb2", "def metric_summary_to_table(metric_id: int, metric_key: Text, metric_value: Text, store_type: Text =", "else: return ListMetricMetaResponse(return_code=1, return_msg=ReturnCode.Name(RESOURCE_DOES_NOT_EXIST).lower(), metric_meta=None) def _warp_metric_summary_response(metric_summary: Optional[MetricSummary]) -> MetricSummaryResponse:", "properties=properties) def metric_summary_to_table(metric_id: int, metric_key: Text, metric_value: Text, store_type: Text", "[] for summary in metric_summary: res.append(metric_summary_to_proto(summary)) return ListMetricSummaryResponse(return_code=0, return_msg=ReturnCode.Name(SUCCESS).lower(), metric_summary=res)", ") def _warp_metric_meta_response(metric_meta: Optional[MetricMeta]) -> MetricMetaResponse: if metric_meta is not", "end_time: int, metric_type: MetricType, uri: Text, tags: Text, metric_description: Text,", "License, Version 2.0 (the # \"License\"); you may not use", "model_version=model_version, job_id=job_id, start_time=start_time, end_time=end_time, metric_type=metric_type.value, uri=uri, tags=tags, metric_description=metric_description, 
properties=properties) def", "either express or implied. See the License for the #", "if metric_meta.metric_type == MetricType.DATASET: metric_type = MetricTypeProto.DATASET else: metric_type =", "None: return MetricMetaResponse(return_code=0, return_msg=ReturnCode.Name(SUCCESS).lower(), metric_meta=metric_meta_to_proto(metric_meta)) else: return MetricMetaResponse(return_code=1, return_msg=ReturnCode.Name(RESOURCE_DOES_NOT_EXIST).lower(), metric_meta=None)", "MongoMetricMeta def table_to_metric_meta(metric_meta_result) -> MetricMeta: properties = metric_meta_result.properties if properties", "MetricSummaryResponse(return_code=1, return_msg=ReturnCode.Name(RESOURCE_DOES_NOT_EXIST).lower(), metric_summary=None) def _warp_list_metric_summary_response(metric_summary: Optional[List[MetricSummary]]) -> ListMetricSummaryResponse: if metric_summary", "== 'MongoStore': _class = MongoMetricSummary else: _class = SqlMetricSummary return", "additional information # regarding copyright ownership. The ASF licenses this", "if MetricTypeProto.DATASET == metric_meta_proto.metric_type: metric_type = MetricType.DATASET else: metric_type =", "def _warp_metric_meta_response(metric_meta: Optional[MetricMeta]) -> MetricMetaResponse: if metric_meta is not None:", "See the NOTICE file # distributed with this work for", "MetricTypeProto.MODEL return MetricMetaProto(uuid=metric_meta.uuid, name=stringValue(metric_meta.name), dataset_id=int64Value(metric_meta.dataset_id), model_name=stringValue(metric_meta.model_name), model_version=stringValue(metric_meta.model_version), job_id=int64Value(metric_meta.job_id), start_time=int64Value(metric_meta.start_time), end_time=int64Value(metric_meta.end_time),", "Text, Optional, Union, List from ai_flow.rest_endpoint.protobuf.metric_service_pb2 import MetricMetaResponse, ListMetricMetaResponse, \\", "return ListMetricMetaResponse(return_code=1, return_msg=ReturnCode.Name(RESOURCE_DOES_NOT_EXIST).lower(), metric_meta=None) def 
_warp_metric_summary_response(metric_summary: Optional[MetricSummary]) -> MetricSummaryResponse: if", "metric_key=metric_summary_result.metric_key, metric_value=metric_summary_result.metric_value) def metric_meta_to_table(name: Text, dataset_id: int, model_name: Optional[Text], model_version:", "import ast from typing import Text, Optional, Union, List from", "None: return MetricSummaryResponse(return_code=0, return_msg=ReturnCode.Name(SUCCESS).lower(), metric_summary=metric_summary_to_proto(metric_summary)) else: return MetricSummaryResponse(return_code=1, return_msg=ReturnCode.Name(RESOURCE_DOES_NOT_EXIST).lower(), metric_summary=None)", "SqlMetricSummary from ai_flow.store.db.db_model import MongoMetricSummary, MongoMetricMeta def table_to_metric_meta(metric_meta_result) -> MetricMeta:", "uri=metric_meta_proto.uri.value if metric_meta_proto.HasField('uri') else None, tags=metric_meta_proto.tags.value if metric_meta_proto.HasField('tags') else None,", "metric_meta=metric_meta_to_proto(metric_meta)) else: return MetricMetaResponse(return_code=1, return_msg=ReturnCode.Name(RESOURCE_DOES_NOT_EXIST).lower(), metric_meta=None) def _warp_list_metric_meta_response(metric_meta: Union[None, MetricMeta,", "if metric_summary is not None: return MetricSummaryResponse(return_code=0, return_msg=ReturnCode.Name(SUCCESS).lower(), metric_summary=metric_summary_to_proto(metric_summary)) else:", "Apache License, Version 2.0 (the # \"License\"); you may not", "metric_meta is not None: return MetricMetaResponse(return_code=0, return_msg=ReturnCode.Name(SUCCESS).lower(), metric_meta=metric_meta_to_proto(metric_meta)) else: return", "'MongoStore': _class = MongoMetricSummary else: _class = SqlMetricSummary return _class(metric_id=metric_id,", "None: if isinstance(metric_meta, MetricMeta): return ListMetricMetaResponse(return_code=0, return_msg=ReturnCode.Name(SUCCESS).lower(), metric_meta=[metric_meta_to_proto(metric_meta)]) else: res", "name=metric_meta_proto.name.value, 
dataset_id=metric_meta_proto.dataset_id.value, model_name=metric_meta_proto.model_name.value, model_version=metric_meta_proto.model_version.value, job_id=metric_meta_proto.job_id.value, start_time=metric_meta_proto.start_time.value, end_time=metric_meta_proto.end_time.value, metric_type=metric_type, uri=metric_meta_proto.uri.value if", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "metric_key=metric_key, metric_value=metric_value) def metric_meta_to_proto(metric_meta: MetricMeta) -> MetricMetaProto: if metric_meta.metric_type ==", "ListMetricMetaResponse(return_code=1, return_msg=ReturnCode.Name(RESOURCE_DOES_NOT_EXIST).lower(), metric_meta=None) def _warp_metric_summary_response(metric_summary: Optional[MetricSummary]) -> MetricSummaryResponse: if metric_summary", "file except in compliance # with the License. You may", "ListMetricMetaResponse(return_code=0, return_msg=ReturnCode.Name(SUCCESS).lower(), metric_meta=res) else: return ListMetricMetaResponse(return_code=1, return_msg=ReturnCode.Name(RESOURCE_DOES_NOT_EXIST).lower(), metric_meta=None) def _warp_metric_summary_response(metric_summary:", "MetricMetaResponse: if metric_meta is not None: return MetricMetaResponse(return_code=0, return_msg=ReturnCode.Name(SUCCESS).lower(), metric_meta=metric_meta_to_proto(metric_meta))", "# specific language governing permissions and limitations # under the", "res.append(metric_meta_to_proto(meta)) return ListMetricMetaResponse(return_code=0, return_msg=ReturnCode.Name(SUCCESS).lower(), metric_meta=res) else: return ListMetricMetaResponse(return_code=1, return_msg=ReturnCode.Name(RESOURCE_DOES_NOT_EXIST).lower(), metric_meta=None)", "MetricSummaryResponse(return_code=0, return_msg=ReturnCode.Name(SUCCESS).lower(), metric_summary=metric_summary_to_proto(metric_summary)) else: return MetricSummaryResponse(return_code=1, return_msg=ReturnCode.Name(RESOURCE_DOES_NOT_EXIST).lower(), metric_summary=None) def 
_warp_list_metric_summary_response(metric_summary:", "meta in metric_meta: res.append(metric_meta_to_proto(meta)) return ListMetricMetaResponse(return_code=0, return_msg=ReturnCode.Name(SUCCESS).lower(), metric_meta=res) else: return", "= 'SqlAlchemyStore'): if properties is not None: properties = str(properties)", "you may not use this file except in compliance #", "use this file except in compliance # with the License.", "SUCCESS, RESOURCE_DOES_NOT_EXIST from ai_flow.store.db.db_model import SqlMetricMeta, SqlMetricSummary from ai_flow.store.db.db_model import", "dataset_id=dataset_id, model_name=model_name, model_version=model_version, job_id=job_id, start_time=start_time, end_time=end_time, metric_type=metric_type.value, uri=uri, tags=tags, metric_description=metric_description,", "Optional[MetricMeta]) -> MetricMetaResponse: if metric_meta is not None: return MetricMetaResponse(return_code=0,", "permissions and limitations # under the License. # import ast", "contributor license agreements. 
See the NOTICE file # distributed with", "def _warp_list_metric_meta_response(metric_meta: Union[None, MetricMeta, List[MetricMeta]]) -> MetricMetaResponse: if metric_meta is", "\\ MetricSummaryResponse, ListMetricSummaryResponse from ai_flow.rest_endpoint.service import int64Value, stringValue from ai_flow.common.properties", "an # \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF", "start_time=start_time, end_time=end_time, metric_type=metric_type.value, uri=uri, tags=tags, metric_description=metric_description, properties=properties) def metric_summary_to_table(metric_id: int,", "WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express", "with this work for additional information # regarding copyright ownership.", "Text, tags: Text, metric_description: Text, properties: Properties, store_type: Text =", "not None: if isinstance(metric_meta, MetricMeta): return ListMetricMetaResponse(return_code=0, return_msg=ReturnCode.Name(SUCCESS).lower(), metric_meta=[metric_meta_to_proto(metric_meta)]) else:", "tags=metric_meta_result.tags, metric_description=metric_meta_result.metric_description, properties=properties) def table_to_metric_summary(metric_summary_result) -> MetricSummary: return MetricSummary(uuid=metric_summary_result.uuid, metric_id=metric_summary_result.metric_id,", "model_name=stringValue(metric_meta.model_name), model_version=stringValue(metric_meta.model_version), job_id=int64Value(metric_meta.job_id), start_time=int64Value(metric_meta.start_time), end_time=int64Value(metric_meta.end_time), metric_type=metric_type, uri=stringValue(metric_meta.uri), tags=stringValue(metric_meta.tags), metric_description=stringValue(metric_meta.metric_description), properties=metric_meta.properties)", "metric_type=metric_type.value, uri=uri, tags=tags, metric_description=metric_description, properties=properties) def metric_summary_to_table(metric_id: int, metric_key: Text,", "work for additional information # regarding copyright ownership. 
The ASF", "distributed under the License is distributed on an # \"AS", "else: metric_type = MetricTypeProto.MODEL return MetricMetaProto(uuid=metric_meta.uuid, name=stringValue(metric_meta.name), dataset_id=int64Value(metric_meta.dataset_id), model_name=stringValue(metric_meta.model_name), model_version=stringValue(metric_meta.model_version),", "# software distributed under the License is distributed on an", "MetricMetaResponse(return_code=0, return_msg=ReturnCode.Name(SUCCESS).lower(), metric_meta=metric_meta_to_proto(metric_meta)) else: return MetricMetaResponse(return_code=1, return_msg=ReturnCode.Name(RESOURCE_DOES_NOT_EXIST).lower(), metric_meta=None) def _warp_list_metric_meta_response(metric_meta:", "the License. You may obtain a copy of the License", "model_name: Optional[Text], model_version: Optional[Text], job_id: int, start_time: int, end_time: int,", "None, tags=metric_meta_proto.tags.value if metric_meta_proto.HasField('tags') else None, metric_description=metric_meta_proto.metric_description.value if metric_meta_proto.HasField('metric_description') else", "Text = 'SqlAlchemyStore'): if store_type == 'MongoStore': _class = MongoMetricSummary", "if metric_meta is not None: if isinstance(metric_meta, MetricMeta): return ListMetricMetaResponse(return_code=0,", "return MetricSummaryProto(uuid=metric_summary.uuid, metric_id=int64Value(metric_summary.metric_id), metric_key=stringValue(metric_summary.metric_key), metric_value=stringValue(metric_summary.metric_value)) def proto_to_metric_meta(metric_meta_proto: MetricMetaProto) -> MetricMeta:", "if metric_summary_proto.HasField('metric_key') else None, metric_value=metric_summary_proto.metric_value.value if metric_summary_proto.HasField('metric_value') else None )", "metric_id=metric_summary_proto.metric_id.value, metric_key=metric_summary_proto.metric_key.value if metric_summary_proto.HasField('metric_key') else None, metric_value=metric_summary_proto.metric_value.value if 
metric_summary_proto.HasField('metric_value') else", "under the Apache License, Version 2.0 (the # \"License\"); you", "not None: properties = ast.literal_eval(properties) return MetricMeta(uuid=metric_meta_result.uuid, name=metric_meta_result.name, dataset_id=metric_meta_result.dataset_id, model_name=metric_meta_result.model_name,", "RESOURCE_DOES_NOT_EXIST from ai_flow.store.db.db_model import SqlMetricMeta, SqlMetricSummary from ai_flow.store.db.db_model import MongoMetricSummary,", "else None, properties=metric_meta_proto.properties ) def proto_to_metric_summary(metric_summary_proto: MetricSummaryProto) -> MetricSummary: return", "distributed on an # \"AS IS\" BASIS, WITHOUT WARRANTIES OR", "regarding copyright ownership. The ASF licenses this file # to", "dataset_id=metric_meta_proto.dataset_id.value, model_name=metric_meta_proto.model_name.value, model_version=metric_meta_proto.model_version.value, job_id=metric_meta_proto.job_id.value, start_time=metric_meta_proto.start_time.value, end_time=metric_meta_proto.end_time.value, metric_type=metric_type, uri=metric_meta_proto.uri.value if metric_meta_proto.HasField('uri')", "MetricSummary: return MetricSummary(uuid=metric_summary_proto.uuid, metric_id=metric_summary_proto.metric_id.value, metric_key=metric_summary_proto.metric_key.value if metric_summary_proto.HasField('metric_key') else None, metric_value=metric_summary_proto.metric_value.value", "or agreed to in writing, # software distributed under the", "metric_description=metric_description, properties=properties) def metric_summary_to_table(metric_id: int, metric_key: Text, metric_value: Text, store_type:", "MetricMetaProto, MetricSummaryProto, MetricTypeProto, ReturnCode, \\ SUCCESS, RESOURCE_DOES_NOT_EXIST from ai_flow.store.db.db_model import", "License. 
# import ast from typing import Text, Optional, Union,", "SqlMetricMeta return _class(name=name, dataset_id=dataset_id, model_name=model_name, model_version=model_version, job_id=job_id, start_time=start_time, end_time=end_time, metric_type=metric_type.value,", "return MetricMetaResponse(return_code=1, return_msg=ReturnCode.Name(RESOURCE_DOES_NOT_EXIST).lower(), metric_meta=None) def _warp_list_metric_meta_response(metric_meta: Union[None, MetricMeta, List[MetricMeta]]) ->", "ListMetricMetaResponse, \\ MetricSummaryResponse, ListMetricSummaryResponse from ai_flow.rest_endpoint.service import int64Value, stringValue from", "return_msg=ReturnCode.Name(SUCCESS).lower(), metric_meta=res) else: return ListMetricMetaResponse(return_code=1, return_msg=ReturnCode.Name(RESOURCE_DOES_NOT_EXIST).lower(), metric_meta=None) def _warp_metric_summary_response(metric_summary: Optional[MetricSummary])", "if metric_summary_proto.HasField('metric_value') else None ) def _warp_metric_meta_response(metric_meta: Optional[MetricMeta]) -> MetricMetaResponse:", "store_type: Text = 'SqlAlchemyStore'): if properties is not None: properties", "dataset_id: int, model_name: Optional[Text], model_version: Optional[Text], job_id: int, start_time: int,", "MetricSummary(uuid=metric_summary_result.uuid, metric_id=metric_summary_result.metric_id, metric_key=metric_summary_result.metric_key, metric_value=metric_summary_result.metric_value) def metric_meta_to_table(name: Text, dataset_id: int, model_name:", "MongoMetricMeta else: _class = SqlMetricMeta return _class(name=name, dataset_id=dataset_id, model_name=model_name, model_version=model_version,", "metric_description=metric_meta_result.metric_description, properties=properties) def table_to_metric_summary(metric_summary_result) -> MetricSummary: return MetricSummary(uuid=metric_summary_result.uuid, metric_id=metric_summary_result.metric_id, metric_key=metric_summary_result.metric_key,", "import int64Value, stringValue from 
ai_flow.common.properties import Properties from ai_flow.meta.metric_meta import", "metric_id=int64Value(metric_summary.metric_id), metric_key=stringValue(metric_summary.metric_key), metric_value=stringValue(metric_summary.metric_value)) def proto_to_metric_meta(metric_meta_proto: MetricMetaProto) -> MetricMeta: if MetricTypeProto.DATASET", "dataset_id=int64Value(metric_meta.dataset_id), model_name=stringValue(metric_meta.model_name), model_version=stringValue(metric_meta.model_version), job_id=int64Value(metric_meta.job_id), start_time=int64Value(metric_meta.start_time), end_time=int64Value(metric_meta.end_time), metric_type=metric_type, uri=stringValue(metric_meta.uri), tags=stringValue(metric_meta.tags), metric_description=stringValue(metric_meta.metric_description),", "def proto_to_metric_meta(metric_meta_proto: MetricMetaProto) -> MetricMeta: if MetricTypeProto.DATASET == metric_meta_proto.metric_type: metric_type", "or more contributor license agreements. See the NOTICE file #", "else: _class = SqlMetricMeta return _class(name=name, dataset_id=dataset_id, model_name=model_name, model_version=model_version, job_id=job_id,", "properties = ast.literal_eval(properties) return MetricMeta(uuid=metric_meta_result.uuid, name=metric_meta_result.name, dataset_id=metric_meta_result.dataset_id, model_name=metric_meta_result.model_name, model_version=metric_meta_result.model_version, job_id=metric_meta_result.job_id,", "None: properties = str(properties) if store_type == 'MongoStore': _class =", "this work for additional information # regarding copyright ownership. The", "and limitations # under the License. 
# import ast from", "if metric_meta_proto.HasField('metric_description') else None, properties=metric_meta_proto.properties ) def proto_to_metric_summary(metric_summary_proto: MetricSummaryProto) ->", "metric_meta=None) def _warp_metric_summary_response(metric_summary: Optional[MetricSummary]) -> MetricSummaryResponse: if metric_summary is not", "the NOTICE file # distributed with this work for additional", "MongoMetricSummary, MongoMetricMeta def table_to_metric_meta(metric_meta_result) -> MetricMeta: properties = metric_meta_result.properties if", "job_id=metric_meta_proto.job_id.value, start_time=metric_meta_proto.start_time.value, end_time=metric_meta_proto.end_time.value, metric_type=metric_type, uri=metric_meta_proto.uri.value if metric_meta_proto.HasField('uri') else None, tags=metric_meta_proto.tags.value", "-> MetricSummary: return MetricSummary(uuid=metric_summary_proto.uuid, metric_id=metric_summary_proto.metric_id.value, metric_key=metric_summary_proto.metric_key.value if metric_summary_proto.HasField('metric_key') else None,", "ai_flow.store.db.db_model import MongoMetricSummary, MongoMetricMeta def table_to_metric_meta(metric_meta_result) -> MetricMeta: properties =", "if store_type == 'MongoStore': _class = MongoMetricMeta else: _class =", "MetricSummaryProto) -> MetricSummary: return MetricSummary(uuid=metric_summary_proto.uuid, metric_id=metric_summary_proto.metric_id.value, metric_key=metric_summary_proto.metric_key.value if metric_summary_proto.HasField('metric_key') else", "job_id=metric_meta_result.job_id, start_time=metric_meta_result.start_time, end_time=metric_meta_result.end_time, metric_type=MetricType.value_of(metric_meta_result.metric_type), uri=metric_meta_result.uri, tags=metric_meta_result.tags, metric_description=metric_meta_result.metric_description, properties=properties) def table_to_metric_summary(metric_summary_result)", "if properties is not None: properties = ast.literal_eval(properties) return 
MetricMeta(uuid=metric_meta_result.uuid,", "str(properties) if store_type == 'MongoStore': _class = MongoMetricMeta else: _class", "return_msg=ReturnCode.Name(RESOURCE_DOES_NOT_EXIST).lower(), metric_summary=None) def _warp_list_metric_summary_response(metric_summary: Optional[List[MetricSummary]]) -> ListMetricSummaryResponse: if metric_summary is", "else: return MetricSummaryResponse(return_code=1, return_msg=ReturnCode.Name(RESOURCE_DOES_NOT_EXIST).lower(), metric_summary=None) def _warp_list_metric_summary_response(metric_summary: Optional[List[MetricSummary]]) -> ListMetricSummaryResponse:", "int, end_time: int, metric_type: MetricType, uri: Text, tags: Text, metric_description:", "properties=metric_meta_proto.properties ) def proto_to_metric_summary(metric_summary_proto: MetricSummaryProto) -> MetricSummary: return MetricSummary(uuid=metric_summary_proto.uuid, metric_id=metric_summary_proto.metric_id.value,", "store_type == 'MongoStore': _class = MongoMetricSummary else: _class = SqlMetricSummary", "return ListMetricMetaResponse(return_code=0, return_msg=ReturnCode.Name(SUCCESS).lower(), metric_meta=[metric_meta_to_proto(metric_meta)]) else: res = [] for meta", "if properties is not None: properties = str(properties) if store_type", "KIND, either express or implied. See the License for the", "model_name=model_name, model_version=model_version, job_id=job_id, start_time=start_time, end_time=end_time, metric_type=metric_type.value, uri=uri, tags=tags, metric_description=metric_description, properties=properties)", "metric_type=metric_type, uri=stringValue(metric_meta.uri), tags=stringValue(metric_meta.tags), metric_description=stringValue(metric_meta.metric_description), properties=metric_meta.properties) def metric_summary_to_proto(metric_summary: MetricSummary) -> MetricSummaryProto:", "== metric_meta_proto.metric_type: metric_type = MetricType.DATASET else: metric_type = MetricType.MODEL return", "limitations # under the License. 
# import ast from typing", "or implied. See the License for the # specific language", "express or implied. See the License for the # specific", "return _class(name=name, dataset_id=dataset_id, model_name=model_name, model_version=model_version, job_id=job_id, start_time=start_time, end_time=end_time, metric_type=metric_type.value, uri=uri,", "MetricSummaryProto, MetricTypeProto, ReturnCode, \\ SUCCESS, RESOURCE_DOES_NOT_EXIST from ai_flow.store.db.db_model import SqlMetricMeta,", "# import ast from typing import Text, Optional, Union, List", "MetricMetaProto: if metric_meta.metric_type == MetricType.DATASET: metric_type = MetricTypeProto.DATASET else: metric_type", "metric_meta=None) def _warp_list_metric_meta_response(metric_meta: Union[None, MetricMeta, List[MetricMeta]]) -> MetricMetaResponse: if metric_meta", "is not None: return MetricMetaResponse(return_code=0, return_msg=ReturnCode.Name(SUCCESS).lower(), metric_meta=metric_meta_to_proto(metric_meta)) else: return MetricMetaResponse(return_code=1,", "-> MetricMetaResponse: if metric_meta is not None: return MetricMetaResponse(return_code=0, return_msg=ReturnCode.Name(SUCCESS).lower(),", "the # specific language governing permissions and limitations # under", "def metric_summary_to_proto(metric_summary: MetricSummary) -> MetricSummaryProto: return MetricSummaryProto(uuid=metric_summary.uuid, metric_id=int64Value(metric_summary.metric_id), metric_key=stringValue(metric_summary.metric_key), metric_value=stringValue(metric_summary.metric_value))", "ai_flow.meta.metric_meta import MetricMeta, MetricType, MetricSummary from ai_flow.rest_endpoint.protobuf.message_pb2 import MetricMetaProto, MetricSummaryProto,", "may obtain a copy of the License at # #", "= MetricTypeProto.DATASET else: metric_type = MetricTypeProto.MODEL return MetricMetaProto(uuid=metric_meta.uuid, name=stringValue(metric_meta.name), dataset_id=int64Value(metric_meta.dataset_id),", "The ASF licenses this file # to you under the", "not None: return 
MetricMetaResponse(return_code=0, return_msg=ReturnCode.Name(SUCCESS).lower(), metric_meta=metric_meta_to_proto(metric_meta)) else: return MetricMetaResponse(return_code=1, return_msg=ReturnCode.Name(RESOURCE_DOES_NOT_EXIST).lower(),", "def metric_meta_to_proto(metric_meta: MetricMeta) -> MetricMetaProto: if metric_meta.metric_type == MetricType.DATASET: metric_type", "Text = 'SqlAlchemyStore'): if properties is not None: properties =", "# Licensed to the Apache Software Foundation (ASF) under one", "Properties from ai_flow.meta.metric_meta import MetricMeta, MetricType, MetricSummary from ai_flow.rest_endpoint.protobuf.message_pb2 import", "import MetricMetaResponse, ListMetricMetaResponse, \\ MetricSummaryResponse, ListMetricSummaryResponse from ai_flow.rest_endpoint.service import int64Value,", "summary in metric_summary: res.append(metric_summary_to_proto(summary)) return ListMetricSummaryResponse(return_code=0, return_msg=ReturnCode.Name(SUCCESS).lower(), metric_summary=res) else: return", "law or agreed to in writing, # software distributed under", "from ai_flow.rest_endpoint.protobuf.metric_service_pb2 import MetricMetaResponse, ListMetricMetaResponse, \\ MetricSummaryResponse, ListMetricSummaryResponse from ai_flow.rest_endpoint.service", "= SqlMetricMeta return _class(name=name, dataset_id=dataset_id, model_name=model_name, model_version=model_version, job_id=job_id, start_time=start_time, end_time=end_time,", "metric_description=metric_meta_proto.metric_description.value if metric_meta_proto.HasField('metric_description') else None, properties=metric_meta_proto.properties ) def proto_to_metric_summary(metric_summary_proto: MetricSummaryProto)", "Foundation (ASF) under one # or more contributor license agreements.", "None: res = [] for summary in metric_summary: res.append(metric_summary_to_proto(summary)) return", "= [] for summary in metric_summary: res.append(metric_summary_to_proto(summary)) return ListMetricSummaryResponse(return_code=0, 
return_msg=ReturnCode.Name(SUCCESS).lower(),", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "= [] for meta in metric_meta: res.append(metric_meta_to_proto(meta)) return ListMetricMetaResponse(return_code=0, return_msg=ReturnCode.Name(SUCCESS).lower(),", "metric_key=stringValue(metric_summary.metric_key), metric_value=stringValue(metric_summary.metric_value)) def proto_to_metric_meta(metric_meta_proto: MetricMetaProto) -> MetricMeta: if MetricTypeProto.DATASET ==", "return MetricSummaryResponse(return_code=0, return_msg=ReturnCode.Name(SUCCESS).lower(), metric_summary=metric_summary_to_proto(metric_summary)) else: return MetricSummaryResponse(return_code=1, return_msg=ReturnCode.Name(RESOURCE_DOES_NOT_EXIST).lower(), metric_summary=None) def", "metric_type = MetricTypeProto.MODEL return MetricMetaProto(uuid=metric_meta.uuid, name=stringValue(metric_meta.name), dataset_id=int64Value(metric_meta.dataset_id), model_name=stringValue(metric_meta.model_name), model_version=stringValue(metric_meta.model_version), job_id=int64Value(metric_meta.job_id),", "Software Foundation (ASF) under one # or more contributor license", "# regarding copyright ownership. The ASF licenses this file #", "MetricType.DATASET: metric_type = MetricTypeProto.DATASET else: metric_type = MetricTypeProto.MODEL return MetricMetaProto(uuid=metric_meta.uuid,", "in compliance # with the License. 
You may obtain a", "# to you under the Apache License, Version 2.0 (the", "License for the # specific language governing permissions and limitations", "OR CONDITIONS OF ANY # KIND, either express or implied.", "MetricMeta(uuid=metric_meta_proto.uuid, name=metric_meta_proto.name.value, dataset_id=metric_meta_proto.dataset_id.value, model_name=metric_meta_proto.model_name.value, model_version=metric_meta_proto.model_version.value, job_id=metric_meta_proto.job_id.value, start_time=metric_meta_proto.start_time.value, end_time=metric_meta_proto.end_time.value, metric_type=metric_type, uri=metric_meta_proto.uri.value", "metric_summary is not None: res = [] for summary in", "MetricSummaryProto: return MetricSummaryProto(uuid=metric_summary.uuid, metric_id=int64Value(metric_summary.metric_id), metric_key=stringValue(metric_summary.metric_key), metric_value=stringValue(metric_summary.metric_value)) def proto_to_metric_meta(metric_meta_proto: MetricMetaProto) ->", "_warp_list_metric_meta_response(metric_meta: Union[None, MetricMeta, List[MetricMeta]]) -> MetricMetaResponse: if metric_meta is not", "Optional[Text], job_id: int, start_time: int, end_time: int, metric_type: MetricType, uri:", "Text, dataset_id: int, model_name: Optional[Text], model_version: Optional[Text], job_id: int, start_time:", "dataset_id=metric_meta_result.dataset_id, model_name=metric_meta_result.model_name, model_version=metric_meta_result.model_version, job_id=metric_meta_result.job_id, start_time=metric_meta_result.start_time, end_time=metric_meta_result.end_time, metric_type=MetricType.value_of(metric_meta_result.metric_type), uri=metric_meta_result.uri, tags=metric_meta_result.tags, metric_description=metric_meta_result.metric_description,", "'SqlAlchemyStore'): if properties is not None: properties = str(properties) if", "MetricMetaProto) -> MetricMeta: if MetricTypeProto.DATASET == metric_meta_proto.metric_type: metric_type = MetricType.DATASET", "this file # to you under the Apache License, 
Version", "MetricSummaryProto(uuid=metric_summary.uuid, metric_id=int64Value(metric_summary.metric_id), metric_key=stringValue(metric_summary.metric_key), metric_value=stringValue(metric_summary.metric_value)) def proto_to_metric_meta(metric_meta_proto: MetricMetaProto) -> MetricMeta: if", "copyright ownership. The ASF licenses this file # to you", "metric_type: MetricType, uri: Text, tags: Text, metric_description: Text, properties: Properties,", "from typing import Text, Optional, Union, List from ai_flow.rest_endpoint.protobuf.metric_service_pb2 import", "metric_summary_to_table(metric_id: int, metric_key: Text, metric_value: Text, store_type: Text = 'SqlAlchemyStore'):", "MetricMetaResponse: if metric_meta is not None: if isinstance(metric_meta, MetricMeta): return", "properties=metric_meta.properties) def metric_summary_to_proto(metric_summary: MetricSummary) -> MetricSummaryProto: return MetricSummaryProto(uuid=metric_summary.uuid, metric_id=int64Value(metric_summary.metric_id), metric_key=stringValue(metric_summary.metric_key),", "MetricSummary from ai_flow.rest_endpoint.protobuf.message_pb2 import MetricMetaProto, MetricSummaryProto, MetricTypeProto, ReturnCode, \\ SUCCESS,", "in writing, # software distributed under the License is distributed", "res = [] for summary in metric_summary: res.append(metric_summary_to_proto(summary)) return ListMetricSummaryResponse(return_code=0,", "is not None: res = [] for summary in metric_summary:", "metric_value=stringValue(metric_summary.metric_value)) def proto_to_metric_meta(metric_meta_proto: MetricMetaProto) -> MetricMeta: if MetricTypeProto.DATASET == metric_meta_proto.metric_type:", "end_time=int64Value(metric_meta.end_time), metric_type=metric_type, uri=stringValue(metric_meta.uri), tags=stringValue(metric_meta.tags), metric_description=stringValue(metric_meta.metric_description), properties=metric_meta.properties) def metric_summary_to_proto(metric_summary: MetricSummary) ->", 
"return_msg=ReturnCode.Name(RESOURCE_DOES_NOT_EXIST).lower(), metric_meta=None) def _warp_metric_summary_response(metric_summary: Optional[MetricSummary]) -> MetricSummaryResponse: if metric_summary is", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "License is distributed on an # \"AS IS\" BASIS, WITHOUT", "= MetricType.MODEL return MetricMeta(uuid=metric_meta_proto.uuid, name=metric_meta_proto.name.value, dataset_id=metric_meta_proto.dataset_id.value, model_name=metric_meta_proto.model_name.value, model_version=metric_meta_proto.model_version.value, job_id=metric_meta_proto.job_id.value, start_time=metric_meta_proto.start_time.value,", "name=stringValue(metric_meta.name), dataset_id=int64Value(metric_meta.dataset_id), model_name=stringValue(metric_meta.model_name), model_version=stringValue(metric_meta.model_version), job_id=int64Value(metric_meta.job_id), start_time=int64Value(metric_meta.start_time), end_time=int64Value(metric_meta.end_time), metric_type=metric_type, uri=stringValue(metric_meta.uri), tags=stringValue(metric_meta.tags),", "MetricMeta: if MetricTypeProto.DATASET == metric_meta_proto.metric_type: metric_type = MetricType.DATASET else: metric_type", "MetricSummary(uuid=metric_summary_proto.uuid, metric_id=metric_summary_proto.metric_id.value, metric_key=metric_summary_proto.metric_key.value if metric_summary_proto.HasField('metric_key') else None, metric_value=metric_summary_proto.metric_value.value if metric_summary_proto.HasField('metric_value')", "end_time=end_time, metric_type=metric_type.value, uri=uri, tags=tags, metric_description=metric_description, properties=properties) def metric_summary_to_table(metric_id: int, metric_key:", "ast.literal_eval(properties) return MetricMeta(uuid=metric_meta_result.uuid, name=metric_meta_result.name, dataset_id=metric_meta_result.dataset_id, model_name=metric_meta_result.model_name, model_version=metric_meta_result.model_version, 
job_id=metric_meta_result.job_id, start_time=metric_meta_result.start_time, end_time=metric_meta_result.end_time,", "is not None: return MetricSummaryResponse(return_code=0, return_msg=ReturnCode.Name(SUCCESS).lower(), metric_summary=metric_summary_to_proto(metric_summary)) else: return MetricSummaryResponse(return_code=1,", "if store_type == 'MongoStore': _class = MongoMetricSummary else: _class =", "return_msg=ReturnCode.Name(SUCCESS).lower(), metric_summary=metric_summary_to_proto(metric_summary)) else: return MetricSummaryResponse(return_code=1, return_msg=ReturnCode.Name(RESOURCE_DOES_NOT_EXIST).lower(), metric_summary=None) def _warp_list_metric_summary_response(metric_summary: Optional[List[MetricSummary]])", "start_time=metric_meta_result.start_time, end_time=metric_meta_result.end_time, metric_type=MetricType.value_of(metric_meta_result.metric_type), uri=metric_meta_result.uri, tags=metric_meta_result.tags, metric_description=metric_meta_result.metric_description, properties=properties) def table_to_metric_summary(metric_summary_result) ->", "model_version=metric_meta_proto.model_version.value, job_id=metric_meta_proto.job_id.value, start_time=metric_meta_proto.start_time.value, end_time=metric_meta_proto.end_time.value, metric_type=metric_type, uri=metric_meta_proto.uri.value if metric_meta_proto.HasField('uri') else None,", "# \"License\"); you may not use this file except in", "_warp_metric_summary_response(metric_summary: Optional[MetricSummary]) -> MetricSummaryResponse: if metric_summary is not None: return", "# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY", "to the Apache Software Foundation (ASF) under one # or", "\"License\"); you may not use this file except in compliance", "ast from typing import Text, Optional, Union, List from ai_flow.rest_endpoint.protobuf.metric_service_pb2", "MetricSummaryResponse: if metric_summary is not None: return MetricSummaryResponse(return_code=0, return_msg=ReturnCode.Name(SUCCESS).lower(), 
metric_summary=metric_summary_to_proto(metric_summary))", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "-> MetricSummary: return MetricSummary(uuid=metric_summary_result.uuid, metric_id=metric_summary_result.metric_id, metric_key=metric_summary_result.metric_key, metric_value=metric_summary_result.metric_value) def metric_meta_to_table(name: Text,", "return MetricSummary(uuid=metric_summary_result.uuid, metric_id=metric_summary_result.metric_id, metric_key=metric_summary_result.metric_key, metric_value=metric_summary_result.metric_value) def metric_meta_to_table(name: Text, dataset_id: int,", "properties = metric_meta_result.properties if properties is not None: properties =", "return MetricMetaProto(uuid=metric_meta.uuid, name=stringValue(metric_meta.name), dataset_id=int64Value(metric_meta.dataset_id), model_name=stringValue(metric_meta.model_name), model_version=stringValue(metric_meta.model_version), job_id=int64Value(metric_meta.job_id), start_time=int64Value(metric_meta.start_time), end_time=int64Value(metric_meta.end_time), metric_type=metric_type,", "# distributed with this work for additional information # regarding", "return_msg=ReturnCode.Name(SUCCESS).lower(), metric_meta=[metric_meta_to_proto(metric_meta)]) else: res = [] for meta in metric_meta:", "writing, # software distributed under the License is distributed on", "is not None: properties = ast.literal_eval(properties) return MetricMeta(uuid=metric_meta_result.uuid, name=metric_meta_result.name, dataset_id=metric_meta_result.dataset_id,", "SqlMetricMeta, SqlMetricSummary from ai_flow.store.db.db_model import MongoMetricSummary, MongoMetricMeta def table_to_metric_meta(metric_meta_result) ->", "_class = SqlMetricSummary return _class(metric_id=metric_id, metric_key=metric_key, metric_value=metric_value) def metric_meta_to_proto(metric_meta: MetricMeta)", "_class(metric_id=metric_id, metric_key=metric_key, metric_value=metric_value) def 
metric_meta_to_proto(metric_meta: MetricMeta) -> MetricMetaProto: if metric_meta.metric_type", "-> MetricMeta: properties = metric_meta_result.properties if properties is not None:", "= MongoMetricMeta else: _class = SqlMetricMeta return _class(name=name, dataset_id=dataset_id, model_name=model_name,", "Text, metric_value: Text, store_type: Text = 'SqlAlchemyStore'): if store_type ==", "uri: Text, tags: Text, metric_description: Text, properties: Properties, store_type: Text", "CONDITIONS OF ANY # KIND, either express or implied. See", "Text, properties: Properties, store_type: Text = 'SqlAlchemyStore'): if properties is", "= MongoMetricSummary else: _class = SqlMetricSummary return _class(metric_id=metric_id, metric_key=metric_key, metric_value=metric_value)", "Union[None, MetricMeta, List[MetricMeta]]) -> MetricMetaResponse: if metric_meta is not None:", "import MetricMetaProto, MetricSummaryProto, MetricTypeProto, ReturnCode, \\ SUCCESS, RESOURCE_DOES_NOT_EXIST from ai_flow.store.db.db_model", "MetricTypeProto, ReturnCode, \\ SUCCESS, RESOURCE_DOES_NOT_EXIST from ai_flow.store.db.db_model import SqlMetricMeta, SqlMetricSummary", "None: properties = ast.literal_eval(properties) return MetricMeta(uuid=metric_meta_result.uuid, name=metric_meta_result.name, dataset_id=metric_meta_result.dataset_id, model_name=metric_meta_result.model_name, model_version=metric_meta_result.model_version,", "metric_type=MetricType.value_of(metric_meta_result.metric_type), uri=metric_meta_result.uri, tags=metric_meta_result.tags, metric_description=metric_meta_result.metric_description, properties=properties) def table_to_metric_summary(metric_summary_result) -> MetricSummary: return", "metric_summary_proto.HasField('metric_value') else None ) def _warp_metric_meta_response(metric_meta: Optional[MetricMeta]) -> MetricMetaResponse: if", "for additional information # regarding copyright ownership. 
The ASF licenses", "the Apache Software Foundation (ASF) under one # or more", "# # Unless required by applicable law or agreed to", "Version 2.0 (the # \"License\"); you may not use this", "import Text, Optional, Union, List from ai_flow.rest_endpoint.protobuf.metric_service_pb2 import MetricMetaResponse, ListMetricMetaResponse,", "MetricSummary) -> MetricSummaryProto: return MetricSummaryProto(uuid=metric_summary.uuid, metric_id=int64Value(metric_summary.metric_id), metric_key=stringValue(metric_summary.metric_key), metric_value=stringValue(metric_summary.metric_value)) def proto_to_metric_meta(metric_meta_proto:", "ListMetricMetaResponse(return_code=0, return_msg=ReturnCode.Name(SUCCESS).lower(), metric_meta=[metric_meta_to_proto(metric_meta)]) else: res = [] for meta in", "one # or more contributor license agreements. See the NOTICE", "metric_summary is not None: return MetricSummaryResponse(return_code=0, return_msg=ReturnCode.Name(SUCCESS).lower(), metric_summary=metric_summary_to_proto(metric_summary)) else: return", "ai_flow.common.properties import Properties from ai_flow.meta.metric_meta import MetricMeta, MetricType, MetricSummary from", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "if metric_summary is not None: res = [] for summary", "import MetricMeta, MetricType, MetricSummary from ai_flow.rest_endpoint.protobuf.message_pb2 import MetricMetaProto, MetricSummaryProto, MetricTypeProto,", "else: metric_type = MetricType.MODEL return MetricMeta(uuid=metric_meta_proto.uuid, name=metric_meta_proto.name.value, dataset_id=metric_meta_proto.dataset_id.value, model_name=metric_meta_proto.model_name.value, model_version=metric_meta_proto.model_version.value,", "except in compliance # with the License. 
You may obtain", "metric_value=metric_summary_proto.metric_value.value if metric_summary_proto.HasField('metric_value') else None ) def _warp_metric_meta_response(metric_meta: Optional[MetricMeta]) ->", "return MetricSummary(uuid=metric_summary_proto.uuid, metric_id=metric_summary_proto.metric_id.value, metric_key=metric_summary_proto.metric_key.value if metric_summary_proto.HasField('metric_key') else None, metric_value=metric_summary_proto.metric_value.value if", "else None, metric_description=metric_meta_proto.metric_description.value if metric_meta_proto.HasField('metric_description') else None, properties=metric_meta_proto.properties ) def", "Optional, Union, List from ai_flow.rest_endpoint.protobuf.metric_service_pb2 import MetricMetaResponse, ListMetricMetaResponse, \\ MetricSummaryResponse,", "MetricSummary: return MetricSummary(uuid=metric_summary_result.uuid, metric_id=metric_summary_result.metric_id, metric_key=metric_summary_result.metric_key, metric_value=metric_summary_result.metric_value) def metric_meta_to_table(name: Text, dataset_id:", "NOTICE file # distributed with this work for additional information", "return _class(metric_id=metric_id, metric_key=metric_key, metric_value=metric_value) def metric_meta_to_proto(metric_meta: MetricMeta) -> MetricMetaProto: if", "this file except in compliance # with the License. You", "under the License. 
# import ast from typing import Text,", "= str(properties) if store_type == 'MongoStore': _class = MongoMetricMeta else:", "tags=tags, metric_description=metric_description, properties=properties) def metric_summary_to_table(metric_id: int, metric_key: Text, metric_value: Text,", "Optional[List[MetricSummary]]) -> ListMetricSummaryResponse: if metric_summary is not None: res =", "None, metric_description=metric_meta_proto.metric_description.value if metric_meta_proto.HasField('metric_description') else None, properties=metric_meta_proto.properties ) def proto_to_metric_summary(metric_summary_proto:", "license agreements. See the NOTICE file # distributed with this", "required by applicable law or agreed to in writing, #", "metric_meta_to_proto(metric_meta: MetricMeta) -> MetricMetaProto: if metric_meta.metric_type == MetricType.DATASET: metric_type =", "metric_type = MetricTypeProto.DATASET else: metric_type = MetricTypeProto.MODEL return MetricMetaProto(uuid=metric_meta.uuid, name=stringValue(metric_meta.name),", "from ai_flow.rest_endpoint.protobuf.message_pb2 import MetricMetaProto, MetricSummaryProto, MetricTypeProto, ReturnCode, \\ SUCCESS, RESOURCE_DOES_NOT_EXIST", "metric_summary_proto.HasField('metric_key') else None, metric_value=metric_summary_proto.metric_value.value if metric_summary_proto.HasField('metric_value') else None ) def", "def metric_meta_to_table(name: Text, dataset_id: int, model_name: Optional[Text], model_version: Optional[Text], job_id:", "[] for meta in metric_meta: res.append(metric_meta_to_proto(meta)) return ListMetricMetaResponse(return_code=0, return_msg=ReturnCode.Name(SUCCESS).lower(), metric_meta=res)", "int, metric_key: Text, metric_value: Text, store_type: Text = 'SqlAlchemyStore'): if", "metric_key: Text, metric_value: Text, store_type: Text = 'SqlAlchemyStore'): if store_type", "the License for the # specific language governing permissions and", "= MetricType.DATASET else: metric_type = MetricType.MODEL return 
MetricMeta(uuid=metric_meta_proto.uuid, name=metric_meta_proto.name.value, dataset_id=metric_meta_proto.dataset_id.value,", "ANY # KIND, either express or implied. See the License", "the License is distributed on an # \"AS IS\" BASIS,", "return MetricMetaResponse(return_code=0, return_msg=ReturnCode.Name(SUCCESS).lower(), metric_meta=metric_meta_to_proto(metric_meta)) else: return MetricMetaResponse(return_code=1, return_msg=ReturnCode.Name(RESOURCE_DOES_NOT_EXIST).lower(), metric_meta=None) def", "else: res = [] for meta in metric_meta: res.append(metric_meta_to_proto(meta)) return", "store_type: Text = 'SqlAlchemyStore'): if store_type == 'MongoStore': _class =", "# # Licensed to the Apache Software Foundation (ASF) under", "int, model_name: Optional[Text], model_version: Optional[Text], job_id: int, start_time: int, end_time:", "return MetricMeta(uuid=metric_meta_result.uuid, name=metric_meta_result.name, dataset_id=metric_meta_result.dataset_id, model_name=metric_meta_result.model_name, model_version=metric_meta_result.model_version, job_id=metric_meta_result.job_id, start_time=metric_meta_result.start_time, end_time=metric_meta_result.end_time, metric_type=MetricType.value_of(metric_meta_result.metric_type),", "not use this file except in compliance # with the", "res.append(metric_summary_to_proto(summary)) return ListMetricSummaryResponse(return_code=0, return_msg=ReturnCode.Name(SUCCESS).lower(), metric_summary=res) else: return ListMetricSummaryResponse(return_code=1, return_msg=ReturnCode.Name(RESOURCE_DOES_NOT_EXIST).lower(), metric_summary=None)", "def table_to_metric_meta(metric_meta_result) -> MetricMeta: properties = metric_meta_result.properties if properties is", "List[MetricMeta]]) -> MetricMetaResponse: if metric_meta is not None: if isinstance(metric_meta,", "= SqlMetricSummary return _class(metric_id=metric_id, metric_key=metric_key, metric_value=metric_value) def metric_meta_to_proto(metric_meta: MetricMeta) ->", "_class = SqlMetricMeta return 
_class(name=name, dataset_id=dataset_id, model_name=model_name, model_version=model_version, job_id=job_id, start_time=start_time,", "_class(name=name, dataset_id=dataset_id, model_name=model_name, model_version=model_version, job_id=job_id, start_time=start_time, end_time=end_time, metric_type=metric_type.value, uri=uri, tags=tags,", "MetricMeta: properties = metric_meta_result.properties if properties is not None: properties", ") def proto_to_metric_summary(metric_summary_proto: MetricSummaryProto) -> MetricSummary: return MetricSummary(uuid=metric_summary_proto.uuid, metric_id=metric_summary_proto.metric_id.value, metric_key=metric_summary_proto.metric_key.value", "Unless required by applicable law or agreed to in writing,", "MetricMeta(uuid=metric_meta_result.uuid, name=metric_meta_result.name, dataset_id=metric_meta_result.dataset_id, model_name=metric_meta_result.model_name, model_version=metric_meta_result.model_version, job_id=metric_meta_result.job_id, start_time=metric_meta_result.start_time, end_time=metric_meta_result.end_time, metric_type=MetricType.value_of(metric_meta_result.metric_type), uri=metric_meta_result.uri,", "_warp_metric_meta_response(metric_meta: Optional[MetricMeta]) -> MetricMetaResponse: if metric_meta is not None: return", "tags=metric_meta_proto.tags.value if metric_meta_proto.HasField('tags') else None, metric_description=metric_meta_proto.metric_description.value if metric_meta_proto.HasField('metric_description') else None,", "ReturnCode, \\ SUCCESS, RESOURCE_DOES_NOT_EXIST from ai_flow.store.db.db_model import SqlMetricMeta, SqlMetricSummary from", "MetricType.MODEL return MetricMeta(uuid=metric_meta_proto.uuid, name=metric_meta_proto.name.value, dataset_id=metric_meta_proto.dataset_id.value, model_name=metric_meta_proto.model_name.value, model_version=metric_meta_proto.model_version.value, job_id=metric_meta_proto.job_id.value, start_time=metric_meta_proto.start_time.value, end_time=metric_meta_proto.end_time.value,", "in 
metric_meta: res.append(metric_meta_to_proto(meta)) return ListMetricMetaResponse(return_code=0, return_msg=ReturnCode.Name(SUCCESS).lower(), metric_meta=res) else: return ListMetricMetaResponse(return_code=1,", "MetricMetaProto(uuid=metric_meta.uuid, name=stringValue(metric_meta.name), dataset_id=int64Value(metric_meta.dataset_id), model_name=stringValue(metric_meta.model_name), model_version=stringValue(metric_meta.model_version), job_id=int64Value(metric_meta.job_id), start_time=int64Value(metric_meta.start_time), end_time=int64Value(metric_meta.end_time), metric_type=metric_type, uri=stringValue(metric_meta.uri),", "properties: Properties, store_type: Text = 'SqlAlchemyStore'): if properties is not", "job_id=int64Value(metric_meta.job_id), start_time=int64Value(metric_meta.start_time), end_time=int64Value(metric_meta.end_time), metric_type=metric_type, uri=stringValue(metric_meta.uri), tags=stringValue(metric_meta.tags), metric_description=stringValue(metric_meta.metric_description), properties=metric_meta.properties) def metric_summary_to_proto(metric_summary:", "from ai_flow.store.db.db_model import SqlMetricMeta, SqlMetricSummary from ai_flow.store.db.db_model import MongoMetricSummary, MongoMetricMeta", "(ASF) under one # or more contributor license agreements. 
See", "metric_meta_proto.HasField('tags') else None, metric_description=metric_meta_proto.metric_description.value if metric_meta_proto.HasField('metric_description') else None, properties=metric_meta_proto.properties )", "is not None: if isinstance(metric_meta, MetricMeta): return ListMetricMetaResponse(return_code=0, return_msg=ReturnCode.Name(SUCCESS).lower(), metric_meta=[metric_meta_to_proto(metric_meta)])", "def proto_to_metric_summary(metric_summary_proto: MetricSummaryProto) -> MetricSummary: return MetricSummary(uuid=metric_summary_proto.uuid, metric_id=metric_summary_proto.metric_id.value, metric_key=metric_summary_proto.metric_key.value if", "for summary in metric_summary: res.append(metric_summary_to_proto(summary)) return ListMetricSummaryResponse(return_code=0, return_msg=ReturnCode.Name(SUCCESS).lower(), metric_summary=res) else:", "# or more contributor license agreements. See the NOTICE file", "else: _class = SqlMetricSummary return _class(metric_id=metric_id, metric_key=metric_key, metric_value=metric_value) def metric_meta_to_proto(metric_meta:", "agreed to in writing, # software distributed under the License", "metric_summary=metric_summary_to_proto(metric_summary)) else: return MetricSummaryResponse(return_code=1, return_msg=ReturnCode.Name(RESOURCE_DOES_NOT_EXIST).lower(), metric_summary=None) def _warp_list_metric_summary_response(metric_summary: Optional[List[MetricSummary]]) ->", "metric_description=stringValue(metric_meta.metric_description), properties=metric_meta.properties) def metric_summary_to_proto(metric_summary: MetricSummary) -> MetricSummaryProto: return MetricSummaryProto(uuid=metric_summary.uuid, metric_id=int64Value(metric_summary.metric_id),", "MetricType, MetricSummary from ai_flow.rest_endpoint.protobuf.message_pb2 import MetricMetaProto, MetricSummaryProto, MetricTypeProto, ReturnCode, \\", "tags: Text, metric_description: Text, properties: Properties, store_type: Text = 'SqlAlchemyStore'):", "= MetricTypeProto.MODEL 
return MetricMetaProto(uuid=metric_meta.uuid, name=stringValue(metric_meta.name), dataset_id=int64Value(metric_meta.dataset_id), model_name=stringValue(metric_meta.model_name), model_version=stringValue(metric_meta.model_version), job_id=int64Value(metric_meta.job_id), start_time=int64Value(metric_meta.start_time),", "return MetricSummaryResponse(return_code=1, return_msg=ReturnCode.Name(RESOURCE_DOES_NOT_EXIST).lower(), metric_summary=None) def _warp_list_metric_summary_response(metric_summary: Optional[List[MetricSummary]]) -> ListMetricSummaryResponse: if", "(the # \"License\"); you may not use this file except", "if metric_meta_proto.HasField('uri') else None, tags=metric_meta_proto.tags.value if metric_meta_proto.HasField('tags') else None, metric_description=metric_meta_proto.metric_description.value", "-> MetricSummaryProto: return MetricSummaryProto(uuid=metric_summary.uuid, metric_id=int64Value(metric_summary.metric_id), metric_key=stringValue(metric_summary.metric_key), metric_value=stringValue(metric_summary.metric_value)) def proto_to_metric_meta(metric_meta_proto: MetricMetaProto)", "int64Value, stringValue from ai_flow.common.properties import Properties from ai_flow.meta.metric_meta import MetricMeta,", "model_name=metric_meta_proto.model_name.value, model_version=metric_meta_proto.model_version.value, job_id=metric_meta_proto.job_id.value, start_time=metric_meta_proto.start_time.value, end_time=metric_meta_proto.end_time.value, metric_type=metric_type, uri=metric_meta_proto.uri.value if metric_meta_proto.HasField('uri') else", "-> MetricSummaryResponse: if metric_summary is not None: return MetricSummaryResponse(return_code=0, return_msg=ReturnCode.Name(SUCCESS).lower(),", "return MetricMeta(uuid=metric_meta_proto.uuid, name=metric_meta_proto.name.value, dataset_id=metric_meta_proto.dataset_id.value, model_name=metric_meta_proto.model_name.value, model_version=metric_meta_proto.model_version.value, job_id=metric_meta_proto.job_id.value, 
start_time=metric_meta_proto.start_time.value, end_time=metric_meta_proto.end_time.value, metric_type=metric_type,", "return_msg=ReturnCode.Name(RESOURCE_DOES_NOT_EXIST).lower(), metric_meta=None) def _warp_list_metric_meta_response(metric_meta: Union[None, MetricMeta, List[MetricMeta]]) -> MetricMetaResponse: if", "MetricMeta, List[MetricMeta]]) -> MetricMetaResponse: if metric_meta is not None: if", "ASF licenses this file # to you under the Apache", "typing import Text, Optional, Union, List from ai_flow.rest_endpoint.protobuf.metric_service_pb2 import MetricMetaResponse,", "on an # \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS", "'MongoStore': _class = MongoMetricMeta else: _class = SqlMetricMeta return _class(name=name,", "def _warp_list_metric_summary_response(metric_summary: Optional[List[MetricSummary]]) -> ListMetricSummaryResponse: if metric_summary is not None:", "ownership. The ASF licenses this file # to you under", "start_time=int64Value(metric_meta.start_time), end_time=int64Value(metric_meta.end_time), metric_type=metric_type, uri=stringValue(metric_meta.uri), tags=stringValue(metric_meta.tags), metric_description=stringValue(metric_meta.metric_description), properties=metric_meta.properties) def metric_summary_to_proto(metric_summary: MetricSummary)", "properties=properties) def table_to_metric_summary(metric_summary_result) -> MetricSummary: return MetricSummary(uuid=metric_summary_result.uuid, metric_id=metric_summary_result.metric_id, metric_key=metric_summary_result.metric_key, metric_value=metric_summary_result.metric_value)", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "with the License. 
You may obtain a copy of the", "end_time=metric_meta_result.end_time, metric_type=MetricType.value_of(metric_meta_result.metric_type), uri=metric_meta_result.uri, tags=metric_meta_result.tags, metric_description=metric_meta_result.metric_description, properties=properties) def table_to_metric_summary(metric_summary_result) -> MetricSummary:", "is not None: properties = str(properties) if store_type == 'MongoStore':", "if metric_meta is not None: return MetricMetaResponse(return_code=0, return_msg=ReturnCode.Name(SUCCESS).lower(), metric_meta=metric_meta_to_proto(metric_meta)) else:", "else None ) def _warp_metric_meta_response(metric_meta: Optional[MetricMeta]) -> MetricMetaResponse: if metric_meta", "None, properties=metric_meta_proto.properties ) def proto_to_metric_summary(metric_summary_proto: MetricSummaryProto) -> MetricSummary: return MetricSummary(uuid=metric_summary_proto.uuid,", "metric_summary: res.append(metric_summary_to_proto(summary)) return ListMetricSummaryResponse(return_code=0, return_msg=ReturnCode.Name(SUCCESS).lower(), metric_summary=res) else: return ListMetricSummaryResponse(return_code=1, return_msg=ReturnCode.Name(RESOURCE_DOES_NOT_EXIST).lower(),", "applicable law or agreed to in writing, # software distributed", "Text, metric_description: Text, properties: Properties, store_type: Text = 'SqlAlchemyStore'): if", "governing permissions and limitations # under the License. 
# import", "table_to_metric_meta(metric_meta_result) -> MetricMeta: properties = metric_meta_result.properties if properties is not", "tags=stringValue(metric_meta.tags), metric_description=stringValue(metric_meta.metric_description), properties=metric_meta.properties) def metric_summary_to_proto(metric_summary: MetricSummary) -> MetricSummaryProto: return MetricSummaryProto(uuid=metric_summary.uuid,", "from ai_flow.meta.metric_meta import MetricMeta, MetricType, MetricSummary from ai_flow.rest_endpoint.protobuf.message_pb2 import MetricMetaProto,", "metric_meta_proto.metric_type: metric_type = MetricType.DATASET else: metric_type = MetricType.MODEL return MetricMeta(uuid=metric_meta_proto.uuid,", "is distributed on an # \"AS IS\" BASIS, WITHOUT WARRANTIES", "store_type == 'MongoStore': _class = MongoMetricMeta else: _class = SqlMetricMeta", "file # to you under the Apache License, Version 2.0", "int, metric_type: MetricType, uri: Text, tags: Text, metric_description: Text, properties:", "model_name=metric_meta_result.model_name, model_version=metric_meta_result.model_version, job_id=metric_meta_result.job_id, start_time=metric_meta_result.start_time, end_time=metric_meta_result.end_time, metric_type=MetricType.value_of(metric_meta_result.metric_type), uri=metric_meta_result.uri, tags=metric_meta_result.tags, metric_description=metric_meta_result.metric_description, properties=properties)", "# with the License. You may obtain a copy of", "MetricMeta, MetricType, MetricSummary from ai_flow.rest_endpoint.protobuf.message_pb2 import MetricMetaProto, MetricSummaryProto, MetricTypeProto, ReturnCode,", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "MetricTypeProto.DATASET == metric_meta_proto.metric_type: metric_type = MetricType.DATASET else: metric_type = MetricType.MODEL", "language governing permissions and limitations # under the License. 
#", "proto_to_metric_meta(metric_meta_proto: MetricMetaProto) -> MetricMeta: if MetricTypeProto.DATASET == metric_meta_proto.metric_type: metric_type =", "metric_meta=res) else: return ListMetricMetaResponse(return_code=1, return_msg=ReturnCode.Name(RESOURCE_DOES_NOT_EXIST).lower(), metric_meta=None) def _warp_metric_summary_response(metric_summary: Optional[MetricSummary]) ->", "from ai_flow.store.db.db_model import MongoMetricSummary, MongoMetricMeta def table_to_metric_meta(metric_meta_result) -> MetricMeta: properties", "== 'MongoStore': _class = MongoMetricMeta else: _class = SqlMetricMeta return", "software distributed under the License is distributed on an #", "Licensed to the Apache Software Foundation (ASF) under one #", "\\ SUCCESS, RESOURCE_DOES_NOT_EXIST from ai_flow.store.db.db_model import SqlMetricMeta, SqlMetricSummary from ai_flow.store.db.db_model", "under one # or more contributor license agreements. See the", "uri=metric_meta_result.uri, tags=metric_meta_result.tags, metric_description=metric_meta_result.metric_description, properties=properties) def table_to_metric_summary(metric_summary_result) -> MetricSummary: return MetricSummary(uuid=metric_summary_result.uuid,", "= 'SqlAlchemyStore'): if store_type == 'MongoStore': _class = MongoMetricSummary else:", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "= metric_meta_result.properties if properties is not None: properties = ast.literal_eval(properties)", "information # regarding copyright ownership. 
The ASF licenses this file", "not None: return MetricSummaryResponse(return_code=0, return_msg=ReturnCode.Name(SUCCESS).lower(), metric_summary=metric_summary_to_proto(metric_summary)) else: return MetricSummaryResponse(return_code=1, return_msg=ReturnCode.Name(RESOURCE_DOES_NOT_EXIST).lower(),", "None, metric_value=metric_summary_proto.metric_value.value if metric_summary_proto.HasField('metric_value') else None ) def _warp_metric_meta_response(metric_meta: Optional[MetricMeta])", "the Apache License, Version 2.0 (the # \"License\"); you may", "not None: properties = str(properties) if store_type == 'MongoStore': _class", "the License. # import ast from typing import Text, Optional,", "ai_flow.store.db.db_model import SqlMetricMeta, SqlMetricSummary from ai_flow.store.db.db_model import MongoMetricSummary, MongoMetricMeta def", "def table_to_metric_summary(metric_summary_result) -> MetricSummary: return MetricSummary(uuid=metric_summary_result.uuid, metric_id=metric_summary_result.metric_id, metric_key=metric_summary_result.metric_key, metric_value=metric_summary_result.metric_value) def", "MetricTypeProto.DATASET else: metric_type = MetricTypeProto.MODEL return MetricMetaProto(uuid=metric_meta.uuid, name=stringValue(metric_meta.name), dataset_id=int64Value(metric_meta.dataset_id), model_name=stringValue(metric_meta.model_name),", "you under the Apache License, Version 2.0 (the # \"License\");", "# KIND, either express or implied. 
See the License for", "metric_type = MetricType.MODEL return MetricMeta(uuid=metric_meta_proto.uuid, name=metric_meta_proto.name.value, dataset_id=metric_meta_proto.dataset_id.value, model_name=metric_meta_proto.model_name.value, model_version=metric_meta_proto.model_version.value, job_id=metric_meta_proto.job_id.value,", "uri=uri, tags=tags, metric_description=metric_description, properties=properties) def metric_summary_to_table(metric_id: int, metric_key: Text, metric_value:", "None ) def _warp_metric_meta_response(metric_meta: Optional[MetricMeta]) -> MetricMetaResponse: if metric_meta is", "agreements. See the NOTICE file # distributed with this work", "metric_value=metric_value) def metric_meta_to_proto(metric_meta: MetricMeta) -> MetricMetaProto: if metric_meta.metric_type == MetricType.DATASET:", "licenses this file # to you under the Apache License,", "metric_type = MetricType.DATASET else: metric_type = MetricType.MODEL return MetricMeta(uuid=metric_meta_proto.uuid, name=metric_meta_proto.name.value,", "return_msg=ReturnCode.Name(SUCCESS).lower(), metric_meta=metric_meta_to_proto(metric_meta)) else: return MetricMetaResponse(return_code=1, return_msg=ReturnCode.Name(RESOURCE_DOES_NOT_EXIST).lower(), metric_meta=None) def _warp_list_metric_meta_response(metric_meta: Union[None,", "by applicable law or agreed to in writing, # software", "SqlMetricSummary return _class(metric_id=metric_id, metric_key=metric_key, metric_value=metric_value) def metric_meta_to_proto(metric_meta: MetricMeta) -> MetricMetaProto:", "# Unless required by applicable law or agreed to in", "name=metric_meta_result.name, dataset_id=metric_meta_result.dataset_id, model_name=metric_meta_result.model_name, model_version=metric_meta_result.model_version, job_id=metric_meta_result.job_id, start_time=metric_meta_result.start_time, end_time=metric_meta_result.end_time, metric_type=MetricType.value_of(metric_meta_result.metric_type), uri=metric_meta_result.uri, tags=metric_meta_result.tags,", 
"properties = str(properties) if store_type == 'MongoStore': _class = MongoMetricMeta", "IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND,", "int, start_time: int, end_time: int, metric_type: MetricType, uri: Text, tags:", "MetricMetaResponse(return_code=1, return_msg=ReturnCode.Name(RESOURCE_DOES_NOT_EXIST).lower(), metric_meta=None) def _warp_list_metric_meta_response(metric_meta: Union[None, MetricMeta, List[MetricMeta]]) -> MetricMetaResponse:", "License. You may obtain a copy of the License at", "metric_meta_proto.HasField('metric_description') else None, properties=metric_meta_proto.properties ) def proto_to_metric_summary(metric_summary_proto: MetricSummaryProto) -> MetricSummary:", "You may obtain a copy of the License at #", "metric_type=metric_type, uri=metric_meta_proto.uri.value if metric_meta_proto.HasField('uri') else None, tags=metric_meta_proto.tags.value if metric_meta_proto.HasField('tags') else", "MetricMetaResponse, ListMetricMetaResponse, \\ MetricSummaryResponse, ListMetricSummaryResponse from ai_flow.rest_endpoint.service import int64Value, stringValue", "Union, List from ai_flow.rest_endpoint.protobuf.metric_service_pb2 import MetricMetaResponse, ListMetricMetaResponse, \\ MetricSummaryResponse, ListMetricSummaryResponse", "ListMetricSummaryResponse: if metric_summary is not None: res = [] for", "job_id=job_id, start_time=start_time, end_time=end_time, metric_type=metric_type.value, uri=uri, tags=tags, metric_description=metric_description, properties=properties) def metric_summary_to_table(metric_id:", "compliance # with the License. 
You may obtain a copy", "Properties, store_type: Text = 'SqlAlchemyStore'): if properties is not None:", "ai_flow.rest_endpoint.protobuf.metric_service_pb2 import MetricMetaResponse, ListMetricMetaResponse, \\ MetricSummaryResponse, ListMetricSummaryResponse from ai_flow.rest_endpoint.service import", "from ai_flow.common.properties import Properties from ai_flow.meta.metric_meta import MetricMeta, MetricType, MetricSummary", "else: return MetricMetaResponse(return_code=1, return_msg=ReturnCode.Name(RESOURCE_DOES_NOT_EXIST).lower(), metric_meta=None) def _warp_list_metric_meta_response(metric_meta: Union[None, MetricMeta, List[MetricMeta]])", "Optional[Text], model_version: Optional[Text], job_id: int, start_time: int, end_time: int, metric_type:", "-> ListMetricSummaryResponse: if metric_summary is not None: res = []" ]
[ "initElement(self): self.打开帮助按钮 = QPushButton(self.tr('打开帮助文档')) self.ffmpegMannualNoteButton = QPushButton(self.tr('查看作者的 FFmpeg 笔记')) self.openVideoHelpButtone", "PySide2.QtCore import Signal from moduels.component.NormalValue import 常量 from moduels.component.SponsorDialog import", "def __init__(self): super().__init__() self.initElement() # 先初始化各个控件 self.initSlots() # 再将各个控件连接到信号槽 self.initLayout()", "= QVBoxLayout() def initSlots(self): self.打开帮助按钮.clicked.connect(self.openHelpDocument) self.ffmpegMannualNoteButton.clicked.connect(lambda: webbrowser.open(self.tr(r'https://hacpai.com/article/1595480295489'))) self.openVideoHelpButtone.clicked.connect(lambda: webbrowser.open(self.tr(r'https://www.bilibili.com/video/BV12A411p73r/'))) self.openGiteePage.clicked.connect(lambda:", "self.openGithubPage.clicked.connect(lambda: webbrowser.open(self.tr(r'https://github.com/HaujetZhao/CapsWriter/releases'))) self.linkToDiscussPage.clicked.connect(lambda: webbrowser.open( self.tr(r'https://qm.qq.com/cgi-bin/qm/qr?k=DgiFh5cclAElnELH4mOxqWUBxReyEVpm&jump_from=webapi'))) self.tipButton.clicked.connect(lambda: SponsorDialog(self)) def initLayout(self): self.setLayout(self.masterLayout)", "self.linkToDiscussPage.setMaximumHeight(100) self.tipButton.setMaximumHeight(100) def openHelpDocument(self): try: if 常量.系统平台 == 'Darwin': import", "self.linkToDiscussPage.clicked.connect(lambda: webbrowser.open( self.tr(r'https://qm.qq.com/cgi-bin/qm/qr?k=DgiFh5cclAElnELH4mOxqWUBxReyEVpm&jump_from=webapi'))) self.tipButton.clicked.connect(lambda: SponsorDialog(self)) def initLayout(self): self.setLayout(self.masterLayout) # self.masterLayout.addWidget(self.打开帮助按钮)", "self.initValue() # 再定义各个控件的值 def initElement(self): self.打开帮助按钮 = QPushButton(self.tr('打开帮助文档')) self.ffmpegMannualNoteButton =", "webbrowser.open( self.tr(r'https://qm.qq.com/cgi-bin/qm/qr?k=DgiFh5cclAElnELH4mOxqWUBxReyEVpm&jump_from=webapi'))) self.tipButton.clicked.connect(lambda: SponsorDialog(self)) def initLayout(self): 
self.setLayout(self.masterLayout) # self.masterLayout.addWidget(self.打开帮助按钮) #", "self.openGiteePage = QPushButton(self.tr(f'当前版本是 v{常量.软件版本},到 Gitee 检查新版本')) self.openGithubPage = QPushButton(self.tr(f'当前版本是 v{常量.软件版本},到", "def initElement(self): self.打开帮助按钮 = QPushButton(self.tr('打开帮助文档')) self.ffmpegMannualNoteButton = QPushButton(self.tr('查看作者的 FFmpeg 笔记'))", "再定义各个控件的值 def initElement(self): self.打开帮助按钮 = QPushButton(self.tr('打开帮助文档')) self.ffmpegMannualNoteButton = QPushButton(self.tr('查看作者的 FFmpeg", "def initSlots(self): self.打开帮助按钮.clicked.connect(self.openHelpDocument) self.ffmpegMannualNoteButton.clicked.connect(lambda: webbrowser.open(self.tr(r'https://hacpai.com/article/1595480295489'))) self.openVideoHelpButtone.clicked.connect(lambda: webbrowser.open(self.tr(r'https://www.bilibili.com/video/BV12A411p73r/'))) self.openGiteePage.clicked.connect(lambda: webbrowser.open(self.tr(r'https://gitee.com/haujet/CapsWriter/releases'))) self.openGithubPage.clicked.connect(lambda:", "检查新版本')) self.openGithubPage = QPushButton(self.tr(f'当前版本是 v{常量.软件版本},到 Github 检查新版本')) self.linkToDiscussPage = QPushButton(self.tr('加入", "self.openGiteePage.setMaximumHeight(100) self.openGithubPage.setMaximumHeight(100) self.linkToDiscussPage.setMaximumHeight(100) self.tipButton.setMaximumHeight(100) def openHelpDocument(self): try: if 常量.系统平台 ==", "super().__init__() self.initElement() # 先初始化各个控件 self.initSlots() # 再将各个控件连接到信号槽 self.initLayout() # 然后布局", "def initValue(self): self.打开帮助按钮.setMaximumHeight(100) self.ffmpegMannualNoteButton.setMaximumHeight(100) self.openVideoHelpButtone.setMaximumHeight(100) self.openGiteePage.setMaximumHeight(100) self.openGithubPage.setMaximumHeight(100) self.linkToDiscussPage.setMaximumHeight(100) self.tipButton.setMaximumHeight(100) def", "self.openGiteePage.clicked.connect(lambda: webbrowser.open(self.tr(r'https://gitee.com/haujet/CapsWriter/releases'))) self.openGithubPage.clicked.connect(lambda: 
webbrowser.open(self.tr(r'https://github.com/HaujetZhao/CapsWriter/releases'))) self.linkToDiscussPage.clicked.connect(lambda: webbrowser.open( self.tr(r'https://qm.qq.com/cgi-bin/qm/qr?k=DgiFh5cclAElnELH4mOxqWUBxReyEVpm&jump_from=webapi'))) self.tipButton.clicked.connect(lambda: SponsorDialog(self)) def", "<reponame>HaujetZhao/Caps_Writer<filename>src/moduels/gui/Tab_Help.py # -*- coding: UTF-8 -*- from PySide2.QtWidgets import QWidget,", "self.打开帮助按钮 = QPushButton(self.tr('打开帮助文档')) self.ffmpegMannualNoteButton = QPushButton(self.tr('查看作者的 FFmpeg 笔记')) self.openVideoHelpButtone =", "import Signal from moduels.component.NormalValue import 常量 from moduels.component.SponsorDialog import SponsorDialog", "QVBoxLayout from PySide2.QtCore import Signal from moduels.component.NormalValue import 常量 from", "状态栏消息 = Signal(str, int) def __init__(self): super().__init__() self.initElement() # 先初始化各个控件", "UTF-8 -*- from PySide2.QtWidgets import QWidget, QPushButton, QVBoxLayout from PySide2.QtCore", "moduels.component.SponsorDialog import SponsorDialog import os, webbrowser class Tab_Help(QWidget): 状态栏消息 =", "self.initSlots() # 再将各个控件连接到信号槽 self.initLayout() # 然后布局 self.initValue() # 再定义各个控件的值 def", "QPushButton(self.tr('查看视频教程')) self.openGiteePage = QPushButton(self.tr(f'当前版本是 v{常量.软件版本},到 Gitee 检查新版本')) self.openGithubPage = QPushButton(self.tr(f'当前版本是", "def openHelpDocument(self): try: if 常量.系统平台 == 'Darwin': import shlex os.system(\"open", "self.openVideoHelpButtone.setMaximumHeight(100) self.openGiteePage.setMaximumHeight(100) self.openGithubPage.setMaximumHeight(100) self.linkToDiscussPage.setMaximumHeight(100) self.tipButton.setMaximumHeight(100) def openHelpDocument(self): try: if 常量.系统平台", "QPushButton(self.tr('加入 QQ 群')) self.tipButton = QPushButton(self.tr('打赏作者')) self.masterLayout = QVBoxLayout() def", "self.masterLayout.addWidget(self.openGithubPage) self.masterLayout.addWidget(self.linkToDiscussPage) self.masterLayout.addWidget(self.tipButton) def 
initValue(self): self.打开帮助按钮.setMaximumHeight(100) self.ffmpegMannualNoteButton.setMaximumHeight(100) self.openVideoHelpButtone.setMaximumHeight(100) self.openGiteePage.setMaximumHeight(100) self.openGithubPage.setMaximumHeight(100)", "= QPushButton(self.tr('加入 QQ 群')) self.tipButton = QPushButton(self.tr('打赏作者')) self.masterLayout = QVBoxLayout()", "os.system(\"open \" + shlex.quote(self.tr(\"./misc/Docs/README_zh.html\"))) elif 常量.系统平台 == 'Windows': os.startfile(os.path.realpath(self.tr('./misc/Docs/README_zh.html'))) except:", "__init__(self): super().__init__() self.initElement() # 先初始化各个控件 self.initSlots() # 再将各个控件连接到信号槽 self.initLayout() #", "笔记')) self.openVideoHelpButtone = QPushButton(self.tr('查看视频教程')) self.openGiteePage = QPushButton(self.tr(f'当前版本是 v{常量.软件版本},到 Gitee 检查新版本'))", "self.ffmpegMannualNoteButton.clicked.connect(lambda: webbrowser.open(self.tr(r'https://hacpai.com/article/1595480295489'))) self.openVideoHelpButtone.clicked.connect(lambda: webbrowser.open(self.tr(r'https://www.bilibili.com/video/BV12A411p73r/'))) self.openGiteePage.clicked.connect(lambda: webbrowser.open(self.tr(r'https://gitee.com/haujet/CapsWriter/releases'))) self.openGithubPage.clicked.connect(lambda: webbrowser.open(self.tr(r'https://github.com/HaujetZhao/CapsWriter/releases'))) self.linkToDiscussPage.clicked.connect(lambda: webbrowser.open(", "coding: UTF-8 -*- from PySide2.QtWidgets import QWidget, QPushButton, QVBoxLayout from", "-*- from PySide2.QtWidgets import QWidget, QPushButton, QVBoxLayout from PySide2.QtCore import", "== 'Darwin': import shlex os.system(\"open \" + shlex.quote(self.tr(\"./misc/Docs/README_zh.html\"))) elif 常量.系统平台", "QPushButton(self.tr('打赏作者')) self.masterLayout = QVBoxLayout() def initSlots(self): self.打开帮助按钮.clicked.connect(self.openHelpDocument) self.ffmpegMannualNoteButton.clicked.connect(lambda: webbrowser.open(self.tr(r'https://hacpai.com/article/1595480295489'))) self.openVideoHelpButtone.clicked.connect(lambda:", "FFmpeg 笔记')) 
self.openVideoHelpButtone = QPushButton(self.tr('查看视频教程')) self.openGiteePage = QPushButton(self.tr(f'当前版本是 v{常量.软件版本},到 Gitee", "try: if 常量.系统平台 == 'Darwin': import shlex os.system(\"open \" +", "import 常量 from moduels.component.SponsorDialog import SponsorDialog import os, webbrowser class", "QQ 群')) self.tipButton = QPushButton(self.tr('打赏作者')) self.masterLayout = QVBoxLayout() def initSlots(self):", "QVBoxLayout() def initSlots(self): self.打开帮助按钮.clicked.connect(self.openHelpDocument) self.ffmpegMannualNoteButton.clicked.connect(lambda: webbrowser.open(self.tr(r'https://hacpai.com/article/1595480295489'))) self.openVideoHelpButtone.clicked.connect(lambda: webbrowser.open(self.tr(r'https://www.bilibili.com/video/BV12A411p73r/'))) self.openGiteePage.clicked.connect(lambda: webbrowser.open(self.tr(r'https://gitee.com/haujet/CapsWriter/releases')))", "= QPushButton(self.tr('打开帮助文档')) self.ffmpegMannualNoteButton = QPushButton(self.tr('查看作者的 FFmpeg 笔记')) self.openVideoHelpButtone = QPushButton(self.tr('查看视频教程'))", "= QPushButton(self.tr(f'当前版本是 v{常量.软件版本},到 Github 检查新版本')) self.linkToDiscussPage = QPushButton(self.tr('加入 QQ 群'))", "# -*- coding: UTF-8 -*- from PySide2.QtWidgets import QWidget, QPushButton,", "import shlex os.system(\"open \" + shlex.quote(self.tr(\"./misc/Docs/README_zh.html\"))) elif 常量.系统平台 == 'Windows':", "\" + shlex.quote(self.tr(\"./misc/Docs/README_zh.html\"))) elif 常量.系统平台 == 'Windows': os.startfile(os.path.realpath(self.tr('./misc/Docs/README_zh.html'))) except: print('未能打开帮助文档')", "Gitee 检查新版本')) self.openGithubPage = QPushButton(self.tr(f'当前版本是 v{常量.软件版本},到 Github 检查新版本')) self.linkToDiscussPage =", "webbrowser.open(self.tr(r'https://github.com/HaujetZhao/CapsWriter/releases'))) self.linkToDiscussPage.clicked.connect(lambda: webbrowser.open( self.tr(r'https://qm.qq.com/cgi-bin/qm/qr?k=DgiFh5cclAElnELH4mOxqWUBxReyEVpm&jump_from=webapi'))) self.tipButton.clicked.connect(lambda: SponsorDialog(self)) def initLayout(self): 
self.setLayout(self.masterLayout) #", "# 然后布局 self.initValue() # 再定义各个控件的值 def initElement(self): self.打开帮助按钮 = QPushButton(self.tr('打开帮助文档'))", "# self.masterLayout.addWidget(self.ffmpegMannualNoteButton) self.masterLayout.addWidget(self.openVideoHelpButtone) self.masterLayout.addWidget(self.openGiteePage) self.masterLayout.addWidget(self.openGithubPage) self.masterLayout.addWidget(self.linkToDiscussPage) self.masterLayout.addWidget(self.tipButton) def initValue(self): self.打开帮助按钮.setMaximumHeight(100)", "self.openGithubPage = QPushButton(self.tr(f'当前版本是 v{常量.软件版本},到 Github 检查新版本')) self.linkToDiscussPage = QPushButton(self.tr('加入 QQ", "# self.masterLayout.addWidget(self.打开帮助按钮) # self.masterLayout.addWidget(self.ffmpegMannualNoteButton) self.masterLayout.addWidget(self.openVideoHelpButtone) self.masterLayout.addWidget(self.openGiteePage) self.masterLayout.addWidget(self.openGithubPage) self.masterLayout.addWidget(self.linkToDiscussPage) self.masterLayout.addWidget(self.tipButton) def", "常量.系统平台 == 'Darwin': import shlex os.system(\"open \" + shlex.quote(self.tr(\"./misc/Docs/README_zh.html\"))) elif", "PySide2.QtWidgets import QWidget, QPushButton, QVBoxLayout from PySide2.QtCore import Signal from", "def initLayout(self): self.setLayout(self.masterLayout) # self.masterLayout.addWidget(self.打开帮助按钮) # self.masterLayout.addWidget(self.ffmpegMannualNoteButton) self.masterLayout.addWidget(self.openVideoHelpButtone) self.masterLayout.addWidget(self.openGiteePage) self.masterLayout.addWidget(self.openGithubPage)", "self.setLayout(self.masterLayout) # self.masterLayout.addWidget(self.打开帮助按钮) # self.masterLayout.addWidget(self.ffmpegMannualNoteButton) self.masterLayout.addWidget(self.openVideoHelpButtone) self.masterLayout.addWidget(self.openGiteePage) self.masterLayout.addWidget(self.openGithubPage) self.masterLayout.addWidget(self.linkToDiscussPage) self.masterLayout.addWidget(self.tipButton)", "'Darwin': import shlex os.system(\"open \" + 
shlex.quote(self.tr(\"./misc/Docs/README_zh.html\"))) elif 常量.系统平台 ==", "= QPushButton(self.tr('查看作者的 FFmpeg 笔记')) self.openVideoHelpButtone = QPushButton(self.tr('查看视频教程')) self.openGiteePage = QPushButton(self.tr(f'当前版本是", "self.initElement() # 先初始化各个控件 self.initSlots() # 再将各个控件连接到信号槽 self.initLayout() # 然后布局 self.initValue()", "initValue(self): self.打开帮助按钮.setMaximumHeight(100) self.ffmpegMannualNoteButton.setMaximumHeight(100) self.openVideoHelpButtone.setMaximumHeight(100) self.openGiteePage.setMaximumHeight(100) self.openGithubPage.setMaximumHeight(100) self.linkToDiscussPage.setMaximumHeight(100) self.tipButton.setMaximumHeight(100) def openHelpDocument(self):", "import QWidget, QPushButton, QVBoxLayout from PySide2.QtCore import Signal from moduels.component.NormalValue", "self.打开帮助按钮.clicked.connect(self.openHelpDocument) self.ffmpegMannualNoteButton.clicked.connect(lambda: webbrowser.open(self.tr(r'https://hacpai.com/article/1595480295489'))) self.openVideoHelpButtone.clicked.connect(lambda: webbrowser.open(self.tr(r'https://www.bilibili.com/video/BV12A411p73r/'))) self.openGiteePage.clicked.connect(lambda: webbrowser.open(self.tr(r'https://gitee.com/haujet/CapsWriter/releases'))) self.openGithubPage.clicked.connect(lambda: webbrowser.open(self.tr(r'https://github.com/HaujetZhao/CapsWriter/releases'))) self.linkToDiscussPage.clicked.connect(lambda:", "self.masterLayout.addWidget(self.openVideoHelpButtone) self.masterLayout.addWidget(self.openGiteePage) self.masterLayout.addWidget(self.openGithubPage) self.masterLayout.addWidget(self.linkToDiscussPage) self.masterLayout.addWidget(self.tipButton) def initValue(self): self.打开帮助按钮.setMaximumHeight(100) self.ffmpegMannualNoteButton.setMaximumHeight(100) self.openVideoHelpButtone.setMaximumHeight(100)", "= QPushButton(self.tr(f'当前版本是 v{常量.软件版本},到 Gitee 检查新版本')) self.openGithubPage = QPushButton(self.tr(f'当前版本是 v{常量.软件版本},到 Github", "QPushButton(self.tr('打开帮助文档')) self.ffmpegMannualNoteButton = 
QPushButton(self.tr('查看作者的 FFmpeg 笔记')) self.openVideoHelpButtone = QPushButton(self.tr('查看视频教程')) self.openGiteePage", "QPushButton(self.tr('查看作者的 FFmpeg 笔记')) self.openVideoHelpButtone = QPushButton(self.tr('查看视频教程')) self.openGiteePage = QPushButton(self.tr(f'当前版本是 v{常量.软件版本},到", "# 再将各个控件连接到信号槽 self.initLayout() # 然后布局 self.initValue() # 再定义各个控件的值 def initElement(self):", "QPushButton(self.tr(f'当前版本是 v{常量.软件版本},到 Gitee 检查新版本')) self.openGithubPage = QPushButton(self.tr(f'当前版本是 v{常量.软件版本},到 Github 检查新版本'))", "# 再定义各个控件的值 def initElement(self): self.打开帮助按钮 = QPushButton(self.tr('打开帮助文档')) self.ffmpegMannualNoteButton = QPushButton(self.tr('查看作者的", "from moduels.component.NormalValue import 常量 from moduels.component.SponsorDialog import SponsorDialog import os,", "self.masterLayout.addWidget(self.打开帮助按钮) # self.masterLayout.addWidget(self.ffmpegMannualNoteButton) self.masterLayout.addWidget(self.openVideoHelpButtone) self.masterLayout.addWidget(self.openGiteePage) self.masterLayout.addWidget(self.openGithubPage) self.masterLayout.addWidget(self.linkToDiscussPage) self.masterLayout.addWidget(self.tipButton) def initValue(self):", "Tab_Help(QWidget): 状态栏消息 = Signal(str, int) def __init__(self): super().__init__() self.initElement() #", "群')) self.tipButton = QPushButton(self.tr('打赏作者')) self.masterLayout = QVBoxLayout() def initSlots(self): self.打开帮助按钮.clicked.connect(self.openHelpDocument)", "self.masterLayout.addWidget(self.openGiteePage) self.masterLayout.addWidget(self.openGithubPage) self.masterLayout.addWidget(self.linkToDiscussPage) self.masterLayout.addWidget(self.tipButton) def initValue(self): self.打开帮助按钮.setMaximumHeight(100) self.ffmpegMannualNoteButton.setMaximumHeight(100) self.openVideoHelpButtone.setMaximumHeight(100) self.openGiteePage.setMaximumHeight(100)", "self.tipButton.setMaximumHeight(100) def openHelpDocument(self): try: if 常量.系统平台 == 'Darwin': import shlex", "v{常量.软件版本},到 Github 检查新版本')) self.linkToDiscussPage = QPushButton(self.tr('加入 QQ 
群')) self.tipButton =", "self.masterLayout.addWidget(self.ffmpegMannualNoteButton) self.masterLayout.addWidget(self.openVideoHelpButtone) self.masterLayout.addWidget(self.openGiteePage) self.masterLayout.addWidget(self.openGithubPage) self.masterLayout.addWidget(self.linkToDiscussPage) self.masterLayout.addWidget(self.tipButton) def initValue(self): self.打开帮助按钮.setMaximumHeight(100) self.ffmpegMannualNoteButton.setMaximumHeight(100)", "Github 检查新版本')) self.linkToDiscussPage = QPushButton(self.tr('加入 QQ 群')) self.tipButton = QPushButton(self.tr('打赏作者'))", "os, webbrowser class Tab_Help(QWidget): 状态栏消息 = Signal(str, int) def __init__(self):", "Signal(str, int) def __init__(self): super().__init__() self.initElement() # 先初始化各个控件 self.initSlots() #", "class Tab_Help(QWidget): 状态栏消息 = Signal(str, int) def __init__(self): super().__init__() self.initElement()", "if 常量.系统平台 == 'Darwin': import shlex os.system(\"open \" + shlex.quote(self.tr(\"./misc/Docs/README_zh.html\")))", "然后布局 self.initValue() # 再定义各个控件的值 def initElement(self): self.打开帮助按钮 = QPushButton(self.tr('打开帮助文档')) self.ffmpegMannualNoteButton", "self.打开帮助按钮.setMaximumHeight(100) self.ffmpegMannualNoteButton.setMaximumHeight(100) self.openVideoHelpButtone.setMaximumHeight(100) self.openGiteePage.setMaximumHeight(100) self.openGithubPage.setMaximumHeight(100) self.linkToDiscussPage.setMaximumHeight(100) self.tipButton.setMaximumHeight(100) def openHelpDocument(self): try:", "shlex os.system(\"open \" + shlex.quote(self.tr(\"./misc/Docs/README_zh.html\"))) elif 常量.系统平台 == 'Windows': os.startfile(os.path.realpath(self.tr('./misc/Docs/README_zh.html')))", "SponsorDialog(self)) def initLayout(self): self.setLayout(self.masterLayout) # self.masterLayout.addWidget(self.打开帮助按钮) # self.masterLayout.addWidget(self.ffmpegMannualNoteButton) self.masterLayout.addWidget(self.openVideoHelpButtone) self.masterLayout.addWidget(self.openGiteePage)", "self.openGithubPage.setMaximumHeight(100) 
self.linkToDiscussPage.setMaximumHeight(100) self.tipButton.setMaximumHeight(100) def openHelpDocument(self): try: if 常量.系统平台 == 'Darwin':", "self.openVideoHelpButtone.clicked.connect(lambda: webbrowser.open(self.tr(r'https://www.bilibili.com/video/BV12A411p73r/'))) self.openGiteePage.clicked.connect(lambda: webbrowser.open(self.tr(r'https://gitee.com/haujet/CapsWriter/releases'))) self.openGithubPage.clicked.connect(lambda: webbrowser.open(self.tr(r'https://github.com/HaujetZhao/CapsWriter/releases'))) self.linkToDiscussPage.clicked.connect(lambda: webbrowser.open( self.tr(r'https://qm.qq.com/cgi-bin/qm/qr?k=DgiFh5cclAElnELH4mOxqWUBxReyEVpm&jump_from=webapi'))) self.tipButton.clicked.connect(lambda:", "self.masterLayout.addWidget(self.linkToDiscussPage) self.masterLayout.addWidget(self.tipButton) def initValue(self): self.打开帮助按钮.setMaximumHeight(100) self.ffmpegMannualNoteButton.setMaximumHeight(100) self.openVideoHelpButtone.setMaximumHeight(100) self.openGiteePage.setMaximumHeight(100) self.openGithubPage.setMaximumHeight(100) self.linkToDiscussPage.setMaximumHeight(100)", "from moduels.component.SponsorDialog import SponsorDialog import os, webbrowser class Tab_Help(QWidget): 状态栏消息", "self.linkToDiscussPage = QPushButton(self.tr('加入 QQ 群')) self.tipButton = QPushButton(self.tr('打赏作者')) self.masterLayout =", "再将各个控件连接到信号槽 self.initLayout() # 然后布局 self.initValue() # 再定义各个控件的值 def initElement(self): self.打开帮助按钮", "from PySide2.QtWidgets import QWidget, QPushButton, QVBoxLayout from PySide2.QtCore import Signal", "Signal from moduels.component.NormalValue import 常量 from moduels.component.SponsorDialog import SponsorDialog import", "v{常量.软件版本},到 Gitee 检查新版本')) self.openGithubPage = QPushButton(self.tr(f'当前版本是 v{常量.软件版本},到 Github 检查新版本')) self.linkToDiscussPage", "webbrowser class Tab_Help(QWidget): 状态栏消息 = Signal(str, int) def __init__(self): super().__init__()", "self.masterLayout.addWidget(self.tipButton) def initValue(self): self.打开帮助按钮.setMaximumHeight(100) 
self.ffmpegMannualNoteButton.setMaximumHeight(100) self.openVideoHelpButtone.setMaximumHeight(100) self.openGiteePage.setMaximumHeight(100) self.openGithubPage.setMaximumHeight(100) self.linkToDiscussPage.setMaximumHeight(100) self.tipButton.setMaximumHeight(100)", "检查新版本')) self.linkToDiscussPage = QPushButton(self.tr('加入 QQ 群')) self.tipButton = QPushButton(self.tr('打赏作者')) self.masterLayout", "initSlots(self): self.打开帮助按钮.clicked.connect(self.openHelpDocument) self.ffmpegMannualNoteButton.clicked.connect(lambda: webbrowser.open(self.tr(r'https://hacpai.com/article/1595480295489'))) self.openVideoHelpButtone.clicked.connect(lambda: webbrowser.open(self.tr(r'https://www.bilibili.com/video/BV12A411p73r/'))) self.openGiteePage.clicked.connect(lambda: webbrowser.open(self.tr(r'https://gitee.com/haujet/CapsWriter/releases'))) self.openGithubPage.clicked.connect(lambda: webbrowser.open(self.tr(r'https://github.com/HaujetZhao/CapsWriter/releases')))", "self.tr(r'https://qm.qq.com/cgi-bin/qm/qr?k=DgiFh5cclAElnELH4mOxqWUBxReyEVpm&jump_from=webapi'))) self.tipButton.clicked.connect(lambda: SponsorDialog(self)) def initLayout(self): self.setLayout(self.masterLayout) # self.masterLayout.addWidget(self.打开帮助按钮) # self.masterLayout.addWidget(self.ffmpegMannualNoteButton)", "= QPushButton(self.tr('打赏作者')) self.masterLayout = QVBoxLayout() def initSlots(self): self.打开帮助按钮.clicked.connect(self.openHelpDocument) self.ffmpegMannualNoteButton.clicked.connect(lambda: webbrowser.open(self.tr(r'https://hacpai.com/article/1595480295489')))", "self.ffmpegMannualNoteButton.setMaximumHeight(100) self.openVideoHelpButtone.setMaximumHeight(100) self.openGiteePage.setMaximumHeight(100) self.openGithubPage.setMaximumHeight(100) self.linkToDiscussPage.setMaximumHeight(100) self.tipButton.setMaximumHeight(100) def openHelpDocument(self): try: if", "self.initLayout() # 然后布局 self.initValue() # 再定义各个控件的值 def initElement(self): self.打开帮助按钮 =", "import os, webbrowser class Tab_Help(QWidget): 状态栏消息 = 
Signal(str, int) def", "= QPushButton(self.tr('查看视频教程')) self.openGiteePage = QPushButton(self.tr(f'当前版本是 v{常量.软件版本},到 Gitee 检查新版本')) self.openGithubPage =", "initLayout(self): self.setLayout(self.masterLayout) # self.masterLayout.addWidget(self.打开帮助按钮) # self.masterLayout.addWidget(self.ffmpegMannualNoteButton) self.masterLayout.addWidget(self.openVideoHelpButtone) self.masterLayout.addWidget(self.openGiteePage) self.masterLayout.addWidget(self.openGithubPage) self.masterLayout.addWidget(self.linkToDiscussPage)", "QPushButton(self.tr(f'当前版本是 v{常量.软件版本},到 Github 检查新版本')) self.linkToDiscussPage = QPushButton(self.tr('加入 QQ 群')) self.tipButton", "import SponsorDialog import os, webbrowser class Tab_Help(QWidget): 状态栏消息 = Signal(str,", "from PySide2.QtCore import Signal from moduels.component.NormalValue import 常量 from moduels.component.SponsorDialog", "int) def __init__(self): super().__init__() self.initElement() # 先初始化各个控件 self.initSlots() # 再将各个控件连接到信号槽", "QPushButton, QVBoxLayout from PySide2.QtCore import Signal from moduels.component.NormalValue import 常量", "moduels.component.NormalValue import 常量 from moduels.component.SponsorDialog import SponsorDialog import os, webbrowser", "self.masterLayout = QVBoxLayout() def initSlots(self): self.打开帮助按钮.clicked.connect(self.openHelpDocument) self.ffmpegMannualNoteButton.clicked.connect(lambda: webbrowser.open(self.tr(r'https://hacpai.com/article/1595480295489'))) self.openVideoHelpButtone.clicked.connect(lambda: webbrowser.open(self.tr(r'https://www.bilibili.com/video/BV12A411p73r/')))", "openHelpDocument(self): try: if 常量.系统平台 == 'Darwin': import shlex os.system(\"open \"", "webbrowser.open(self.tr(r'https://gitee.com/haujet/CapsWriter/releases'))) self.openGithubPage.clicked.connect(lambda: webbrowser.open(self.tr(r'https://github.com/HaujetZhao/CapsWriter/releases'))) self.linkToDiscussPage.clicked.connect(lambda: webbrowser.open( 
self.tr(r'https://qm.qq.com/cgi-bin/qm/qr?k=DgiFh5cclAElnELH4mOxqWUBxReyEVpm&jump_from=webapi'))) self.tipButton.clicked.connect(lambda: SponsorDialog(self)) def initLayout(self):", "常量 from moduels.component.SponsorDialog import SponsorDialog import os, webbrowser class Tab_Help(QWidget):", "self.tipButton.clicked.connect(lambda: SponsorDialog(self)) def initLayout(self): self.setLayout(self.masterLayout) # self.masterLayout.addWidget(self.打开帮助按钮) # self.masterLayout.addWidget(self.ffmpegMannualNoteButton) self.masterLayout.addWidget(self.openVideoHelpButtone)", "先初始化各个控件 self.initSlots() # 再将各个控件连接到信号槽 self.initLayout() # 然后布局 self.initValue() # 再定义各个控件的值", "# 先初始化各个控件 self.initSlots() # 再将各个控件连接到信号槽 self.initLayout() # 然后布局 self.initValue() #", "webbrowser.open(self.tr(r'https://hacpai.com/article/1595480295489'))) self.openVideoHelpButtone.clicked.connect(lambda: webbrowser.open(self.tr(r'https://www.bilibili.com/video/BV12A411p73r/'))) self.openGiteePage.clicked.connect(lambda: webbrowser.open(self.tr(r'https://gitee.com/haujet/CapsWriter/releases'))) self.openGithubPage.clicked.connect(lambda: webbrowser.open(self.tr(r'https://github.com/HaujetZhao/CapsWriter/releases'))) self.linkToDiscussPage.clicked.connect(lambda: webbrowser.open( self.tr(r'https://qm.qq.com/cgi-bin/qm/qr?k=DgiFh5cclAElnELH4mOxqWUBxReyEVpm&jump_from=webapi')))", "webbrowser.open(self.tr(r'https://www.bilibili.com/video/BV12A411p73r/'))) self.openGiteePage.clicked.connect(lambda: webbrowser.open(self.tr(r'https://gitee.com/haujet/CapsWriter/releases'))) self.openGithubPage.clicked.connect(lambda: webbrowser.open(self.tr(r'https://github.com/HaujetZhao/CapsWriter/releases'))) self.linkToDiscussPage.clicked.connect(lambda: webbrowser.open( self.tr(r'https://qm.qq.com/cgi-bin/qm/qr?k=DgiFh5cclAElnELH4mOxqWUBxReyEVpm&jump_from=webapi'))) self.tipButton.clicked.connect(lambda: SponsorDialog(self))", "self.ffmpegMannualNoteButton = QPushButton(self.tr('查看作者的 FFmpeg 笔记')) 
self.openVideoHelpButtone = QPushButton(self.tr('查看视频教程')) self.openGiteePage =", "QWidget, QPushButton, QVBoxLayout from PySide2.QtCore import Signal from moduels.component.NormalValue import", "self.openVideoHelpButtone = QPushButton(self.tr('查看视频教程')) self.openGiteePage = QPushButton(self.tr(f'当前版本是 v{常量.软件版本},到 Gitee 检查新版本')) self.openGithubPage", "self.tipButton = QPushButton(self.tr('打赏作者')) self.masterLayout = QVBoxLayout() def initSlots(self): self.打开帮助按钮.clicked.connect(self.openHelpDocument) self.ffmpegMannualNoteButton.clicked.connect(lambda:", "-*- coding: UTF-8 -*- from PySide2.QtWidgets import QWidget, QPushButton, QVBoxLayout", "SponsorDialog import os, webbrowser class Tab_Help(QWidget): 状态栏消息 = Signal(str, int)", "= Signal(str, int) def __init__(self): super().__init__() self.initElement() # 先初始化各个控件 self.initSlots()" ]
[ "typing import Callable, Optional, Type, cast from fastapi import APIRouter,", "import users from app.common.user import ErrorCode, run_handler from app.users.user import", "from app.models import users from app.common.user import ErrorCode, run_handler from", "Optional, Type, cast from fastapi import APIRouter, HTTPException, Request, status", "def get_register_router( create_user: CreateUserProtocol, user_model: Type[users.BaseUser], user_create_model: Type[users.BaseUserCreate], after_register: Optional[Callable[[users.UD,", "Type[users.BaseUserCreate], after_register: Optional[Callable[[users.UD, Request], None]] = None, validate_password: Optional[ValidatePasswordProtocol] =", "except InvalidPasswordException as e: raise HTTPException( status_code=status.HTTP_400_BAD_REQUEST, detail={ \"code\": ErrorCode.REGISTER_INVALID_PASSWORD,", "cast(users.BaseUserCreate, user) # Prevent mypy complain if validate_password: try: await", "# Prevent mypy complain if validate_password: try: await validate_password(user.password, user)", "validate_password(user.password, user) except InvalidPasswordException as e: raise HTTPException( status_code=status.HTTP_400_BAD_REQUEST, detail={", "created_user = await create_user(user, safe=True) except UserAlreadyExists: raise HTTPException( status_code=status.HTTP_400_BAD_REQUEST,", "create_user(user, safe=True) except UserAlreadyExists: raise HTTPException( status_code=status.HTTP_400_BAD_REQUEST, detail=ErrorCode.REGISTER_USER_ALREADY_EXISTS, ) if", "fastapi import APIRouter, HTTPException, Request, status from app.models import users", "status_code=status.HTTP_400_BAD_REQUEST, detail=ErrorCode.REGISTER_USER_ALREADY_EXISTS, ) if after_register: await run_handler(after_register, created_user, request) return", "APIRouter() @router.post( \"/register\", response_model=user_model, status_code=status.HTTP_201_CREATED ) async def register(request: Request,", "( CreateUserProtocol, InvalidPasswordException, UserAlreadyExists, 
ValidatePasswordProtocol, ) def get_register_router( create_user: CreateUserProtocol,", "\"reason\": e.reason, }, ) try: created_user = await create_user(user, safe=True)", "from fastapi import APIRouter, HTTPException, Request, status from app.models import", "validate_password: try: await validate_password(user.password, user) except InvalidPasswordException as e: raise", "# type: ignore user = cast(users.BaseUserCreate, user) # Prevent mypy", "= await create_user(user, safe=True) except UserAlreadyExists: raise HTTPException( status_code=status.HTTP_400_BAD_REQUEST, detail=ErrorCode.REGISTER_USER_ALREADY_EXISTS,", "= None, validate_password: Optional[ValidatePasswordProtocol] = None, ) -> APIRouter: \"\"\"Generate", "cast from fastapi import APIRouter, HTTPException, Request, status from app.models", "\"code\": ErrorCode.REGISTER_INVALID_PASSWORD, \"reason\": e.reason, }, ) try: created_user = await", "try: await validate_password(user.password, user) except InvalidPasswordException as e: raise HTTPException(", "None]] = None, validate_password: Optional[ValidatePasswordProtocol] = None, ) -> APIRouter:", ") try: created_user = await create_user(user, safe=True) except UserAlreadyExists: raise", "ErrorCode, run_handler from app.users.user import ( CreateUserProtocol, InvalidPasswordException, UserAlreadyExists, ValidatePasswordProtocol,", "APIRouter, HTTPException, Request, status from app.models import users from app.common.user", "Type, cast from fastapi import APIRouter, HTTPException, Request, status from", "InvalidPasswordException as e: raise HTTPException( status_code=status.HTTP_400_BAD_REQUEST, detail={ \"code\": ErrorCode.REGISTER_INVALID_PASSWORD, \"reason\":", "as e: raise HTTPException( status_code=status.HTTP_400_BAD_REQUEST, detail={ \"code\": ErrorCode.REGISTER_INVALID_PASSWORD, \"reason\": e.reason,", "Request, user: user_create_model): # type: ignore user = cast(users.BaseUserCreate, user)", "safe=True) except UserAlreadyExists: raise 
HTTPException( status_code=status.HTTP_400_BAD_REQUEST, detail=ErrorCode.REGISTER_USER_ALREADY_EXISTS, ) if after_register:", "mypy complain if validate_password: try: await validate_password(user.password, user) except InvalidPasswordException", "-> APIRouter: \"\"\"Generate a router with the register route.\"\"\" router", "router = APIRouter() @router.post( \"/register\", response_model=user_model, status_code=status.HTTP_201_CREATED ) async def", "if validate_password: try: await validate_password(user.password, user) except InvalidPasswordException as e:", "ignore user = cast(users.BaseUserCreate, user) # Prevent mypy complain if", "None, validate_password: Optional[ValidatePasswordProtocol] = None, ) -> APIRouter: \"\"\"Generate a", ") -> APIRouter: \"\"\"Generate a router with the register route.\"\"\"", "HTTPException( status_code=status.HTTP_400_BAD_REQUEST, detail={ \"code\": ErrorCode.REGISTER_INVALID_PASSWORD, \"reason\": e.reason, }, ) try:", "user_create_model): # type: ignore user = cast(users.BaseUserCreate, user) # Prevent", "try: created_user = await create_user(user, safe=True) except UserAlreadyExists: raise HTTPException(", "CreateUserProtocol, InvalidPasswordException, UserAlreadyExists, ValidatePasswordProtocol, ) def get_register_router( create_user: CreateUserProtocol, user_model:", "\"/register\", response_model=user_model, status_code=status.HTTP_201_CREATED ) async def register(request: Request, user: user_create_model):", "import Callable, Optional, Type, cast from fastapi import APIRouter, HTTPException,", ") def get_register_router( create_user: CreateUserProtocol, user_model: Type[users.BaseUser], user_create_model: Type[users.BaseUserCreate], after_register:", "e: raise HTTPException( status_code=status.HTTP_400_BAD_REQUEST, detail={ \"code\": ErrorCode.REGISTER_INVALID_PASSWORD, \"reason\": e.reason, },", "async def register(request: Request, user: user_create_model): # type: ignore user", "user) # Prevent mypy complain if 
validate_password: try: await validate_password(user.password,", "validate_password: Optional[ValidatePasswordProtocol] = None, ) -> APIRouter: \"\"\"Generate a router", "the register route.\"\"\" router = APIRouter() @router.post( \"/register\", response_model=user_model, status_code=status.HTTP_201_CREATED", "user) except InvalidPasswordException as e: raise HTTPException( status_code=status.HTTP_400_BAD_REQUEST, detail={ \"code\":", "Prevent mypy complain if validate_password: try: await validate_password(user.password, user) except", "import APIRouter, HTTPException, Request, status from app.models import users from", "if after_register: await run_handler(after_register, created_user, request) return created_user return router", "Type[users.BaseUser], user_create_model: Type[users.BaseUserCreate], after_register: Optional[Callable[[users.UD, Request], None]] = None, validate_password:", "a router with the register route.\"\"\" router = APIRouter() @router.post(", "user_create_model: Type[users.BaseUserCreate], after_register: Optional[Callable[[users.UD, Request], None]] = None, validate_password: Optional[ValidatePasswordProtocol]", "with the register route.\"\"\" router = APIRouter() @router.post( \"/register\", response_model=user_model,", "app.models import users from app.common.user import ErrorCode, run_handler from app.users.user", "run_handler from app.users.user import ( CreateUserProtocol, InvalidPasswordException, UserAlreadyExists, ValidatePasswordProtocol, )", "@router.post( \"/register\", response_model=user_model, status_code=status.HTTP_201_CREATED ) async def register(request: Request, user:", "user = cast(users.BaseUserCreate, user) # Prevent mypy complain if validate_password:", "HTTPException( status_code=status.HTTP_400_BAD_REQUEST, detail=ErrorCode.REGISTER_USER_ALREADY_EXISTS, ) if after_register: await run_handler(after_register, created_user, request)", "ValidatePasswordProtocol, ) def get_register_router( create_user: 
CreateUserProtocol, user_model: Type[users.BaseUser], user_create_model: Type[users.BaseUserCreate],", "complain if validate_password: try: await validate_password(user.password, user) except InvalidPasswordException as", "CreateUserProtocol, user_model: Type[users.BaseUser], user_create_model: Type[users.BaseUserCreate], after_register: Optional[Callable[[users.UD, Request], None]] =", "HTTPException, Request, status from app.models import users from app.common.user import", ") if after_register: await run_handler(after_register, created_user, request) return created_user return", "APIRouter: \"\"\"Generate a router with the register route.\"\"\" router =", "route.\"\"\" router = APIRouter() @router.post( \"/register\", response_model=user_model, status_code=status.HTTP_201_CREATED ) async", "from typing import Callable, Optional, Type, cast from fastapi import", "Optional[ValidatePasswordProtocol] = None, ) -> APIRouter: \"\"\"Generate a router with", "None, ) -> APIRouter: \"\"\"Generate a router with the register", "except UserAlreadyExists: raise HTTPException( status_code=status.HTTP_400_BAD_REQUEST, detail=ErrorCode.REGISTER_USER_ALREADY_EXISTS, ) if after_register: await", "import ErrorCode, run_handler from app.users.user import ( CreateUserProtocol, InvalidPasswordException, UserAlreadyExists,", "UserAlreadyExists, ValidatePasswordProtocol, ) def get_register_router( create_user: CreateUserProtocol, user_model: Type[users.BaseUser], user_create_model:", "raise HTTPException( status_code=status.HTTP_400_BAD_REQUEST, detail=ErrorCode.REGISTER_USER_ALREADY_EXISTS, ) if after_register: await run_handler(after_register, created_user,", "await validate_password(user.password, user) except InvalidPasswordException as e: raise HTTPException( status_code=status.HTTP_400_BAD_REQUEST,", "= None, ) -> APIRouter: \"\"\"Generate a router with the", "app.common.user import ErrorCode, run_handler from app.users.user import ( CreateUserProtocol, 
InvalidPasswordException,", "Request, status from app.models import users from app.common.user import ErrorCode,", "register(request: Request, user: user_create_model): # type: ignore user = cast(users.BaseUserCreate,", "create_user: CreateUserProtocol, user_model: Type[users.BaseUser], user_create_model: Type[users.BaseUserCreate], after_register: Optional[Callable[[users.UD, Request], None]]", "type: ignore user = cast(users.BaseUserCreate, user) # Prevent mypy complain", "import ( CreateUserProtocol, InvalidPasswordException, UserAlreadyExists, ValidatePasswordProtocol, ) def get_register_router( create_user:", "Optional[Callable[[users.UD, Request], None]] = None, validate_password: Optional[ValidatePasswordProtocol] = None, )", "status from app.models import users from app.common.user import ErrorCode, run_handler", "= cast(users.BaseUserCreate, user) # Prevent mypy complain if validate_password: try:", "from app.users.user import ( CreateUserProtocol, InvalidPasswordException, UserAlreadyExists, ValidatePasswordProtocol, ) def", "router with the register route.\"\"\" router = APIRouter() @router.post( \"/register\",", "Callable, Optional, Type, cast from fastapi import APIRouter, HTTPException, Request,", ") async def register(request: Request, user: user_create_model): # type: ignore", "detail={ \"code\": ErrorCode.REGISTER_INVALID_PASSWORD, \"reason\": e.reason, }, ) try: created_user =", "\"\"\"Generate a router with the register route.\"\"\" router = APIRouter()", "Request], None]] = None, validate_password: Optional[ValidatePasswordProtocol] = None, ) ->", "app.users.user import ( CreateUserProtocol, InvalidPasswordException, UserAlreadyExists, ValidatePasswordProtocol, ) def get_register_router(", "after_register: Optional[Callable[[users.UD, Request], None]] = None, validate_password: Optional[ValidatePasswordProtocol] = None,", "e.reason, }, ) try: created_user = await create_user(user, safe=True) except", "UserAlreadyExists: raise HTTPException( 
status_code=status.HTTP_400_BAD_REQUEST, detail=ErrorCode.REGISTER_USER_ALREADY_EXISTS, ) if after_register: await run_handler(after_register,", "detail=ErrorCode.REGISTER_USER_ALREADY_EXISTS, ) if after_register: await run_handler(after_register, created_user, request) return created_user", "<gh_stars>1-10 from typing import Callable, Optional, Type, cast from fastapi", "InvalidPasswordException, UserAlreadyExists, ValidatePasswordProtocol, ) def get_register_router( create_user: CreateUserProtocol, user_model: Type[users.BaseUser],", "users from app.common.user import ErrorCode, run_handler from app.users.user import (", "from app.common.user import ErrorCode, run_handler from app.users.user import ( CreateUserProtocol,", "ErrorCode.REGISTER_INVALID_PASSWORD, \"reason\": e.reason, }, ) try: created_user = await create_user(user,", "user: user_create_model): # type: ignore user = cast(users.BaseUserCreate, user) #", "await create_user(user, safe=True) except UserAlreadyExists: raise HTTPException( status_code=status.HTTP_400_BAD_REQUEST, detail=ErrorCode.REGISTER_USER_ALREADY_EXISTS, )", "raise HTTPException( status_code=status.HTTP_400_BAD_REQUEST, detail={ \"code\": ErrorCode.REGISTER_INVALID_PASSWORD, \"reason\": e.reason, }, )", "}, ) try: created_user = await create_user(user, safe=True) except UserAlreadyExists:", "register route.\"\"\" router = APIRouter() @router.post( \"/register\", response_model=user_model, status_code=status.HTTP_201_CREATED )", "= APIRouter() @router.post( \"/register\", response_model=user_model, status_code=status.HTTP_201_CREATED ) async def register(request:", "get_register_router( create_user: CreateUserProtocol, user_model: Type[users.BaseUser], user_create_model: Type[users.BaseUserCreate], after_register: Optional[Callable[[users.UD, Request],", "user_model: Type[users.BaseUser], user_create_model: Type[users.BaseUserCreate], after_register: Optional[Callable[[users.UD, Request], None]] = None,", 
"status_code=status.HTTP_400_BAD_REQUEST, detail={ \"code\": ErrorCode.REGISTER_INVALID_PASSWORD, \"reason\": e.reason, }, ) try: created_user", "status_code=status.HTTP_201_CREATED ) async def register(request: Request, user: user_create_model): # type:", "response_model=user_model, status_code=status.HTTP_201_CREATED ) async def register(request: Request, user: user_create_model): #", "def register(request: Request, user: user_create_model): # type: ignore user =" ]
[ "= np.where(nans) flow_magnitude[nans] = 0. # Normalize hsv[..., 0] =", "coding: utf-8 -*- import cv2 import numpy as np def", "np.any(nans): nans = np.where(nans) flow_magnitude[nans] = 0. # Normalize hsv[...,", "import numpy as np def flow_to_img(flow, normalize=True): \"\"\"Convert flow to", "we do here. Args: flow: optical flow normalize: Normalize flow", "to 0..255 Returns: img: viewable representation of the dense optical", "hsv[..., 2] = 255 img = cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB) return img", "here. Args: flow: optical flow normalize: Normalize flow to 0..255", "length. This is similar to the OpenCV tutorial on dense", "saturation to encode vector length. This is similar to the", "dense optical flow, except that they map vector length to", "instead of the saturation plane, as we do here. Args:", "optical flow, except that they map vector length to the", "flow: optical flow normalize: Normalize flow to 0..255 Returns: img:", "/ 2 if normalize is True: hsv[..., 1] = cv2.normalize(flow_magnitude,", "plane, as we do here. Args: flow: optical flow normalize:", "that they map vector length to the value plane of", "hsv[..., 1] = cv2.normalize(flow_magnitude, None, 0, 255, cv2.NORM_MINMAX) else: hsv[...,", "\"\"\"Convert flow to viewable image, using color hue to encode", "do here. Args: flow: optical flow normalize: Normalize flow to", "# Normalize hsv[..., 0] = flow_angle * 180 / np.pi", "0..255 Returns: img: viewable representation of the dense optical flow", "= 0. # Normalize hsv[..., 0] = flow_angle * 180", "dense optical flow in RGB format Ref: https://github.com/philferriere/tfoptflow/blob/33e8a701e34c8ce061f17297d40619afbd459ade/tfoptflow/optflow.py \"\"\" hsv", "encode vector length. 
This is similar to the OpenCV tutorial", "flow, except that they map vector length to the value", "in RGB format Ref: https://github.com/philferriere/tfoptflow/blob/33e8a701e34c8ce061f17297d40619afbd459ade/tfoptflow/optflow.py \"\"\" hsv = np.zeros((flow.shape[0], flow.shape[1],", "np.where(nans) flow_magnitude[nans] = 0. # Normalize hsv[..., 0] = flow_angle", "#!/usr/bin/env python3 # -*- coding: utf-8 -*- import cv2 import", "dtype=np.uint8) flow_magnitude, flow_angle = cv2.cartToPolar(flow[..., 0].astype(np.float32), flow[..., 1].astype(np.float32)) # A", "cv2.cartToPolar(flow[..., 0].astype(np.float32), flow[..., 1].astype(np.float32)) # A couple times, we've gotten", "representation of the dense optical flow in RGB format Ref:", "hsv[..., 0] = flow_angle * 180 / np.pi / 2", "flow_to_img(flow, normalize=True): \"\"\"Convert flow to viewable image, using color hue", "OpenCV tutorial on dense optical flow, except that they map", "flow[..., 1].astype(np.float32)) # A couple times, we've gotten NaNs out", "= flow_angle * 180 / np.pi / 2 if normalize", "0. # Normalize hsv[..., 0] = flow_angle * 180 /", "hsv = np.zeros((flow.shape[0], flow.shape[1], 3), dtype=np.uint8) flow_magnitude, flow_angle = cv2.cartToPolar(flow[...,", "the saturation plane, as we do here. Args: flow: optical", "to viewable image, using color hue to encode flow vector", "length to the value plane of the HSV color model,", "vector length. This is similar to the OpenCV tutorial on", "orientation, and color saturation to encode vector length. 
This is", "https://github.com/philferriere/tfoptflow/blob/33e8a701e34c8ce061f17297d40619afbd459ade/tfoptflow/optflow.py \"\"\" hsv = np.zeros((flow.shape[0], flow.shape[1], 3), dtype=np.uint8) flow_magnitude, flow_angle", "np.pi / 2 if normalize is True: hsv[..., 1] =", "0].astype(np.float32), flow[..., 1].astype(np.float32)) # A couple times, we've gotten NaNs", "tutorial on dense optical flow, except that they map vector", "Returns: img: viewable representation of the dense optical flow in", "above... nans = np.isnan(flow_magnitude) if np.any(nans): nans = np.where(nans) flow_magnitude[nans]", "nans = np.isnan(flow_magnitude) if np.any(nans): nans = np.where(nans) flow_magnitude[nans] =", "Normalize hsv[..., 0] = flow_angle * 180 / np.pi /", "= flow_magnitude hsv[..., 2] = 255 img = cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB)", "flow_angle = cv2.cartToPolar(flow[..., 0].astype(np.float32), flow[..., 1].astype(np.float32)) # A couple times,", "plane of the HSV color model, instead of the saturation", "flow vector orientation, and color saturation to encode vector length.", "\"\"\" hsv = np.zeros((flow.shape[0], flow.shape[1], 3), dtype=np.uint8) flow_magnitude, flow_angle =", "similar to the OpenCV tutorial on dense optical flow, except", "as np def flow_to_img(flow, normalize=True): \"\"\"Convert flow to viewable image,", "A couple times, we've gotten NaNs out of the above...", "gotten NaNs out of the above... nans = np.isnan(flow_magnitude) if", "color saturation to encode vector length. This is similar to", "nans = np.where(nans) flow_magnitude[nans] = 0. # Normalize hsv[..., 0]", "255, cv2.NORM_MINMAX) else: hsv[..., 1] = flow_magnitude hsv[..., 2] =", "the value plane of the HSV color model, instead of", "color model, instead of the saturation plane, as we do", "we've gotten NaNs out of the above... nans = np.isnan(flow_magnitude)", "the above... 
nans = np.isnan(flow_magnitude) if np.any(nans): nans = np.where(nans)", "viewable image, using color hue to encode flow vector orientation,", "cv2.normalize(flow_magnitude, None, 0, 255, cv2.NORM_MINMAX) else: hsv[..., 1] = flow_magnitude", "encode flow vector orientation, and color saturation to encode vector", "of the above... nans = np.isnan(flow_magnitude) if np.any(nans): nans =", "-*- import cv2 import numpy as np def flow_to_img(flow, normalize=True):", "cv2 import numpy as np def flow_to_img(flow, normalize=True): \"\"\"Convert flow", "0] = flow_angle * 180 / np.pi / 2 if", "as we do here. Args: flow: optical flow normalize: Normalize", "to the value plane of the HSV color model, instead", "vector orientation, and color saturation to encode vector length. This", "= cv2.normalize(flow_magnitude, None, 0, 255, cv2.NORM_MINMAX) else: hsv[..., 1] =", "color hue to encode flow vector orientation, and color saturation", "flow.shape[1], 3), dtype=np.uint8) flow_magnitude, flow_angle = cv2.cartToPolar(flow[..., 0].astype(np.float32), flow[..., 1].astype(np.float32))", "and color saturation to encode vector length. This is similar", "This is similar to the OpenCV tutorial on dense optical", "# -*- coding: utf-8 -*- import cv2 import numpy as", "RGB format Ref: https://github.com/philferriere/tfoptflow/blob/33e8a701e34c8ce061f17297d40619afbd459ade/tfoptflow/optflow.py \"\"\" hsv = np.zeros((flow.shape[0], flow.shape[1], 3),", "Ref: https://github.com/philferriere/tfoptflow/blob/33e8a701e34c8ce061f17297d40619afbd459ade/tfoptflow/optflow.py \"\"\" hsv = np.zeros((flow.shape[0], flow.shape[1], 3), dtype=np.uint8) flow_magnitude,", "of the saturation plane, as we do here. Args: flow:", "None, 0, 255, cv2.NORM_MINMAX) else: hsv[..., 1] = flow_magnitude hsv[...,", "3), dtype=np.uint8) flow_magnitude, flow_angle = cv2.cartToPolar(flow[..., 0].astype(np.float32), flow[..., 1].astype(np.float32)) #", "times, we've gotten NaNs out of the above... 
nans =", "on dense optical flow, except that they map vector length", "# A couple times, we've gotten NaNs out of the", "optical flow normalize: Normalize flow to 0..255 Returns: img: viewable", "couple times, we've gotten NaNs out of the above... nans", "except that they map vector length to the value plane", "utf-8 -*- import cv2 import numpy as np def flow_to_img(flow,", "the OpenCV tutorial on dense optical flow, except that they", "np.zeros((flow.shape[0], flow.shape[1], 3), dtype=np.uint8) flow_magnitude, flow_angle = cv2.cartToPolar(flow[..., 0].astype(np.float32), flow[...,", "np.isnan(flow_magnitude) if np.any(nans): nans = np.where(nans) flow_magnitude[nans] = 0. #", "flow to 0..255 Returns: img: viewable representation of the dense", "* 180 / np.pi / 2 if normalize is True:", "the dense optical flow in RGB format Ref: https://github.com/philferriere/tfoptflow/blob/33e8a701e34c8ce061f17297d40619afbd459ade/tfoptflow/optflow.py \"\"\"", "of the HSV color model, instead of the saturation plane,", "True: hsv[..., 1] = cv2.normalize(flow_magnitude, None, 0, 255, cv2.NORM_MINMAX) else:", "flow to viewable image, using color hue to encode flow", "optical flow in RGB format Ref: https://github.com/philferriere/tfoptflow/blob/33e8a701e34c8ce061f17297d40619afbd459ade/tfoptflow/optflow.py \"\"\" hsv =", "image, using color hue to encode flow vector orientation, and", "python3 # -*- coding: utf-8 -*- import cv2 import numpy", "1] = flow_magnitude hsv[..., 2] = 255 img = cv2.cvtColor(hsv,", "= np.isnan(flow_magnitude) if np.any(nans): nans = np.where(nans) flow_magnitude[nans] = 0.", "1].astype(np.float32)) # A couple times, we've gotten NaNs out of", "flow_magnitude[nans] = 0. 
# Normalize hsv[..., 0] = flow_angle *", "normalize=True): \"\"\"Convert flow to viewable image, using color hue to", "= cv2.cartToPolar(flow[..., 0].astype(np.float32), flow[..., 1].astype(np.float32)) # A couple times, we've", "flow_angle * 180 / np.pi / 2 if normalize is", "def flow_to_img(flow, normalize=True): \"\"\"Convert flow to viewable image, using color", "else: hsv[..., 1] = flow_magnitude hsv[..., 2] = 255 img", "-*- coding: utf-8 -*- import cv2 import numpy as np", "if normalize is True: hsv[..., 1] = cv2.normalize(flow_magnitude, None, 0,", "if np.any(nans): nans = np.where(nans) flow_magnitude[nans] = 0. # Normalize", "1] = cv2.normalize(flow_magnitude, None, 0, 255, cv2.NORM_MINMAX) else: hsv[..., 1]", "normalize is True: hsv[..., 1] = cv2.normalize(flow_magnitude, None, 0, 255,", "flow normalize: Normalize flow to 0..255 Returns: img: viewable representation", "flow_magnitude, flow_angle = cv2.cartToPolar(flow[..., 0].astype(np.float32), flow[..., 1].astype(np.float32)) # A couple", "0, 255, cv2.NORM_MINMAX) else: hsv[..., 1] = flow_magnitude hsv[..., 2]", "flow_magnitude hsv[..., 2] = 255 img = cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB) return", "= np.zeros((flow.shape[0], flow.shape[1], 3), dtype=np.uint8) flow_magnitude, flow_angle = cv2.cartToPolar(flow[..., 0].astype(np.float32),", "hsv[..., 1] = flow_magnitude hsv[..., 2] = 255 img =", "flow in RGB format Ref: https://github.com/philferriere/tfoptflow/blob/33e8a701e34c8ce061f17297d40619afbd459ade/tfoptflow/optflow.py \"\"\" hsv = np.zeros((flow.shape[0],", "cv2.NORM_MINMAX) else: hsv[..., 1] = flow_magnitude hsv[..., 2] = 255", "is True: hsv[..., 1] = cv2.normalize(flow_magnitude, None, 0, 255, cv2.NORM_MINMAX)", "is similar to the OpenCV tutorial on dense optical flow,", "img: viewable representation of the dense optical flow in RGB", "Args: flow: optical flow normalize: Normalize flow to 0..255 Returns:", "NaNs out of the above... 
nans = np.isnan(flow_magnitude) if np.any(nans):", "saturation plane, as we do here. Args: flow: optical flow", "format Ref: https://github.com/philferriere/tfoptflow/blob/33e8a701e34c8ce061f17297d40619afbd459ade/tfoptflow/optflow.py \"\"\" hsv = np.zeros((flow.shape[0], flow.shape[1], 3), dtype=np.uint8)", "model, instead of the saturation plane, as we do here.", "180 / np.pi / 2 if normalize is True: hsv[...,", "Normalize flow to 0..255 Returns: img: viewable representation of the", "hue to encode flow vector orientation, and color saturation to", "/ np.pi / 2 if normalize is True: hsv[..., 1]", "numpy as np def flow_to_img(flow, normalize=True): \"\"\"Convert flow to viewable", "out of the above... nans = np.isnan(flow_magnitude) if np.any(nans): nans", "to encode vector length. This is similar to the OpenCV", "to the OpenCV tutorial on dense optical flow, except that", "map vector length to the value plane of the HSV", "value plane of the HSV color model, instead of the", "using color hue to encode flow vector orientation, and color", "np def flow_to_img(flow, normalize=True): \"\"\"Convert flow to viewable image, using", "import cv2 import numpy as np def flow_to_img(flow, normalize=True): \"\"\"Convert", "2 if normalize is True: hsv[..., 1] = cv2.normalize(flow_magnitude, None,", "vector length to the value plane of the HSV color", "normalize: Normalize flow to 0..255 Returns: img: viewable representation of", "to encode flow vector orientation, and color saturation to encode", "viewable representation of the dense optical flow in RGB format", "the HSV color model, instead of the saturation plane, as", "HSV color model, instead of the saturation plane, as we", "they map vector length to the value plane of the", "of the dense optical flow in RGB format Ref: https://github.com/philferriere/tfoptflow/blob/33e8a701e34c8ce061f17297d40619afbd459ade/tfoptflow/optflow.py" ]
[ "B*ratr < R: leafcount = leafcount + 1 total =", "speed = 8 leafcount = 0 total = 0 for", "= 0 total = 0 for i in range(0, int(imWidth/speed)):", "G and B*ratgb < G and B*ratr < R: leafcount", "parser = argparse.ArgumentParser() parser.add_argument('fname') parser.add_argument('pref', default=\"\", nargs=\"?\") args = parser.parse_args()", "ratgb = 1.66 ming = 10 ratr = 2 speed", "import math parser = argparse.ArgumentParser() parser.add_argument('fname') parser.add_argument('pref', default=\"\", nargs=\"?\") args", "math parser = argparse.ArgumentParser() parser.add_argument('fname') parser.add_argument('pref', default=\"\", nargs=\"?\") args =", "8 leafcount = 0 total = 0 for i in", "= 8 leafcount = 0 total = 0 for i", "total = 0 for i in range(0, int(imWidth/speed)): for j", "< R: leafcount = leafcount + 1 total = total+1", "Image.open(args.fname) RGB = im.convert('RGB') imWidth, imHeight = im.size ratg =", "import argparse from PIL import Image, ImageStat import math parser", "<filename>DistributedRL/Gateway/build/Code/sim/Parser/LAI/GreenIndex.py<gh_stars>1-10 import argparse from PIL import Image, ImageStat import math", "= argparse.ArgumentParser() parser.add_argument('fname') parser.add_argument('pref', default=\"\", nargs=\"?\") args = parser.parse_args() im", "2 speed = 8 leafcount = 0 total = 0", "im.size ratg = 1.2 ratgb = 1.66 ming = 10", "leafcount = 0 total = 0 for i in range(0,", "ratr = 2 speed = 8 leafcount = 0 total", "in range(0, int(imWidth/speed)): for j in range(0, int(imHeight/speed)): R,G,B =", "= im.size ratg = 1.2 ratgb = 1.66 ming =", "10 ratr = 2 speed = 8 leafcount = 0", "nargs=\"?\") args = parser.parse_args() im = Image.open(args.fname) RGB = im.convert('RGB')", "= 2 speed = 8 leafcount = 0 total =", "R,G,B = RGB.getpixel((i*speed,j*speed)) if R*ratg < G and B*ratgb <", "0 for i in range(0, int(imWidth/speed)): for j in range(0,", "j in range(0, int(imHeight/speed)): R,G,B = RGB.getpixel((i*speed,j*speed)) if R*ratg <", 
"int(imHeight/speed)): R,G,B = RGB.getpixel((i*speed,j*speed)) if R*ratg < G and B*ratgb", "= Image.open(args.fname) RGB = im.convert('RGB') imWidth, imHeight = im.size ratg", "RGB.getpixel((i*speed,j*speed)) if R*ratg < G and B*ratgb < G and", "B*ratgb < G and B*ratr < R: leafcount = leafcount", "< G and B*ratr < R: leafcount = leafcount +", "and B*ratr < R: leafcount = leafcount + 1 total", "R: leafcount = leafcount + 1 total = total+1 print(\"LAI=\"+str(float(leafcount)/total))", "args = parser.parse_args() im = Image.open(args.fname) RGB = im.convert('RGB') imWidth,", "= 1.66 ming = 10 ratr = 2 speed =", "and B*ratgb < G and B*ratr < R: leafcount =", "= im.convert('RGB') imWidth, imHeight = im.size ratg = 1.2 ratgb", "= 0 for i in range(0, int(imWidth/speed)): for j in", "range(0, int(imHeight/speed)): R,G,B = RGB.getpixel((i*speed,j*speed)) if R*ratg < G and", "in range(0, int(imHeight/speed)): R,G,B = RGB.getpixel((i*speed,j*speed)) if R*ratg < G", "Image, ImageStat import math parser = argparse.ArgumentParser() parser.add_argument('fname') parser.add_argument('pref', default=\"\",", "im = Image.open(args.fname) RGB = im.convert('RGB') imWidth, imHeight = im.size", "PIL import Image, ImageStat import math parser = argparse.ArgumentParser() parser.add_argument('fname')", "0 total = 0 for i in range(0, int(imWidth/speed)): for", "= 1.2 ratgb = 1.66 ming = 10 ratr =", "im.convert('RGB') imWidth, imHeight = im.size ratg = 1.2 ratgb =", "ImageStat import math parser = argparse.ArgumentParser() parser.add_argument('fname') parser.add_argument('pref', default=\"\", nargs=\"?\")", "G and B*ratr < R: leafcount = leafcount + 1", "parser.add_argument('fname') parser.add_argument('pref', default=\"\", nargs=\"?\") args = parser.parse_args() im = Image.open(args.fname)", "parser.add_argument('pref', default=\"\", nargs=\"?\") args = parser.parse_args() im = Image.open(args.fname) RGB", "argparse.ArgumentParser() parser.add_argument('fname') parser.add_argument('pref', 
default=\"\", nargs=\"?\") args = parser.parse_args() im =", "= parser.parse_args() im = Image.open(args.fname) RGB = im.convert('RGB') imWidth, imHeight", "1.2 ratgb = 1.66 ming = 10 ratr = 2", "default=\"\", nargs=\"?\") args = parser.parse_args() im = Image.open(args.fname) RGB =", "= RGB.getpixel((i*speed,j*speed)) if R*ratg < G and B*ratgb < G", "R*ratg < G and B*ratgb < G and B*ratr <", "from PIL import Image, ImageStat import math parser = argparse.ArgumentParser()", "argparse from PIL import Image, ImageStat import math parser =", "for j in range(0, int(imHeight/speed)): R,G,B = RGB.getpixel((i*speed,j*speed)) if R*ratg", "ratg = 1.2 ratgb = 1.66 ming = 10 ratr", "if R*ratg < G and B*ratgb < G and B*ratr", "i in range(0, int(imWidth/speed)): for j in range(0, int(imHeight/speed)): R,G,B", "parser.parse_args() im = Image.open(args.fname) RGB = im.convert('RGB') imWidth, imHeight =", "RGB = im.convert('RGB') imWidth, imHeight = im.size ratg = 1.2", "ming = 10 ratr = 2 speed = 8 leafcount", "import Image, ImageStat import math parser = argparse.ArgumentParser() parser.add_argument('fname') parser.add_argument('pref',", "1.66 ming = 10 ratr = 2 speed = 8", "range(0, int(imWidth/speed)): for j in range(0, int(imHeight/speed)): R,G,B = RGB.getpixel((i*speed,j*speed))", "int(imWidth/speed)): for j in range(0, int(imHeight/speed)): R,G,B = RGB.getpixel((i*speed,j*speed)) if", "imHeight = im.size ratg = 1.2 ratgb = 1.66 ming", "for i in range(0, int(imWidth/speed)): for j in range(0, int(imHeight/speed)):", "imWidth, imHeight = im.size ratg = 1.2 ratgb = 1.66", "= 10 ratr = 2 speed = 8 leafcount =", "< G and B*ratgb < G and B*ratr < R:" ]
[ "choices=['GiB', 'TiB', 'gib', 'tib'], default='TiB') args = parser.parse_args() vip =", "consumed / capacity, 0) storageReduction = round(float(logicalSize) / consumed, 1)", "% (startMsecs, cluster['clusterId'], endMsecs)) parseStats(cluster['name'], capacityStats['dataPointVec'][0], 'capacity') parseStats(cluster['name'], consumedStats['dataPointVec'][0], 'consumed')", "'--unit', type=str, choices=['GiB', 'TiB', 'gib', 'tib'], default='TiB') args = parser.parse_args()", "dataWrittenStats = api('get', 'statistics/timeSeriesStats?startTimeMsecs=%s&schemaName=ApolloV2ClusterStats&metricName=ChunkBytesMorphed&rollupIntervalSecs=86400&rollupFunction=latest&entityIdList=%s (ID %s)&endTimeMsecs=%s' % (startMsecs, cluster['name'], cluster['clusterId'],", "useApiKey=True, noretry=True) # outfile now = datetime.now() # cluster =", "in sorted(stats.keys()): capacity = stats[clusterName]['capacity'] consumed = stats[clusterName]['consumed'] dataIn =", "0) storageReduction = round(float(logicalSize) / consumed, 1) dataReduction = round(float(dataIn)", "= args.vip username = args.username domain = args.domain password =", "dateToUsecs(now.strftime(\"%Y-%m-%d %H:%M:%S\")) / 1000 startMsecs = (timeAgo(2, 'days')) / 1000", "'--password', type=str, default=None) # optional password parser.add_argument('-n', '--unit', type=str, choices=['GiB',", "= 'heliosStorageStats-%s.csv' % dateString f = codecs.open(outfile, 'w') # headings", "parseStats(cluster['name'], capacityStats['dataPointVec'][0], 'capacity') parseStats(cluster['name'], consumedStats['dataPointVec'][0], 'consumed') parseStats(cluster['name'], dataInStats['dataPointVec'][0], 'dataIn') parseStats(cluster['name'],", "in heliosClusters(): heliosCluster(cluster) print(' %s' % cluster['name']) capacityStats = api('get',", 
"'statistics/timeSeriesStats?endTimeMsecs=%s&entityId=%s&metricName=kCapacityBytes&metricUnitType=0&range=day&rollupFunction=average&rollupIntervalSecs=86400&schemaName=kBridgeClusterStats&startTimeMsecs=%s' % (endMsecs, cluster['clusterId'], startMsecs)) consumedStats = api('get', 'statistics/timeSeriesStats?startTimeMsecs=%s&schemaName=kBridgeClusterTierPhysicalStats&metricName=kMorphedUsageBytes&rollupIntervalSecs=86400&rollupFunction=latest&entityIdList=%s:Local&endTimeMsecs=%s' %", "arguments import argparse parser = argparse.ArgumentParser() parser.add_argument('-v', '--vip', type=str, default='helios.cohesity.com')", "api('get', 'cluster') dateString = now.strftime(\"%Y-%m-%d\") outfile = 'heliosStorageStats-%s.csv' % dateString", "startMsecs)) consumedStats = api('get', 'statistics/timeSeriesStats?startTimeMsecs=%s&schemaName=kBridgeClusterTierPhysicalStats&metricName=kMorphedUsageBytes&rollupIntervalSecs=86400&rollupFunction=latest&entityIdList=%s:Local&endTimeMsecs=%s' % (startMsecs, cluster['clusterId'], endMsecs)) dataInStats", "cluster in heliosClusters(): heliosCluster(cluster) print(' %s' % cluster['name']) capacityStats =", "= <PASSWORD> unit = args.unit if unit.lower() == 'tib': multiplier", "parseStats(cluster['name'], logicalSizeStats['dataPointVec'][0], 'logicalSize') for clusterName in sorted(stats.keys()): capacity = stats[clusterName]['capacity']", "dataPoint['data']['int64Value'] endMsecs = dateToUsecs(now.strftime(\"%Y-%m-%d %H:%M:%S\")) / 1000 startMsecs = (timeAgo(2,", "stats.keys(): stats[clusterName] = {} stats[clusterName][statName] = dataPoint['data']['int64Value'] endMsecs = dateToUsecs(now.strftime(\"%Y-%m-%d", "(timeAgo(2, 'days')) / 1000 print('\\nGathering cluster stats:\\n') for cluster in", "cluster['clusterId'], startMsecs)) consumedStats = api('get', 
'statistics/timeSeriesStats?startTimeMsecs=%s&schemaName=kBridgeClusterTierPhysicalStats&metricName=kMorphedUsageBytes&rollupIntervalSecs=86400&rollupFunction=latest&entityIdList=%s:Local&endTimeMsecs=%s' % (startMsecs, cluster['clusterId'], endMsecs))", "default='TiB') args = parser.parse_args() vip = args.vip username = args.username", "1000 startMsecs = (timeAgo(2, 'days')) / 1000 print('\\nGathering cluster stats:\\n')", "unit, unit, unit, unit)) stats = {} def parseStats(clusterName, dataPoint,", "apiauth(vip=vip, username=username, domain=domain, password=password, useApiKey=True, noretry=True) # outfile now =", "(endMsecs, cluster['clusterId'], startMsecs)) consumedStats = api('get', 'statistics/timeSeriesStats?startTimeMsecs=%s&schemaName=kBridgeClusterTierPhysicalStats&metricName=kMorphedUsageBytes&rollupIntervalSecs=86400&rollupFunction=latest&entityIdList=%s:Local&endTimeMsecs=%s' % (startMsecs, cluster['clusterId'],", "% (clusterName, toUnits(capacity), toUnits(consumed), toUnits(free), pctUsed, toUnits(dataIn), toUnits(dataWritten), storageReduction, dataReduction))", "domain=domain, password=password, useApiKey=True, noretry=True) # outfile now = datetime.now() #", "= api('get', 'statistics/timeSeriesStats?startTimeMsecs=%s&schemaName=ApolloV2ClusterStats&metricName=BrickBytesLogical&rollupIntervalSecs=86400&rollupFunction=latest&entityIdList=%s (ID %s)&endTimeMsecs=%s' % (startMsecs, cluster['name'], cluster['clusterId'], endMsecs))", "python \"\"\"cluster storage stats for python\"\"\" # import pyhesity wrapper", "clusterName in sorted(stats.keys()): capacity = stats[clusterName]['capacity'] consumed = stats[clusterName]['consumed'] dataIn", "vip = args.vip username = args.username domain = args.domain password", "= stats[clusterName]['consumed'] dataIn = stats[clusterName]['dataIn'] dataWritten = stats[clusterName]['dataWritten'] logicalSize =", "capacityStats['dataPointVec'][0], 'capacity') parseStats(cluster['name'], 
consumedStats['dataPointVec'][0], 'consumed') parseStats(cluster['name'], dataInStats['dataPointVec'][0], 'dataIn') parseStats(cluster['name'], dataWrittenStats['dataPointVec'][0],", "dataWritten, 1) f.write('\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\"\\n' % (clusterName, toUnits(capacity), toUnits(consumed), toUnits(free), pctUsed, toUnits(dataIn),", "logicalSizeStats['dataPointVec'][0], 'logicalSize') for clusterName in sorted(stats.keys()): capacity = stats[clusterName]['capacity'] consumed", "(ID %s)&endTimeMsecs=%s' % (startMsecs, cluster['name'], cluster['clusterId'], endMsecs)) logicalSizeStats = api('get',", "f = codecs.open(outfile, 'w') # headings f.write('Date,Capacity (%s),Consumed (%s),Free (%s),Used", "= api('get', 'statistics/timeSeriesStats?startTimeMsecs=%s&schemaName=kBridgeClusterLogicalStats&metricName=kUnmorphedUsageBytes&rollupIntervalSecs=86400&rollupFunction=latest&entityIdList=%s&endTimeMsecs=%s' % (startMsecs, cluster['clusterId'], endMsecs)) parseStats(cluster['name'], capacityStats['dataPointVec'][0], 'capacity')", "stats for python\"\"\" # import pyhesity wrapper module from pyhesity", "cluster['name'], cluster['clusterId'], endMsecs)) dataWrittenStats = api('get', 'statistics/timeSeriesStats?startTimeMsecs=%s&schemaName=ApolloV2ClusterStats&metricName=ChunkBytesMorphed&rollupIntervalSecs=86400&rollupFunction=latest&entityIdList=%s (ID %s)&endTimeMsecs=%s' %", "from pyhesity import * from datetime import datetime import codecs", "logicalSize = stats[clusterName]['logicalSize'] free = capacity - consumed pctUsed =", "for cluster in heliosClusters(): heliosCluster(cluster) print(' %s' % cluster['name']) capacityStats", "args.username domain = args.domain password = <PASSWORD> unit = args.unit", "(ID %s)&endTimeMsecs=%s' % (startMsecs, cluster['name'], cluster['clusterId'], endMsecs)) dataWrittenStats = api('get',", "consumed pctUsed = round(100 * consumed / capacity, 0) storageReduction", "storage stats for python\"\"\" 
# import pyhesity wrapper module from", "optional password parser.add_argument('-n', '--unit', type=str, choices=['GiB', 'TiB', 'gib', 'tib'], default='TiB')", "* 1024 * 1024 unit = 'TiB' else: multiplier =", "now = datetime.now() # cluster = api('get', 'cluster') dateString =", "connect to parser.add_argument('-u', '--username', type=str, required=True) # username parser.add_argument('-d', '--domain',", "dataWritten = stats[clusterName]['dataWritten'] logicalSize = stats[clusterName]['logicalSize'] free = capacity -", "to connect to parser.add_argument('-u', '--username', type=str, required=True) # username parser.add_argument('-d',", "parser.parse_args() vip = args.vip username = args.username domain = args.domain", "/ dataWritten, 1) f.write('\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\"\\n' % (clusterName, toUnits(capacity), toUnits(consumed), toUnits(free), pctUsed,", "# authenticate apiauth(vip=vip, username=username, domain=domain, password=password, useApiKey=True, noretry=True) # outfile", "/ 1000 print('\\nGathering cluster stats:\\n') for cluster in heliosClusters(): heliosCluster(cluster)", "heliosCluster(cluster) print(' %s' % cluster['name']) capacityStats = api('get', 'statistics/timeSeriesStats?endTimeMsecs=%s&entityId=%s&metricName=kCapacityBytes&metricUnitType=0&range=day&rollupFunction=average&rollupIntervalSecs=86400&schemaName=kBridgeClusterStats&startTimeMsecs=%s' %", "% dateString f = codecs.open(outfile, 'w') # headings f.write('Date,Capacity (%s),Consumed", "(%s),Used %%,Data In (%s),Data Written (%s),Storage Reduction,Data Reduction\\n' % (unit,", "endMsecs)) dataInStats = api('get', 'statistics/timeSeriesStats?startTimeMsecs=%s&schemaName=ApolloV2ClusterStats&metricName=BrickBytesLogical&rollupIntervalSecs=86400&rollupFunction=latest&entityIdList=%s (ID %s)&endTimeMsecs=%s' % (startMsecs, cluster['name'],", "dataWrittenStats['dataPointVec'][0], 'dataWritten') parseStats(cluster['name'], 
logicalSizeStats['dataPointVec'][0], 'logicalSize') for clusterName in sorted(stats.keys()): capacity", "command line arguments import argparse parser = argparse.ArgumentParser() parser.add_argument('-v', '--vip',", "cluster['clusterId'], endMsecs)) dataInStats = api('get', 'statistics/timeSeriesStats?startTimeMsecs=%s&schemaName=ApolloV2ClusterStats&metricName=BrickBytesLogical&rollupIntervalSecs=86400&rollupFunction=latest&entityIdList=%s (ID %s)&endTimeMsecs=%s' % (startMsecs,", "* 1024 unit = 'GiB' def toUnits(value): return round(float(value) /", "parser.add_argument('-d', '--domain', type=str, default='local') # (optional) domain - defaults to", "datetime import datetime import codecs # command line arguments import", "type=str, default='local') # (optional) domain - defaults to local parser.add_argument('-pwd',", "api('get', 'statistics/timeSeriesStats?startTimeMsecs=%s&schemaName=ApolloV2ClusterStats&metricName=ChunkBytesMorphed&rollupIntervalSecs=86400&rollupFunction=latest&entityIdList=%s (ID %s)&endTimeMsecs=%s' % (startMsecs, cluster['name'], cluster['clusterId'], endMsecs)) logicalSizeStats", "= api('get', 'statistics/timeSeriesStats?startTimeMsecs=%s&schemaName=ApolloV2ClusterStats&metricName=ChunkBytesMorphed&rollupIntervalSecs=86400&rollupFunction=latest&entityIdList=%s (ID %s)&endTimeMsecs=%s' % (startMsecs, cluster['name'], cluster['clusterId'], endMsecs))", "stats[clusterName]['capacity'] consumed = stats[clusterName]['consumed'] dataIn = stats[clusterName]['dataIn'] dataWritten = stats[clusterName]['dataWritten']", "'TiB' else: multiplier = 1024 * 1024 * 1024 unit", "# import pyhesity wrapper module from pyhesity import * from", "Reduction,Data Reduction\\n' % (unit, unit, unit, unit, unit)) stats =", "1) # authenticate apiauth(vip=vip, username=username, domain=domain, password=password, useApiKey=True, noretry=True) #", "cluster to connect to parser.add_argument('-u', '--username', type=str, required=True) # username", "from datetime import 
datetime import codecs # command line arguments", "'days')) / 1000 print('\\nGathering cluster stats:\\n') for cluster in heliosClusters():", "pctUsed = round(100 * consumed / capacity, 0) storageReduction =", "unit, unit)) stats = {} def parseStats(clusterName, dataPoint, statName): if", "= stats[clusterName]['logicalSize'] free = capacity - consumed pctUsed = round(100", "unit)) stats = {} def parseStats(clusterName, dataPoint, statName): if clusterName", "# cluster = api('get', 'cluster') dateString = now.strftime(\"%Y-%m-%d\") outfile =", "def parseStats(clusterName, dataPoint, statName): if clusterName not in stats.keys(): stats[clusterName]", "= round(float(dataIn) / dataWritten, 1) f.write('\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\"\\n' % (clusterName, toUnits(capacity), toUnits(consumed),", "1024 * 1024 * 1024 unit = 'GiB' def toUnits(value):", "parseStats(cluster['name'], dataWrittenStats['dataPointVec'][0], 'dataWritten') parseStats(cluster['name'], logicalSizeStats['dataPointVec'][0], 'logicalSize') for clusterName in sorted(stats.keys()):", "% (startMsecs, cluster['name'], cluster['clusterId'], endMsecs)) dataWrittenStats = api('get', 'statistics/timeSeriesStats?startTimeMsecs=%s&schemaName=ApolloV2ClusterStats&metricName=ChunkBytesMorphed&rollupIntervalSecs=86400&rollupFunction=latest&entityIdList=%s (ID", "= api('get', 'cluster') dateString = now.strftime(\"%Y-%m-%d\") outfile = 'heliosStorageStats-%s.csv' %", "startMsecs = (timeAgo(2, 'days')) / 1000 print('\\nGathering cluster stats:\\n') for", "capacityStats = api('get', 'statistics/timeSeriesStats?endTimeMsecs=%s&entityId=%s&metricName=kCapacityBytes&metricUnitType=0&range=day&rollupFunction=average&rollupIntervalSecs=86400&schemaName=kBridgeClusterStats&startTimeMsecs=%s' % (endMsecs, cluster['clusterId'], startMsecs)) consumedStats =", 
"'statistics/timeSeriesStats?startTimeMsecs=%s&schemaName=kBridgeClusterTierPhysicalStats&metricName=kMorphedUsageBytes&rollupIntervalSecs=86400&rollupFunction=latest&entityIdList=%s:Local&endTimeMsecs=%s' % (startMsecs, cluster['clusterId'], endMsecs)) dataInStats = api('get', 'statistics/timeSeriesStats?startTimeMsecs=%s&schemaName=ApolloV2ClusterStats&metricName=BrickBytesLogical&rollupIntervalSecs=86400&rollupFunction=latest&entityIdList=%s (ID", "noretry=True) # outfile now = datetime.now() # cluster = api('get',", "== 'tib': multiplier = 1024 * 1024 * 1024 *", "print('\\nGathering cluster stats:\\n') for cluster in heliosClusters(): heliosCluster(cluster) print(' %s'", "stats[clusterName][statName] = dataPoint['data']['int64Value'] endMsecs = dateToUsecs(now.strftime(\"%Y-%m-%d %H:%M:%S\")) / 1000 startMsecs", "'tib': multiplier = 1024 * 1024 * 1024 * 1024", "free = capacity - consumed pctUsed = round(100 * consumed", "default=None) # optional password parser.add_argument('-n', '--unit', type=str, choices=['GiB', 'TiB', 'gib',", "%s)&endTimeMsecs=%s' % (startMsecs, cluster['name'], cluster['clusterId'], endMsecs)) dataWrittenStats = api('get', 'statistics/timeSeriesStats?startTimeMsecs=%s&schemaName=ApolloV2ClusterStats&metricName=ChunkBytesMorphed&rollupIntervalSecs=86400&rollupFunction=latest&entityIdList=%s", "heliosClusters(): heliosCluster(cluster) print(' %s' % cluster['name']) capacityStats = api('get', 'statistics/timeSeriesStats?endTimeMsecs=%s&entityId=%s&metricName=kCapacityBytes&metricUnitType=0&range=day&rollupFunction=average&rollupIntervalSecs=86400&schemaName=kBridgeClusterStats&startTimeMsecs=%s'", "1) dataReduction = round(float(dataIn) / dataWritten, 1) f.write('\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\"\\n' % (clusterName,", "'tib'], default='TiB') args = parser.parse_args() vip = args.vip username =", "endMsecs)) dataWrittenStats = api('get', 
'statistics/timeSeriesStats?startTimeMsecs=%s&schemaName=ApolloV2ClusterStats&metricName=ChunkBytesMorphed&rollupIntervalSecs=86400&rollupFunction=latest&entityIdList=%s (ID %s)&endTimeMsecs=%s' % (startMsecs, cluster['name'],", "endMsecs = dateToUsecs(now.strftime(\"%Y-%m-%d %H:%M:%S\")) / 1000 startMsecs = (timeAgo(2, 'days'))", "round(float(dataIn) / dataWritten, 1) f.write('\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\"\\n' % (clusterName, toUnits(capacity), toUnits(consumed), toUnits(free),", "module from pyhesity import * from datetime import datetime import", "toUnits(consumed), toUnits(free), pctUsed, toUnits(dataIn), toUnits(dataWritten), storageReduction, dataReduction)) f.close() print('\\nOutput saved", "args.vip username = args.username domain = args.domain password = <PASSWORD>", "#!/usr/bin/env python \"\"\"cluster storage stats for python\"\"\" # import pyhesity", "wrapper module from pyhesity import * from datetime import datetime", "endMsecs)) parseStats(cluster['name'], capacityStats['dataPointVec'][0], 'capacity') parseStats(cluster['name'], consumedStats['dataPointVec'][0], 'consumed') parseStats(cluster['name'], dataInStats['dataPointVec'][0], 'dataIn')", "# headings f.write('Date,Capacity (%s),Consumed (%s),Free (%s),Used %%,Data In (%s),Data Written", "'statistics/timeSeriesStats?startTimeMsecs=%s&schemaName=ApolloV2ClusterStats&metricName=BrickBytesLogical&rollupIntervalSecs=86400&rollupFunction=latest&entityIdList=%s (ID %s)&endTimeMsecs=%s' % (startMsecs, cluster['name'], cluster['clusterId'], endMsecs)) dataWrittenStats =", "statName): if clusterName not in stats.keys(): stats[clusterName] = {} stats[clusterName][statName]", "for python\"\"\" # import pyhesity wrapper module from pyhesity import", "datetime.now() # cluster = api('get', 'cluster') dateString = now.strftime(\"%Y-%m-%d\") outfile", "/ multiplier, 1) # authenticate apiauth(vip=vip, username=username, domain=domain, password=password, useApiKey=True,", 
"storageReduction = round(float(logicalSize) / consumed, 1) dataReduction = round(float(dataIn) /", "'--domain', type=str, default='local') # (optional) domain - defaults to local", "args.domain password = <PASSWORD> unit = args.unit if unit.lower() ==", "pyhesity import * from datetime import datetime import codecs #", "* 1024 unit = 'TiB' else: multiplier = 1024 *", "multiplier = 1024 * 1024 * 1024 unit = 'GiB'", "(startMsecs, cluster['clusterId'], endMsecs)) parseStats(cluster['name'], capacityStats['dataPointVec'][0], 'capacity') parseStats(cluster['name'], consumedStats['dataPointVec'][0], 'consumed') parseStats(cluster['name'],", "= stats[clusterName]['dataWritten'] logicalSize = stats[clusterName]['logicalSize'] free = capacity - consumed", "cluster['clusterId'], endMsecs)) parseStats(cluster['name'], capacityStats['dataPointVec'][0], 'capacity') parseStats(cluster['name'], consumedStats['dataPointVec'][0], 'consumed') parseStats(cluster['name'], dataInStats['dataPointVec'][0],", "toUnits(capacity), toUnits(consumed), toUnits(free), pctUsed, toUnits(dataIn), toUnits(dataWritten), storageReduction, dataReduction)) f.close() print('\\nOutput", "dataReduction = round(float(dataIn) / dataWritten, 1) f.write('\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\"\\n' % (clusterName, toUnits(capacity),", "= dataPoint['data']['int64Value'] endMsecs = dateToUsecs(now.strftime(\"%Y-%m-%d %H:%M:%S\")) / 1000 startMsecs =", "/ 1000 startMsecs = (timeAgo(2, 'days')) / 1000 print('\\nGathering cluster", "Written (%s),Storage Reduction,Data Reduction\\n' % (unit, unit, unit, unit, unit))", "<gh_stars>0 #!/usr/bin/env python \"\"\"cluster storage stats for python\"\"\" # import", "# username parser.add_argument('-d', '--domain', type=str, default='local') # (optional) domain -", "= parser.parse_args() vip = args.vip username = args.username domain =", "'GiB' def toUnits(value): return round(float(value) / multiplier, 1) # authenticate", "%s)&endTimeMsecs=%s' % 
(startMsecs, cluster['name'], cluster['clusterId'], endMsecs)) logicalSizeStats = api('get', 'statistics/timeSeriesStats?startTimeMsecs=%s&schemaName=kBridgeClusterLogicalStats&metricName=kUnmorphedUsageBytes&rollupIntervalSecs=86400&rollupFunction=latest&entityIdList=%s&endTimeMsecs=%s'", "else: multiplier = 1024 * 1024 * 1024 unit =", "type=str, default=None) # optional password parser.add_argument('-n', '--unit', type=str, choices=['GiB', 'TiB',", "dataInStats['dataPointVec'][0], 'dataIn') parseStats(cluster['name'], dataWrittenStats['dataPointVec'][0], 'dataWritten') parseStats(cluster['name'], logicalSizeStats['dataPointVec'][0], 'logicalSize') for clusterName", "<PASSWORD> unit = args.unit if unit.lower() == 'tib': multiplier =", "1024 * 1024 * 1024 * 1024 unit = 'TiB'", "# command line arguments import argparse parser = argparse.ArgumentParser() parser.add_argument('-v',", "= api('get', 'statistics/timeSeriesStats?startTimeMsecs=%s&schemaName=kBridgeClusterTierPhysicalStats&metricName=kMorphedUsageBytes&rollupIntervalSecs=86400&rollupFunction=latest&entityIdList=%s:Local&endTimeMsecs=%s' % (startMsecs, cluster['clusterId'], endMsecs)) dataInStats = api('get',", "datetime import codecs # command line arguments import argparse parser", "'dataWritten') parseStats(cluster['name'], logicalSizeStats['dataPointVec'][0], 'logicalSize') for clusterName in sorted(stats.keys()): capacity =", "* from datetime import datetime import codecs # command line", "local parser.add_argument('-pwd', '--password', type=str, default=None) # optional password parser.add_argument('-n', '--unit',", "\"\"\"cluster storage stats for python\"\"\" # import pyhesity wrapper module", "= 'GiB' def toUnits(value): return round(float(value) / multiplier, 1) #", "outfile = 'heliosStorageStats-%s.csv' % dateString f = codecs.open(outfile, 'w') #", "# outfile now = datetime.now() # cluster = api('get', 'cluster')", "'w') # headings f.write('Date,Capacity (%s),Consumed (%s),Free (%s),Used %%,Data 
In (%s),Data", "parseStats(clusterName, dataPoint, statName): if clusterName not in stats.keys(): stats[clusterName] =", "- defaults to local parser.add_argument('-pwd', '--password', type=str, default=None) # optional", "clusterName not in stats.keys(): stats[clusterName] = {} stats[clusterName][statName] = dataPoint['data']['int64Value']", "= stats[clusterName]['capacity'] consumed = stats[clusterName]['consumed'] dataIn = stats[clusterName]['dataIn'] dataWritten =", "f.write('\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\"\\n' % (clusterName, toUnits(capacity), toUnits(consumed), toUnits(free), pctUsed, toUnits(dataIn), toUnits(dataWritten), storageReduction,", "domain - defaults to local parser.add_argument('-pwd', '--password', type=str, default=None) #", "args.unit if unit.lower() == 'tib': multiplier = 1024 * 1024", "round(float(logicalSize) / consumed, 1) dataReduction = round(float(dataIn) / dataWritten, 1)", "import codecs # command line arguments import argparse parser =", "def toUnits(value): return round(float(value) / multiplier, 1) # authenticate apiauth(vip=vip,", "'capacity') parseStats(cluster['name'], consumedStats['dataPointVec'][0], 'consumed') parseStats(cluster['name'], dataInStats['dataPointVec'][0], 'dataIn') parseStats(cluster['name'], dataWrittenStats['dataPointVec'][0], 'dataWritten')", "endMsecs)) logicalSizeStats = api('get', 'statistics/timeSeriesStats?startTimeMsecs=%s&schemaName=kBridgeClusterLogicalStats&metricName=kUnmorphedUsageBytes&rollupIntervalSecs=86400&rollupFunction=latest&entityIdList=%s&endTimeMsecs=%s' % (startMsecs, cluster['clusterId'], endMsecs)) parseStats(cluster['name'],", "if unit.lower() == 'tib': multiplier = 1024 * 1024 *", "stats = {} def parseStats(clusterName, dataPoint, statName): if clusterName not", "(clusterName, toUnits(capacity), toUnits(consumed), toUnits(free), pctUsed, toUnits(dataIn), toUnits(dataWritten), storageReduction, dataReduction)) f.close()", "sorted(stats.keys()): capacity 
= stats[clusterName]['capacity'] consumed = stats[clusterName]['consumed'] dataIn = stats[clusterName]['dataIn']", "parser = argparse.ArgumentParser() parser.add_argument('-v', '--vip', type=str, default='helios.cohesity.com') # cluster to", "# optional password parser.add_argument('-n', '--unit', type=str, choices=['GiB', 'TiB', 'gib', 'tib'],", "(startMsecs, cluster['name'], cluster['clusterId'], endMsecs)) dataWrittenStats = api('get', 'statistics/timeSeriesStats?startTimeMsecs=%s&schemaName=ApolloV2ClusterStats&metricName=ChunkBytesMorphed&rollupIntervalSecs=86400&rollupFunction=latest&entityIdList=%s (ID %s)&endTimeMsecs=%s'", "stats[clusterName]['consumed'] dataIn = stats[clusterName]['dataIn'] dataWritten = stats[clusterName]['dataWritten'] logicalSize = stats[clusterName]['logicalSize']", "parseStats(cluster['name'], dataInStats['dataPointVec'][0], 'dataIn') parseStats(cluster['name'], dataWrittenStats['dataPointVec'][0], 'dataWritten') parseStats(cluster['name'], logicalSizeStats['dataPointVec'][0], 'logicalSize') for", "(startMsecs, cluster['name'], cluster['clusterId'], endMsecs)) logicalSizeStats = api('get', 'statistics/timeSeriesStats?startTimeMsecs=%s&schemaName=kBridgeClusterLogicalStats&metricName=kUnmorphedUsageBytes&rollupIntervalSecs=86400&rollupFunction=latest&entityIdList=%s&endTimeMsecs=%s' % (startMsecs,", "= now.strftime(\"%Y-%m-%d\") outfile = 'heliosStorageStats-%s.csv' % dateString f = codecs.open(outfile,", "round(100 * consumed / capacity, 0) storageReduction = round(float(logicalSize) /", "{} def parseStats(clusterName, dataPoint, statName): if clusterName not in stats.keys():", "defaults to local parser.add_argument('-pwd', '--password', type=str, default=None) # optional password", "'gib', 'tib'], default='TiB') args = parser.parse_args() vip = args.vip username", "= {} stats[clusterName][statName] = dataPoint['data']['int64Value'] endMsecs = dateToUsecs(now.strftime(\"%Y-%m-%d %H:%M:%S\")) /", "= args.username domain = args.domain 
password = <PASSWORD> unit =", "now.strftime(\"%Y-%m-%d\") outfile = 'heliosStorageStats-%s.csv' % dateString f = codecs.open(outfile, 'w')", "cluster['clusterId'], endMsecs)) logicalSizeStats = api('get', 'statistics/timeSeriesStats?startTimeMsecs=%s&schemaName=kBridgeClusterLogicalStats&metricName=kUnmorphedUsageBytes&rollupIntervalSecs=86400&rollupFunction=latest&entityIdList=%s&endTimeMsecs=%s' % (startMsecs, cluster['clusterId'], endMsecs))", "import datetime import codecs # command line arguments import argparse", "(%s),Storage Reduction,Data Reduction\\n' % (unit, unit, unit, unit, unit)) stats", "= round(float(logicalSize) / consumed, 1) dataReduction = round(float(dataIn) / dataWritten,", "'heliosStorageStats-%s.csv' % dateString f = codecs.open(outfile, 'w') # headings f.write('Date,Capacity", "= round(100 * consumed / capacity, 0) storageReduction = round(float(logicalSize)", "multiplier, 1) # authenticate apiauth(vip=vip, username=username, domain=domain, password=password, useApiKey=True, noretry=True)", "api('get', 'statistics/timeSeriesStats?endTimeMsecs=%s&entityId=%s&metricName=kCapacityBytes&metricUnitType=0&range=day&rollupFunction=average&rollupIntervalSecs=86400&schemaName=kBridgeClusterStats&startTimeMsecs=%s' % (endMsecs, cluster['clusterId'], startMsecs)) consumedStats = api('get', 'statistics/timeSeriesStats?startTimeMsecs=%s&schemaName=kBridgeClusterTierPhysicalStats&metricName=kMorphedUsageBytes&rollupIntervalSecs=86400&rollupFunction=latest&entityIdList=%s:Local&endTimeMsecs=%s'", "'dataIn') parseStats(cluster['name'], dataWrittenStats['dataPointVec'][0], 'dataWritten') parseStats(cluster['name'], logicalSizeStats['dataPointVec'][0], 'logicalSize') for clusterName in", "1024 * 1024 * 1024 unit = 'TiB' else: multiplier", "Reduction\\n' % (unit, unit, unit, unit, unit)) stats = {}", "unit, unit, unit)) stats = {} def parseStats(clusterName, dataPoint, statName):", "python\"\"\" # import pyhesity wrapper module from pyhesity import *", 
"'TiB', 'gib', 'tib'], default='TiB') args = parser.parse_args() vip = args.vip", "1024 unit = 'TiB' else: multiplier = 1024 * 1024", "'cluster') dateString = now.strftime(\"%Y-%m-%d\") outfile = 'heliosStorageStats-%s.csv' % dateString f", "%s' % cluster['name']) capacityStats = api('get', 'statistics/timeSeriesStats?endTimeMsecs=%s&entityId=%s&metricName=kCapacityBytes&metricUnitType=0&range=day&rollupFunction=average&rollupIntervalSecs=86400&schemaName=kBridgeClusterStats&startTimeMsecs=%s' % (endMsecs, cluster['clusterId'],", "multiplier = 1024 * 1024 * 1024 * 1024 unit", "1024 * 1024 unit = 'TiB' else: multiplier = 1024", "% (startMsecs, cluster['name'], cluster['clusterId'], endMsecs)) logicalSizeStats = api('get', 'statistics/timeSeriesStats?startTimeMsecs=%s&schemaName=kBridgeClusterLogicalStats&metricName=kUnmorphedUsageBytes&rollupIntervalSecs=86400&rollupFunction=latest&entityIdList=%s&endTimeMsecs=%s' %", "unit = 'TiB' else: multiplier = 1024 * 1024 *", "= capacity - consumed pctUsed = round(100 * consumed /", "# cluster to connect to parser.add_argument('-u', '--username', type=str, required=True) #", "% (endMsecs, cluster['clusterId'], startMsecs)) consumedStats = api('get', 'statistics/timeSeriesStats?startTimeMsecs=%s&schemaName=kBridgeClusterTierPhysicalStats&metricName=kMorphedUsageBytes&rollupIntervalSecs=86400&rollupFunction=latest&entityIdList=%s:Local&endTimeMsecs=%s' % (startMsecs,", "password parser.add_argument('-n', '--unit', type=str, choices=['GiB', 'TiB', 'gib', 'tib'], default='TiB') args", "f.write('Date,Capacity (%s),Consumed (%s),Free (%s),Used %%,Data In (%s),Data Written (%s),Storage Reduction,Data", "api('get', 'statistics/timeSeriesStats?startTimeMsecs=%s&schemaName=kBridgeClusterLogicalStats&metricName=kUnmorphedUsageBytes&rollupIntervalSecs=86400&rollupFunction=latest&entityIdList=%s&endTimeMsecs=%s' % (startMsecs, cluster['clusterId'], endMsecs)) parseStats(cluster['name'], capacityStats['dataPointVec'][0], 'capacity') 
parseStats(cluster['name'],", "pctUsed, toUnits(dataIn), toUnits(dataWritten), storageReduction, dataReduction)) f.close() print('\\nOutput saved to %s\\n'", "capacity = stats[clusterName]['capacity'] consumed = stats[clusterName]['consumed'] dataIn = stats[clusterName]['dataIn'] dataWritten", "1024 unit = 'GiB' def toUnits(value): return round(float(value) / multiplier,", "password = <PASSWORD> unit = args.unit if unit.lower() == 'tib':", "stats[clusterName] = {} stats[clusterName][statName] = dataPoint['data']['int64Value'] endMsecs = dateToUsecs(now.strftime(\"%Y-%m-%d %H:%M:%S\"))", "pyhesity wrapper module from pyhesity import * from datetime import", "* consumed / capacity, 0) storageReduction = round(float(logicalSize) / consumed,", "'consumed') parseStats(cluster['name'], dataInStats['dataPointVec'][0], 'dataIn') parseStats(cluster['name'], dataWrittenStats['dataPointVec'][0], 'dataWritten') parseStats(cluster['name'], logicalSizeStats['dataPointVec'][0], 'logicalSize')", "In (%s),Data Written (%s),Storage Reduction,Data Reduction\\n' % (unit, unit, unit,", "parser.add_argument('-n', '--unit', type=str, choices=['GiB', 'TiB', 'gib', 'tib'], default='TiB') args =", "%%,Data In (%s),Data Written (%s),Storage Reduction,Data Reduction\\n' % (unit, unit,", "unit.lower() == 'tib': multiplier = 1024 * 1024 * 1024", "argparse.ArgumentParser() parser.add_argument('-v', '--vip', type=str, default='helios.cohesity.com') # cluster to connect to", "username=username, domain=domain, password=password, useApiKey=True, noretry=True) # outfile now = datetime.now()", "'--username', type=str, required=True) # username parser.add_argument('-d', '--domain', type=str, default='local') #", "dateString = now.strftime(\"%Y-%m-%d\") outfile = 'heliosStorageStats-%s.csv' % dateString f =", "'statistics/timeSeriesStats?startTimeMsecs=%s&schemaName=kBridgeClusterLogicalStats&metricName=kUnmorphedUsageBytes&rollupIntervalSecs=86400&rollupFunction=latest&entityIdList=%s&endTimeMsecs=%s' 
% (startMsecs, cluster['clusterId'], endMsecs)) parseStats(cluster['name'], capacityStats['dataPointVec'][0], 'capacity') parseStats(cluster['name'], consumedStats['dataPointVec'][0],", "codecs # command line arguments import argparse parser = argparse.ArgumentParser()", "password=password, useApiKey=True, noretry=True) # outfile now = datetime.now() # cluster", "cluster = api('get', 'cluster') dateString = now.strftime(\"%Y-%m-%d\") outfile = 'heliosStorageStats-%s.csv'", "consumed, 1) dataReduction = round(float(dataIn) / dataWritten, 1) f.write('\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\"\\n' %", "line arguments import argparse parser = argparse.ArgumentParser() parser.add_argument('-v', '--vip', type=str,", "stats[clusterName]['dataWritten'] logicalSize = stats[clusterName]['logicalSize'] free = capacity - consumed pctUsed", "parser.add_argument('-u', '--username', type=str, required=True) # username parser.add_argument('-d', '--domain', type=str, default='local')", "import * from datetime import datetime import codecs # command", "1000 print('\\nGathering cluster stats:\\n') for cluster in heliosClusters(): heliosCluster(cluster) print('", "consumed = stats[clusterName]['consumed'] dataIn = stats[clusterName]['dataIn'] dataWritten = stats[clusterName]['dataWritten'] logicalSize", "dateString f = codecs.open(outfile, 'w') # headings f.write('Date,Capacity (%s),Consumed (%s),Free", "unit = 'GiB' def toUnits(value): return round(float(value) / multiplier, 1)", "/ capacity, 0) storageReduction = round(float(logicalSize) / consumed, 1) dataReduction", "= 'TiB' else: multiplier = 1024 * 1024 * 1024", "logicalSizeStats = api('get', 'statistics/timeSeriesStats?startTimeMsecs=%s&schemaName=kBridgeClusterLogicalStats&metricName=kUnmorphedUsageBytes&rollupIntervalSecs=86400&rollupFunction=latest&entityIdList=%s&endTimeMsecs=%s' % (startMsecs, cluster['clusterId'], endMsecs)) parseStats(cluster['name'], capacityStats['dataPointVec'][0],", "consumedStats 
= api('get', 'statistics/timeSeriesStats?startTimeMsecs=%s&schemaName=kBridgeClusterTierPhysicalStats&metricName=kMorphedUsageBytes&rollupIntervalSecs=86400&rollupFunction=latest&entityIdList=%s:Local&endTimeMsecs=%s' % (startMsecs, cluster['clusterId'], endMsecs)) dataInStats =", "toUnits(dataIn), toUnits(dataWritten), storageReduction, dataReduction)) f.close() print('\\nOutput saved to %s\\n' %", "* 1024 * 1024 unit = 'GiB' def toUnits(value): return", "% (unit, unit, unit, unit, unit)) stats = {} def", "argparse parser = argparse.ArgumentParser() parser.add_argument('-v', '--vip', type=str, default='helios.cohesity.com') # cluster", "cluster['clusterId'], endMsecs)) dataWrittenStats = api('get', 'statistics/timeSeriesStats?startTimeMsecs=%s&schemaName=ApolloV2ClusterStats&metricName=ChunkBytesMorphed&rollupIntervalSecs=86400&rollupFunction=latest&entityIdList=%s (ID %s)&endTimeMsecs=%s' % (startMsecs,", "dataPoint, statName): if clusterName not in stats.keys(): stats[clusterName] = {}", "round(float(value) / multiplier, 1) # authenticate apiauth(vip=vip, username=username, domain=domain, password=password,", "toUnits(dataWritten), storageReduction, dataReduction)) f.close() print('\\nOutput saved to %s\\n' % outfile)", "'statistics/timeSeriesStats?startTimeMsecs=%s&schemaName=ApolloV2ClusterStats&metricName=ChunkBytesMorphed&rollupIntervalSecs=86400&rollupFunction=latest&entityIdList=%s (ID %s)&endTimeMsecs=%s' % (startMsecs, cluster['name'], cluster['clusterId'], endMsecs)) logicalSizeStats =", "= argparse.ArgumentParser() parser.add_argument('-v', '--vip', type=str, default='helios.cohesity.com') # cluster to connect", "api('get', 'statistics/timeSeriesStats?startTimeMsecs=%s&schemaName=kBridgeClusterTierPhysicalStats&metricName=kMorphedUsageBytes&rollupIntervalSecs=86400&rollupFunction=latest&entityIdList=%s:Local&endTimeMsecs=%s' % (startMsecs, cluster['clusterId'], endMsecs)) dataInStats = api('get', 
'statistics/timeSeriesStats?startTimeMsecs=%s&schemaName=ApolloV2ClusterStats&metricName=BrickBytesLogical&rollupIntervalSecs=86400&rollupFunction=latest&entityIdList=%s", "toUnits(value): return round(float(value) / multiplier, 1) # authenticate apiauth(vip=vip, username=username,", "= api('get', 'statistics/timeSeriesStats?endTimeMsecs=%s&entityId=%s&metricName=kCapacityBytes&metricUnitType=0&range=day&rollupFunction=average&rollupIntervalSecs=86400&schemaName=kBridgeClusterStats&startTimeMsecs=%s' % (endMsecs, cluster['clusterId'], startMsecs)) consumedStats = api('get',", "codecs.open(outfile, 'w') # headings f.write('Date,Capacity (%s),Consumed (%s),Free (%s),Used %%,Data In", "for clusterName in sorted(stats.keys()): capacity = stats[clusterName]['capacity'] consumed = stats[clusterName]['consumed']", "parser.add_argument('-v', '--vip', type=str, default='helios.cohesity.com') # cluster to connect to parser.add_argument('-u',", "(unit, unit, unit, unit, unit)) stats = {} def parseStats(clusterName,", "toUnits(free), pctUsed, toUnits(dataIn), toUnits(dataWritten), storageReduction, dataReduction)) f.close() print('\\nOutput saved to", "'logicalSize') for clusterName in sorted(stats.keys()): capacity = stats[clusterName]['capacity'] consumed =", "- consumed pctUsed = round(100 * consumed / capacity, 0)", "outfile now = datetime.now() # cluster = api('get', 'cluster') dateString", "capacity, 0) storageReduction = round(float(logicalSize) / consumed, 1) dataReduction =", "required=True) # username parser.add_argument('-d', '--domain', type=str, default='local') # (optional) domain", "dataInStats = api('get', 'statistics/timeSeriesStats?startTimeMsecs=%s&schemaName=ApolloV2ClusterStats&metricName=BrickBytesLogical&rollupIntervalSecs=86400&rollupFunction=latest&entityIdList=%s (ID %s)&endTimeMsecs=%s' % (startMsecs, cluster['name'], cluster['clusterId'],", "# (optional) domain - defaults to local parser.add_argument('-pwd', '--password', type=str,", "import 
argparse parser = argparse.ArgumentParser() parser.add_argument('-v', '--vip', type=str, default='helios.cohesity.com') #", "dataIn = stats[clusterName]['dataIn'] dataWritten = stats[clusterName]['dataWritten'] logicalSize = stats[clusterName]['logicalSize'] free", "username = args.username domain = args.domain password = <PASSWORD> unit", "= dateToUsecs(now.strftime(\"%Y-%m-%d %H:%M:%S\")) / 1000 startMsecs = (timeAgo(2, 'days')) /", "capacity - consumed pctUsed = round(100 * consumed / capacity,", "args = parser.parse_args() vip = args.vip username = args.username domain", "unit = args.unit if unit.lower() == 'tib': multiplier = 1024", "consumedStats['dataPointVec'][0], 'consumed') parseStats(cluster['name'], dataInStats['dataPointVec'][0], 'dataIn') parseStats(cluster['name'], dataWrittenStats['dataPointVec'][0], 'dataWritten') parseStats(cluster['name'], logicalSizeStats['dataPointVec'][0],", "= args.unit if unit.lower() == 'tib': multiplier = 1024 *", "1024 * 1024 unit = 'GiB' def toUnits(value): return round(float(value)", "headings f.write('Date,Capacity (%s),Consumed (%s),Free (%s),Used %%,Data In (%s),Data Written (%s),Storage", "type=str, default='helios.cohesity.com') # cluster to connect to parser.add_argument('-u', '--username', type=str,", "return round(float(value) / multiplier, 1) # authenticate apiauth(vip=vip, username=username, domain=domain,", "domain = args.domain password = <PASSWORD> unit = args.unit if", "= args.domain password = <PASSWORD> unit = args.unit if unit.lower()", "(%s),Free (%s),Used %%,Data In (%s),Data Written (%s),Storage Reduction,Data Reduction\\n' %", "= datetime.now() # cluster = api('get', 'cluster') dateString = now.strftime(\"%Y-%m-%d\")", "= (timeAgo(2, 'days')) / 1000 print('\\nGathering cluster stats:\\n') for cluster", "* 1024 * 1024 * 1024 unit = 'TiB' else:", "%H:%M:%S\")) / 1000 startMsecs = (timeAgo(2, 'days')) / 1000 print('\\nGathering", "(%s),Data Written (%s),Storage Reduction,Data Reduction\\n' % (unit, 
unit, unit, unit,", "% (startMsecs, cluster['clusterId'], endMsecs)) dataInStats = api('get', 'statistics/timeSeriesStats?startTimeMsecs=%s&schemaName=ApolloV2ClusterStats&metricName=BrickBytesLogical&rollupIntervalSecs=86400&rollupFunction=latest&entityIdList=%s (ID %s)&endTimeMsecs=%s'", "= stats[clusterName]['dataIn'] dataWritten = stats[clusterName]['dataWritten'] logicalSize = stats[clusterName]['logicalSize'] free =", "stats[clusterName]['dataIn'] dataWritten = stats[clusterName]['dataWritten'] logicalSize = stats[clusterName]['logicalSize'] free = capacity", "= codecs.open(outfile, 'w') # headings f.write('Date,Capacity (%s),Consumed (%s),Free (%s),Used %%,Data", "print(' %s' % cluster['name']) capacityStats = api('get', 'statistics/timeSeriesStats?endTimeMsecs=%s&entityId=%s&metricName=kCapacityBytes&metricUnitType=0&range=day&rollupFunction=average&rollupIntervalSecs=86400&schemaName=kBridgeClusterStats&startTimeMsecs=%s' % (endMsecs,", "(%s),Consumed (%s),Free (%s),Used %%,Data In (%s),Data Written (%s),Storage Reduction,Data Reduction\\n'", "parseStats(cluster['name'], consumedStats['dataPointVec'][0], 'consumed') parseStats(cluster['name'], dataInStats['dataPointVec'][0], 'dataIn') parseStats(cluster['name'], dataWrittenStats['dataPointVec'][0], 'dataWritten') parseStats(cluster['name'],", "username parser.add_argument('-d', '--domain', type=str, default='local') # (optional) domain - defaults", "not in stats.keys(): stats[clusterName] = {} stats[clusterName][statName] = dataPoint['data']['int64Value'] endMsecs", "cluster['name']) capacityStats = api('get', 'statistics/timeSeriesStats?endTimeMsecs=%s&entityId=%s&metricName=kCapacityBytes&metricUnitType=0&range=day&rollupFunction=average&rollupIntervalSecs=86400&schemaName=kBridgeClusterStats&startTimeMsecs=%s' % (endMsecs, cluster['clusterId'], startMsecs)) consumedStats", "{} stats[clusterName][statName] = dataPoint['data']['int64Value'] endMsecs = dateToUsecs(now.strftime(\"%Y-%m-%d %H:%M:%S\")) / 
1000", "type=str, required=True) # username parser.add_argument('-d', '--domain', type=str, default='local') # (optional)", "= 1024 * 1024 * 1024 * 1024 unit =", "(optional) domain - defaults to local parser.add_argument('-pwd', '--password', type=str, default=None)", "parser.add_argument('-pwd', '--password', type=str, default=None) # optional password parser.add_argument('-n', '--unit', type=str,", "(startMsecs, cluster['clusterId'], endMsecs)) dataInStats = api('get', 'statistics/timeSeriesStats?startTimeMsecs=%s&schemaName=ApolloV2ClusterStats&metricName=BrickBytesLogical&rollupIntervalSecs=86400&rollupFunction=latest&entityIdList=%s (ID %s)&endTimeMsecs=%s' %", "type=str, choices=['GiB', 'TiB', 'gib', 'tib'], default='TiB') args = parser.parse_args() vip", "stats:\\n') for cluster in heliosClusters(): heliosCluster(cluster) print(' %s' % cluster['name'])", "'--vip', type=str, default='helios.cohesity.com') # cluster to connect to parser.add_argument('-u', '--username',", "to parser.add_argument('-u', '--username', type=str, required=True) # username parser.add_argument('-d', '--domain', type=str,", "api('get', 'statistics/timeSeriesStats?startTimeMsecs=%s&schemaName=ApolloV2ClusterStats&metricName=BrickBytesLogical&rollupIntervalSecs=86400&rollupFunction=latest&entityIdList=%s (ID %s)&endTimeMsecs=%s' % (startMsecs, cluster['name'], cluster['clusterId'], endMsecs)) dataWrittenStats", "= {} def parseStats(clusterName, dataPoint, statName): if clusterName not in", "in stats.keys(): stats[clusterName] = {} stats[clusterName][statName] = dataPoint['data']['int64Value'] endMsecs =", "stats[clusterName]['logicalSize'] free = capacity - consumed pctUsed = round(100 *", "1) f.write('\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\"\\n' % (clusterName, toUnits(capacity), toUnits(consumed), toUnits(free), pctUsed, toUnits(dataIn), toUnits(dataWritten),", "% cluster['name']) capacityStats = api('get', 
'statistics/timeSeriesStats?endTimeMsecs=%s&entityId=%s&metricName=kCapacityBytes&metricUnitType=0&range=day&rollupFunction=average&rollupIntervalSecs=86400&schemaName=kBridgeClusterStats&startTimeMsecs=%s' % (endMsecs, cluster['clusterId'], startMsecs))", "= 1024 * 1024 * 1024 unit = 'GiB' def", "authenticate apiauth(vip=vip, username=username, domain=domain, password=password, useApiKey=True, noretry=True) # outfile now", "cluster['name'], cluster['clusterId'], endMsecs)) logicalSizeStats = api('get', 'statistics/timeSeriesStats?startTimeMsecs=%s&schemaName=kBridgeClusterLogicalStats&metricName=kUnmorphedUsageBytes&rollupIntervalSecs=86400&rollupFunction=latest&entityIdList=%s&endTimeMsecs=%s' % (startMsecs, cluster['clusterId'],", "cluster stats:\\n') for cluster in heliosClusters(): heliosCluster(cluster) print(' %s' %", "/ consumed, 1) dataReduction = round(float(dataIn) / dataWritten, 1) f.write('\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\"\\n'", "to local parser.add_argument('-pwd', '--password', type=str, default=None) # optional password parser.add_argument('-n',", "if clusterName not in stats.keys(): stats[clusterName] = {} stats[clusterName][statName] =", "default='local') # (optional) domain - defaults to local parser.add_argument('-pwd', '--password',", "import pyhesity wrapper module from pyhesity import * from datetime", "default='helios.cohesity.com') # cluster to connect to parser.add_argument('-u', '--username', type=str, required=True)" ]
[ "number): im = Image.open(self.filename) im1 = im.resize((64,64), Image.ANTIALIAS); # 转换为64x64", "classname = n[num] proba = knn.predict_proba(query_feature) msg = [num, classname,", "def classifyImage(self, feature_X, label_y, number): im = Image.open(self.filename) im1 =", "= b.query(self.feature, k=3) print dist, ind ind = ind[0] #", "for l_list in l: if 0 == l_list[1]: # print", "findMostSimilarImg(self, feature_X, serial): X = feature_X b = BallTree(X) #", "X_train = X[train_indices] y_train = y[train_indices] X_test = X[test_indices] y_test", "+ 1) no_imgs = cache[1:len(cache)] no_imgs.append(cache[0]) # print no_imgs #", "skfind = [None] * len(skf) cnt = 0 for train_index", "import KNeighborsClassifier from sklearn.neighbors import BallTree from sklearn import cross_validation", "parent=None): super(MalwareImageClass, self).__init__(parent) self.filename = str(filename)#.encode('cp936') self.feature = '' '''", "n_neighbors = 5; # better to have this at the", "@n:所有样本家族名称 @l:对应家族个数 ''' def prepareData2Matrix(self, X, y, n, l): n_samples,", "QtCore.pyqtSignal(int, list) def __init__(self, filename, parent=None): super(MalwareImageClass, self).__init__(parent) self.filename =", "= n[num] proba = knn.predict_proba(query_feature) msg = [num, classname, proba]", "print no_imgs # 输出所有家族包含文件个数 conf_mat = numpy.zeros((len(no_imgs), len(no_imgs))) # 初始化矩阵", "no_imgs = cache[1:len(cache)] no_imgs.append(cache[0]) # print no_imgs # 输出所有家族包含文件个数 conf_mat", "conf_mat, no_imgs, list_fams def run(self): print \"start draw\" X, y,", "run(self): print \"start draw\" X, y, n, l = self.getClassifyLabel()", "X_test = X[test_indices] y_test = y[test_indices] # Training import time", "balltrees寻找数据集中最相近的样本 返回距离值及样本标签号 ''' def findMostSimilarImg(self, feature_X, serial): X = feature_X", "os, glob, numpy, sys from PIL import Image from sklearn.cross_validation", "print dist, ind ind = ind[0] # print ind l", "__init__(self, filename, parent=None): super(MalwareImageClass, 
self).__init__(parent) self.filename = str(filename)#.encode('cp936') self.feature =", "获取特征和标签 X = feature_X y = label_y n = number", "# Training import time tic = time.time() clf.fit(X_train,y_train) toc =", "sklearn import leargist import cPickle import random import sys reload(sys)", "return X, y, n ,l ''' 对图片进行分类 train@训练集特征 label@训练集标签 '''", "# print l[l_list[3] - 1] # print l_list cache.append(l[l_list[3] -", "BallTree(X) # 5个最相近的样本 dist, ind = b.query(self.feature, k=3) print dist,", "range(kfold): train_indices = skfind[i][0] test_indices = skfind[i][1] clf = []", "random.seed(random.random()) random.shuffle(p) X, y = X[p], y[p] # 打乱数组 kfold", "= confusion_matrix(y_test,y_predict) conf_mat = conf_mat + cm return conf_mat, no_imgs,", "''' balltrees寻找数据集中最相近的样本 返回距离值及样本标签号 ''' def findMostSimilarImg(self, feature_X, serial): X =", "= self.getClassifyLabel() cm, nimg, listf = self.prepareData2Matrix(X, y, n, l)", "l = cPickle.load(open(\"./datafiles/imglabel.p\", \"rb\")) # [家族号, 家族中序号, 文件名, 总序号] return", "生成灰阶图,只需要前320内容 query_feature = feature.reshape(1, -1) self.feature = query_feature # 获取特征和标签", "\"start draw\" X, y, n, l = self.getClassifyLabel() cm, nimg,", "l) msg = [cm, nimg, listf] self.finishSignal.emit(msg) class MalwareImageClass(QtCore.QThread): malwarSignal", "10 # 10重 skf = StratifiedKFold(y,kfold) skfind = [None] *", "nimg, listf = self.prepareData2Matrix(X, y, n, l) msg = [cm,", "time.time() print \"testing time = \", toc-tic # roughly 0.3", "list) concluSignal = QtCore.pyqtSignal(int, list) def __init__(self, filename, parent=None): super(MalwareImageClass,", "'' ''' 获取训练结果 特征,标签,文件名称及相应的序号 ''' def getClassifyLabel(self): X = numpy.load(\"./datafiles/img_features.npy\")", "= cPickle.load(open(\"./datafiles/imglabel.p\", \"rb\")) # [家族号, 家族中序号, 文件名, 总序号] return X,", "leargist.color_gist(im1); # 960 values feature = des[0:320]; # 生成灰阶图,只需要前320内容 query_feature", "sklearn.neighbors import BallTree from sklearn import cross_validation from 
sklearn.utils import", "# 初始化矩阵 n_neighbors = 5 # 10-fold Cross Validation for", "prepareData2Matrix(self, X, y, n, l): n_samples, useless = X.shape p", "n, l): n_samples, useless = X.shape p = range(n_samples) random.seed(random.random())", "proba] self.malwarSignal.emit(1, msg) ''' balltrees寻找数据集中最相近的样本 返回距离值及样本标签号 ''' def findMostSimilarImg(self, feature_X,", "= ind[0] # print ind l = serial imgs =", "X, y, n ,l ''' 对图片进行分类 train@训练集特征 label@训练集标签 ''' def", "def run(self): print \"start draw\" X, y, n, l =", "y[train_indices] X_test = X[test_indices] y_test = y[test_indices] # Training import", "is labels and not indices toc = time.time() print \"testing", "list_fams = n cache = [] no_imgs = [] for", "numpy.load(\"./datafiles/img_features.npy\") # 特征 y = numpy.load(\"./datafiles/img_labels.npy\") # 标签 n =", "im1 = im.resize((64,64), Image.ANTIALIAS); # 转换为64x64 des = leargist.color_gist(im1); #", "QtCore.pyqtSignal(int, list) concluSignal = QtCore.pyqtSignal(int, list) def __init__(self, filename, parent=None):", "\"testing time = \", toc-tic # roughly 0.3 secs #", "filename, parent=None): super(MalwareImageClass, self).__init__(parent) self.filename = str(filename)#.encode('cp936') self.feature = ''", "des[0:320]; # 生成灰阶图,只需要前320内容 query_feature = feature.reshape(1, -1) self.feature = query_feature", "malwarSignal = QtCore.pyqtSignal(int, list) concluSignal = QtCore.pyqtSignal(int, list) def __init__(self,", "__init__(self, parent=None): super(ValidationResult, self).__init__(parent) def getClassifyLabel(self): X = numpy.load(\"./datafiles/img_features.npy\") #", "feature_X b = BallTree(X) # 5个最相近的样本 dist, ind = b.query(self.feature,", "self.concluSignal.emit(2, imgs) def run(self): X, y, n ,l = self.getClassifyLabel()", "have this at the start of the code knn =", "= train_index cnt += 1 list_fams = n cache =", "X[train_indices] y_train = y[train_indices] X_test = X[test_indices] y_test = y[test_indices]", "家族中序号, 文件名, 总序号] return X, y, n ,l ''' 准备绘制矩阵的数据", 
"knn = KNeighborsClassifier(n_neighbors, weights='distance') knn.fit(X, y) num = int(knn.predict(query_feature)) classname", "i in range(kfold): train_indices = skfind[i][0] test_indices = skfind[i][1] clf", "# output is labels and not indices toc = time.time()", "= KNeighborsClassifier(n_neighbors, weights='distance') X_train = X[train_indices] y_train = y[train_indices] X_test", "0.3 secs # Compute confusion matrix cm = [] cm", "import Image from sklearn.cross_validation import StratifiedKFold from sklearn.metrics import confusion_matrix", "len(no_imgs))) # 初始化矩阵 n_neighbors = 5 # 10-fold Cross Validation", "# print l_list cache.append(l[l_list[3] - 1][1] + 1) no_imgs =", "roughly 0.3 secs # Compute confusion matrix cm = []", "listf = self.prepareData2Matrix(X, y, n, l) msg = [cm, nimg,", "conf_mat + cm return conf_mat, no_imgs, list_fams def run(self): print", "# 5个最相近的样本 dist, ind = b.query(self.feature, k=3) print dist, ind", "ind = ind[0] # print ind l = serial imgs", "# print ind l = serial imgs = [] for", "rank for name in l: if rank == name[3]: #", "def __init__(self, parent=None): super(ValidationResult, self).__init__(parent) def getClassifyLabel(self): X = numpy.load(\"./datafiles/img_features.npy\")", "n, l) msg = [cm, nimg, listf] self.finishSignal.emit(msg) class MalwareImageClass(QtCore.QThread):", "feature_X, label_y, number): im = Image.open(self.filename) im1 = im.resize((64,64), Image.ANTIALIAS);", "= X[train_indices] y_train = y[train_indices] X_test = X[test_indices] y_test =", "concluSignal = QtCore.pyqtSignal(int, list) def __init__(self, filename, parent=None): super(MalwareImageClass, self).__init__(parent)", "# 输出所有家族包含文件个数 conf_mat = numpy.zeros((len(no_imgs), len(no_imgs))) # 初始化矩阵 n_neighbors =", "0 == l_list[1]: # print l[l_list[3] - 1] # print", "return conf_mat, no_imgs, list_fams def run(self): print \"start draw\" X,", "msg = [cm, nimg, listf] self.finishSignal.emit(msg) class MalwareImageClass(QtCore.QThread): malwarSignal =", 
"and not indices toc = time.time() print \"testing time =", "y, n ,l ''' 对图片进行分类 train@训练集特征 label@训练集标签 ''' def classifyImage(self,", "= QtCore.pyqtSignal(int, list) def __init__(self, filename, parent=None): super(MalwareImageClass, self).__init__(parent) self.filename", "2.5 secs # Testing y_predict = [] tic = time.time()", "# roughly 0.3 secs # Compute confusion matrix cm =", "listf] self.finishSignal.emit(msg) class MalwareImageClass(QtCore.QThread): malwarSignal = QtCore.pyqtSignal(int, list) concluSignal =", "from PyQt4 import QtCore import os, glob, numpy, sys from", "self).__init__(parent) def getClassifyLabel(self): X = numpy.load(\"./datafiles/img_features.npy\") # 特征 y =", "start of the code knn = KNeighborsClassifier(n_neighbors, weights='distance') knn.fit(X, y)", "= StratifiedKFold(y,kfold) skfind = [None] * len(skf) cnt = 0", "query_feature = feature.reshape(1, -1) self.feature = query_feature # 获取特征和标签 X", "''' def findMostSimilarImg(self, feature_X, serial): X = feature_X b =", "\", toc-tic # roughly 0.3 secs # Compute confusion matrix", "= serial imgs = [] for rank in ind: #", "X = numpy.load(\"./datafiles/img_features.npy\") # 特征 y = numpy.load(\"./datafiles/img_labels.npy\") # 标签", "文件名, 总序号] return X, y, n ,l ''' 准备绘制矩阵的数据 @X:特征矩阵", "not indices toc = time.time() print \"testing time = \",", "= [] for rank in ind: # print rank for", "for train_index in skf: skfind[cnt] = train_index cnt += 1", "PIL import Image from sklearn.cross_validation import StratifiedKFold from sklearn.metrics import", "= leargist.color_gist(im1); # 960 values feature = des[0:320]; # 生成灰阶图,只需要前320内容", "def __init__(self, filename, parent=None): super(MalwareImageClass, self).__init__(parent) self.filename = str(filename)#.encode('cp936') self.feature", "<reponame>zengrx/S.M.A.R.T<filename>src/advanceoperate/malimgthread.py<gh_stars>1-10 #coding=utf-8 from PyQt4 import QtCore import os, glob, numpy,", "# 转换为64x64 des = leargist.color_gist(im1); # 960 values feature =", 
"sys from PIL import Image from sklearn.cross_validation import StratifiedKFold from", "= Image.open(self.filename) im1 = im.resize((64,64), Image.ANTIALIAS); # 转换为64x64 des =", "skf: skfind[cnt] = train_index cnt += 1 list_fams = n", "ind l = serial imgs = [] for rank in", "self.prepareData2Matrix(X, y, n, l) msg = [cm, nimg, listf] self.finishSignal.emit(msg)", "[] for l_list in l: if 0 == l_list[1]: #", "leargist import cPickle import random import sys reload(sys) sys.setdefaultencoding( \"utf-8\"", "weights='distance') knn.fit(X, y) num = int(knn.predict(query_feature)) classname = n[num] proba", "name in l: if rank == name[3]: # print name", "sklearn.utils import shuffle import sklearn import leargist import cPickle import", "= X[p], y[p] # 打乱数组 kfold = 10 # 10重", "print l_list cache.append(l[l_list[3] - 1][1] + 1) no_imgs = cache[1:len(cache)]", "sklearn.metrics import confusion_matrix from sklearn.neighbors import KNeighborsClassifier from sklearn.neighbors import", "in l: if rank == name[3]: # print name imgs.append(name[2])", "[] for rank in ind: # print rank for name", "= [None] * len(skf) cnt = 0 for train_index in", "l: if 0 == l_list[1]: # print l[l_list[3] - 1]", "sklearn.neighbors import KNeighborsClassifier from sklearn.neighbors import BallTree from sklearn import", "y_predict = clf.predict(X_test) # output is labels and not indices", "n, l = self.getClassifyLabel() cm, nimg, listf = self.prepareData2Matrix(X, y,", "= des[0:320]; # 生成灰阶图,只需要前320内容 query_feature = feature.reshape(1, -1) self.feature =", "knn.predict_proba(query_feature) msg = [num, classname, proba] self.malwarSignal.emit(1, msg) ''' balltrees寻找数据集中最相近的样本", "= str(filename)#.encode('cp936') self.feature = '' ''' 获取训练结果 特征,标签,文件名称及相应的序号 ''' def", "n ,l ''' 准备绘制矩阵的数据 @X:特征矩阵 @y:标签 @n:所有样本家族名称 @l:对应家族个数 ''' def", "[] no_imgs = [] for l_list in l: if 0", "matrix cm = [] cm = confusion_matrix(y_test,y_predict) conf_mat = conf_mat", "print ind l = serial imgs = [] for rank", "glob, numpy, 
sys from PIL import Image from sklearn.cross_validation import", "BallTree from sklearn import cross_validation from sklearn.utils import shuffle import", "in range(kfold): train_indices = skfind[i][0] test_indices = skfind[i][1] clf =", "skfind[cnt] = train_index cnt += 1 list_fams = n cache", "weights='distance') X_train = X[train_indices] y_train = y[train_indices] X_test = X[test_indices]", "y, n, l) msg = [cm, nimg, listf] self.finishSignal.emit(msg) class", "输出所有家族包含文件个数 conf_mat = numpy.zeros((len(no_imgs), len(no_imgs))) # 初始化矩阵 n_neighbors = 5", "= conf_mat + cm return conf_mat, no_imgs, list_fams def run(self):", "# 特征 y = numpy.load(\"./datafiles/img_labels.npy\") # 标签 n = cPickle.load(open(\"./datafiles/img.p\",\"rb\"))", "cPickle import random import sys reload(sys) sys.setdefaultencoding( \"utf-8\" ) class", "= time.time() clf.fit(X_train,y_train) toc = time.time() print \"training time= \",", "cm, nimg, listf = self.prepareData2Matrix(X, y, n, l) msg =", "获取训练结果 特征,标签,文件名称及相应的序号 ''' def getClassifyLabel(self): X = numpy.load(\"./datafiles/img_features.npy\") # 特征", "= time.time() print \"training time= \", toc-tic # roughly 2.5", "# print name imgs.append(name[2]) self.concluSignal.emit(2, imgs) def run(self): X, y,", "cPickle.load(open(\"./datafiles/img.p\",\"rb\")) # 标号 l = cPickle.load(open(\"./datafiles/imglabel.p\", \"rb\")) # [家族号, 家族中序号,", "-1) self.feature = query_feature # 获取特征和标签 X = feature_X y", "1][1] + 1) no_imgs = cache[1:len(cache)] no_imgs.append(cache[0]) # print no_imgs", "X = feature_X y = label_y n = number n_neighbors", "- 1][1] + 1) no_imgs = cache[1:len(cache)] no_imgs.append(cache[0]) # print", "clf = KNeighborsClassifier(n_neighbors, weights='distance') X_train = X[train_indices] y_train = y[train_indices]", "# 打乱数组 kfold = 10 # 10重 skf = StratifiedKFold(y,kfold)", "import time tic = time.time() clf.fit(X_train,y_train) toc = time.time() print", "初始化矩阵 n_neighbors = 5 # 10-fold Cross Validation for i", "= label_y n = number 
n_neighbors = 5; # better", "960 values feature = des[0:320]; # 生成灰阶图,只需要前320内容 query_feature = feature.reshape(1,", "KNeighborsClassifier(n_neighbors, weights='distance') knn.fit(X, y) num = int(knn.predict(query_feature)) classname = n[num]", "y_train = y[train_indices] X_test = X[test_indices] y_test = y[test_indices] #", "ind[0] # print ind l = serial imgs = []", "cache[1:len(cache)] no_imgs.append(cache[0]) # print no_imgs # 输出所有家族包含文件个数 conf_mat = numpy.zeros((len(no_imgs),", "# 960 values feature = des[0:320]; # 生成灰阶图,只需要前320内容 query_feature =", "run(self): X, y, n ,l = self.getClassifyLabel() self.classifyImage(X, y, n)", "= feature_X b = BallTree(X) # 5个最相近的样本 dist, ind =", "numpy, sys from PIL import Image from sklearn.cross_validation import StratifiedKFold", "X = feature_X b = BallTree(X) # 5个最相近的样本 dist, ind", "y = numpy.load(\"./datafiles/img_labels.npy\") # 标签 n = cPickle.load(open(\"./datafiles/img.p\",\"rb\")) # 标号", "= KNeighborsClassifier(n_neighbors, weights='distance') knn.fit(X, y) num = int(knn.predict(query_feature)) classname =", "import os, glob, numpy, sys from PIL import Image from", "k=3) print dist, ind ind = ind[0] # print ind", "cnt = 0 for train_index in skf: skfind[cnt] = train_index", "# 10重 skf = StratifiedKFold(y,kfold) skfind = [None] * len(skf)", "labels and not indices toc = time.time() print \"testing time", "ind = b.query(self.feature, k=3) print dist, ind ind = ind[0]", "y_predict = [] tic = time.time() y_predict = clf.predict(X_test) #", "+ cm return conf_mat, no_imgs, list_fams def run(self): print \"start", "import leargist import cPickle import random import sys reload(sys) sys.setdefaultencoding(", "tic = time.time() y_predict = clf.predict(X_test) # output is labels", "dist, ind ind = ind[0] # print ind l =", "toc = time.time() print \"testing time = \", toc-tic #", "numpy.load(\"./datafiles/img_labels.npy\") # 标签 n = cPickle.load(open(\"./datafiles/img.p\",\"rb\")) # 标号 l =", "家族中序号, 文件名, 总序号] return X, y, n ,l ''' 
对图片进行分类", "= X.shape p = range(n_samples) random.seed(random.random()) random.shuffle(p) X, y =", "= [] tic = time.time() y_predict = clf.predict(X_test) # output", "cm = [] cm = confusion_matrix(y_test,y_predict) conf_mat = conf_mat +", "knn.fit(X, y) num = int(knn.predict(query_feature)) classname = n[num] proba =", "#coding=utf-8 from PyQt4 import QtCore import os, glob, numpy, sys", "y) num = int(knn.predict(query_feature)) classname = n[num] proba = knn.predict_proba(query_feature)", "l_list[1]: # print l[l_list[3] - 1] # print l_list cache.append(l[l_list[3]", "n = cPickle.load(open(\"./datafiles/img.p\",\"rb\")) # 标号 l = cPickle.load(open(\"./datafiles/imglabel.p\", \"rb\")) #", "import cross_validation from sklearn.utils import shuffle import sklearn import leargist", "name[3]: # print name imgs.append(name[2]) self.concluSignal.emit(2, imgs) def run(self): X,", "the code knn = KNeighborsClassifier(n_neighbors, weights='distance') knn.fit(X, y) num =", "ind: # print rank for name in l: if rank", "useless = X.shape p = range(n_samples) random.seed(random.random()) random.shuffle(p) X, y", "X[p], y[p] # 打乱数组 kfold = 10 # 10重 skf", "# [家族号, 家族中序号, 文件名, 总序号] return X, y, n ,l", "\"utf-8\" ) class ValidationResult(QtCore.QThread): finishSignal = QtCore.pyqtSignal(list) def __init__(self, parent=None):", "time tic = time.time() clf.fit(X_train,y_train) toc = time.time() print \"training", "no_imgs, list_fams def run(self): print \"start draw\" X, y, n,", "KNeighborsClassifier from sklearn.neighbors import BallTree from sklearn import cross_validation from", "[] cm = confusion_matrix(y_test,y_predict) conf_mat = conf_mat + cm return", "from sklearn import cross_validation from sklearn.utils import shuffle import sklearn", "clf = [] clf = KNeighborsClassifier(n_neighbors, weights='distance') X_train = X[train_indices]", "num = int(knn.predict(query_feature)) classname = n[num] proba = knn.predict_proba(query_feature) msg", "import sklearn import leargist import 
cPickle import random import sys", "test_indices = skfind[i][1] clf = [] clf = KNeighborsClassifier(n_neighbors, weights='distance')", "= QtCore.pyqtSignal(int, list) concluSignal = QtCore.pyqtSignal(int, list) def __init__(self, filename,", "in ind: # print rank for name in l: if", "# roughly 2.5 secs # Testing y_predict = [] tic", "secs # Compute confusion matrix cm = [] cm =", "self.finishSignal.emit(msg) class MalwareImageClass(QtCore.QThread): malwarSignal = QtCore.pyqtSignal(int, list) concluSignal = QtCore.pyqtSignal(int,", "output is labels and not indices toc = time.time() print", "super(MalwareImageClass, self).__init__(parent) self.filename = str(filename)#.encode('cp936') self.feature = '' ''' 获取训练结果", "= [num, classname, proba] self.malwarSignal.emit(1, msg) ''' balltrees寻找数据集中最相近的样本 返回距离值及样本标签号 '''", "def findMostSimilarImg(self, feature_X, serial): X = feature_X b = BallTree(X)", "l = self.getClassifyLabel() cm, nimg, listf = self.prepareData2Matrix(X, y, n,", "y[test_indices] # Training import time tic = time.time() clf.fit(X_train,y_train) toc", "''' 获取训练结果 特征,标签,文件名称及相应的序号 ''' def getClassifyLabel(self): X = numpy.load(\"./datafiles/img_features.npy\") #", "文件名, 总序号] return X, y, n ,l ''' 对图片进行分类 train@训练集特征", "[cm, nimg, listf] self.finishSignal.emit(msg) class MalwareImageClass(QtCore.QThread): malwarSignal = QtCore.pyqtSignal(int, list)", "= '' ''' 获取训练结果 特征,标签,文件名称及相应的序号 ''' def getClassifyLabel(self): X =", "serial): X = feature_X b = BallTree(X) # 5个最相近的样本 dist,", "= y[train_indices] X_test = X[test_indices] y_test = y[test_indices] # Training", "QtCore.pyqtSignal(list) def __init__(self, parent=None): super(ValidationResult, self).__init__(parent) def getClassifyLabel(self): X =", "= range(n_samples) random.seed(random.random()) random.shuffle(p) X, y = X[p], y[p] #", "import cPickle import random import sys reload(sys) sys.setdefaultencoding( \"utf-8\" )", "getClassifyLabel(self): X = numpy.load(\"./datafiles/img_features.npy\") # 特征 y = 
numpy.load(\"./datafiles/img_labels.npy\") #", "n_neighbors = 5 # 10-fold Cross Validation for i in", "n = number n_neighbors = 5; # better to have", "l = serial imgs = [] for rank in ind:", "class MalwareImageClass(QtCore.QThread): malwarSignal = QtCore.pyqtSignal(int, list) concluSignal = QtCore.pyqtSignal(int, list)", "KNeighborsClassifier(n_neighbors, weights='distance') X_train = X[train_indices] y_train = y[train_indices] X_test =", "MalwareImageClass(QtCore.QThread): malwarSignal = QtCore.pyqtSignal(int, list) concluSignal = QtCore.pyqtSignal(int, list) def", "转换为64x64 des = leargist.color_gist(im1); # 960 values feature = des[0:320];", "at the start of the code knn = KNeighborsClassifier(n_neighbors, weights='distance')", "feature.reshape(1, -1) self.feature = query_feature # 获取特征和标签 X = feature_X", "skfind[i][0] test_indices = skfind[i][1] clf = [] clf = KNeighborsClassifier(n_neighbors,", "str(filename)#.encode('cp936') self.feature = '' ''' 获取训练结果 特征,标签,文件名称及相应的序号 ''' def getClassifyLabel(self):", "\"training time= \", toc-tic # roughly 2.5 secs # Testing", "train@训练集特征 label@训练集标签 ''' def classifyImage(self, feature_X, label_y, number): im =", "for i in range(kfold): train_indices = skfind[i][0] test_indices = skfind[i][1]", "def run(self): X, y, n ,l = self.getClassifyLabel() self.classifyImage(X, y,", "im.resize((64,64), Image.ANTIALIAS); # 转换为64x64 des = leargist.color_gist(im1); # 960 values", "总序号] return X, y, n ,l ''' 准备绘制矩阵的数据 @X:特征矩阵 @y:标签", "''' 准备绘制矩阵的数据 @X:特征矩阵 @y:标签 @n:所有样本家族名称 @l:对应家族个数 ''' def prepareData2Matrix(self, X,", "sklearn import cross_validation from sklearn.utils import shuffle import sklearn import", "QtCore import os, glob, numpy, sys from PIL import Image", "Testing y_predict = [] tic = time.time() y_predict = clf.predict(X_test)", "cm = confusion_matrix(y_test,y_predict) conf_mat = conf_mat + cm return conf_mat,", "l_list in l: if 0 == l_list[1]: # print l[l_list[3]", "l[l_list[3] - 1] # print l_list 
cache.append(l[l_list[3] - 1][1] +", "y = label_y n = number n_neighbors = 5; #", "# print no_imgs # 输出所有家族包含文件个数 conf_mat = numpy.zeros((len(no_imgs), len(no_imgs))) #", "X[test_indices] y_test = y[test_indices] # Training import time tic =", "sklearn.cross_validation import StratifiedKFold from sklearn.metrics import confusion_matrix from sklearn.neighbors import", "msg) ''' balltrees寻找数据集中最相近的样本 返回距离值及样本标签号 ''' def findMostSimilarImg(self, feature_X, serial): X", "PyQt4 import QtCore import os, glob, numpy, sys from PIL", "10-fold Cross Validation for i in range(kfold): train_indices = skfind[i][0]", "== l_list[1]: # print l[l_list[3] - 1] # print l_list", "''' def getClassifyLabel(self): X = numpy.load(\"./datafiles/img_features.npy\") # 特征 y =", "of the code knn = KNeighborsClassifier(n_neighbors, weights='distance') knn.fit(X, y) num", "@X:特征矩阵 @y:标签 @n:所有样本家族名称 @l:对应家族个数 ''' def prepareData2Matrix(self, X, y, n,", "X, y = X[p], y[p] # 打乱数组 kfold = 10", "0 for train_index in skf: skfind[cnt] = train_index cnt +=", "self.feature = query_feature # 获取特征和标签 X = feature_X y =", "= 5 # 10-fold Cross Validation for i in range(kfold):", "准备绘制矩阵的数据 @X:特征矩阵 @y:标签 @n:所有样本家族名称 @l:对应家族个数 ''' def prepareData2Matrix(self, X, y,", "time.time() y_predict = clf.predict(X_test) # output is labels and not", "skf = StratifiedKFold(y,kfold) skfind = [None] * len(skf) cnt =", "[] clf = KNeighborsClassifier(n_neighbors, weights='distance') X_train = X[train_indices] y_train =", "conf_mat = conf_mat + cm return conf_mat, no_imgs, list_fams def", "= 5; # better to have this at the start", "n[num] proba = knn.predict_proba(query_feature) msg = [num, classname, proba] self.malwarSignal.emit(1,", "train_index cnt += 1 list_fams = n cache = []", "super(ValidationResult, self).__init__(parent) def getClassifyLabel(self): X = numpy.load(\"./datafiles/img_features.npy\") # 特征 y", "query_feature # 获取特征和标签 X = feature_X y = label_y n", "for name in l: if rank == name[3]: # print", "print 
\"training time= \", toc-tic # roughly 2.5 secs #", "5; # better to have this at the start of", "y[p] # 打乱数组 kfold = 10 # 10重 skf =", "print l[l_list[3] - 1] # print l_list cache.append(l[l_list[3] - 1][1]", "X.shape p = range(n_samples) random.seed(random.random()) random.shuffle(p) X, y = X[p],", "[num, classname, proba] self.malwarSignal.emit(1, msg) ''' balltrees寻找数据集中最相近的样本 返回距离值及样本标签号 ''' def", "len(skf) cnt = 0 for train_index in skf: skfind[cnt] =", "from sklearn.neighbors import KNeighborsClassifier from sklearn.neighbors import BallTree from sklearn", "1 list_fams = n cache = [] no_imgs = []", "self.getClassifyLabel() cm, nimg, listf = self.prepareData2Matrix(X, y, n, l) msg", "X, y, n, l = self.getClassifyLabel() cm, nimg, listf =", "''' def classifyImage(self, feature_X, label_y, number): im = Image.open(self.filename) im1", "= clf.predict(X_test) # output is labels and not indices toc", "print \"testing time = \", toc-tic # roughly 0.3 secs", "# 标号 l = cPickle.load(open(\"./datafiles/imglabel.p\", \"rb\")) # [家族号, 家族中序号, 文件名,", "\"rb\")) # [家族号, 家族中序号, 文件名, 总序号] return X, y, n", "range(n_samples) random.seed(random.random()) random.shuffle(p) X, y = X[p], y[p] # 打乱数组", "= BallTree(X) # 5个最相近的样本 dist, ind = b.query(self.feature, k=3) print", "打乱数组 kfold = 10 # 10重 skf = StratifiedKFold(y,kfold) skfind", "# 获取特征和标签 X = feature_X y = label_y n =", "= feature.reshape(1, -1) self.feature = query_feature # 获取特征和标签 X =", "sys reload(sys) sys.setdefaultencoding( \"utf-8\" ) class ValidationResult(QtCore.QThread): finishSignal = QtCore.pyqtSignal(list)", "no_imgs = [] for l_list in l: if 0 ==", "Cross Validation for i in range(kfold): train_indices = skfind[i][0] test_indices", "train_indices = skfind[i][0] test_indices = skfind[i][1] clf = [] clf", "clf.fit(X_train,y_train) toc = time.time() print \"training time= \", toc-tic #", "- 1] # print l_list cache.append(l[l_list[3] - 1][1] + 1)", "shuffle import sklearn import leargist import cPickle import random 
import", "time.time() clf.fit(X_train,y_train) toc = time.time() print \"training time= \", toc-tic", "from sklearn.utils import shuffle import sklearn import leargist import cPickle", "im = Image.open(self.filename) im1 = im.resize((64,64), Image.ANTIALIAS); # 转换为64x64 des", "list) def __init__(self, filename, parent=None): super(MalwareImageClass, self).__init__(parent) self.filename = str(filename)#.encode('cp936')", "classifyImage(self, feature_X, label_y, number): im = Image.open(self.filename) im1 = im.resize((64,64),", "the start of the code knn = KNeighborsClassifier(n_neighbors, weights='distance') knn.fit(X,", "import sys reload(sys) sys.setdefaultencoding( \"utf-8\" ) class ValidationResult(QtCore.QThread): finishSignal =", "in l: if 0 == l_list[1]: # print l[l_list[3] -", "l): n_samples, useless = X.shape p = range(n_samples) random.seed(random.random()) random.shuffle(p)", "n cache = [] no_imgs = [] for l_list in", "X, y, n ,l ''' 准备绘制矩阵的数据 @X:特征矩阵 @y:标签 @n:所有样本家族名称 @l:对应家族个数", "label_y, number): im = Image.open(self.filename) im1 = im.resize((64,64), Image.ANTIALIAS); #", "self.malwarSignal.emit(1, msg) ''' balltrees寻找数据集中最相近的样本 返回距离值及样本标签号 ''' def findMostSimilarImg(self, feature_X, serial):", "conf_mat = numpy.zeros((len(no_imgs), len(no_imgs))) # 初始化矩阵 n_neighbors = 5 #", "特征,标签,文件名称及相应的序号 ''' def getClassifyLabel(self): X = numpy.load(\"./datafiles/img_features.npy\") # 特征 y", "imgs = [] for rank in ind: # print rank", "= [] cm = confusion_matrix(y_test,y_predict) conf_mat = conf_mat + cm", "= self.prepareData2Matrix(X, y, n, l) msg = [cm, nimg, listf]", "Image.open(self.filename) im1 = im.resize((64,64), Image.ANTIALIAS); # 转换为64x64 des = leargist.color_gist(im1);", "= 10 # 10重 skf = StratifiedKFold(y,kfold) skfind = [None]", "5 # 10-fold Cross Validation for i in range(kfold): train_indices", "time= \", toc-tic # roughly 2.5 secs # Testing y_predict", "from PIL import Image from sklearn.cross_validation import StratifiedKFold from 
sklearn.metrics", "\", toc-tic # roughly 2.5 secs # Testing y_predict =", "= QtCore.pyqtSignal(list) def __init__(self, parent=None): super(ValidationResult, self).__init__(parent) def getClassifyLabel(self): X", "roughly 2.5 secs # Testing y_predict = [] tic =", "return X, y, n ,l ''' 准备绘制矩阵的数据 @X:特征矩阵 @y:标签 @n:所有样本家族名称", "= [cm, nimg, listf] self.finishSignal.emit(msg) class MalwareImageClass(QtCore.QThread): malwarSignal = QtCore.pyqtSignal(int,", "feature = des[0:320]; # 生成灰阶图,只需要前320内容 query_feature = feature.reshape(1, -1) self.feature", ") class ValidationResult(QtCore.QThread): finishSignal = QtCore.pyqtSignal(list) def __init__(self, parent=None): super(ValidationResult,", "# better to have this at the start of the", "cache = [] no_imgs = [] for l_list in l:", "= number n_neighbors = 5; # better to have this", "y = X[p], y[p] # 打乱数组 kfold = 10 #", "def getClassifyLabel(self): X = numpy.load(\"./datafiles/img_features.npy\") # 特征 y = numpy.load(\"./datafiles/img_labels.npy\")", "skfind[i][1] clf = [] clf = KNeighborsClassifier(n_neighbors, weights='distance') X_train =", "= numpy.zeros((len(no_imgs), len(no_imgs))) # 初始化矩阵 n_neighbors = 5 # 10-fold", "1] # print l_list cache.append(l[l_list[3] - 1][1] + 1) no_imgs", "import BallTree from sklearn import cross_validation from sklearn.utils import shuffle", "= feature_X y = label_y n = number n_neighbors =", "# 生成灰阶图,只需要前320内容 query_feature = feature.reshape(1, -1) self.feature = query_feature #", "name imgs.append(name[2]) self.concluSignal.emit(2, imgs) def run(self): X, y, n ,l", "y, n ,l ''' 准备绘制矩阵的数据 @X:特征矩阵 @y:标签 @n:所有样本家族名称 @l:对应家族个数 '''", "clf.predict(X_test) # output is labels and not indices toc =", "from sklearn.cross_validation import StratifiedKFold from sklearn.metrics import confusion_matrix from sklearn.neighbors", "dist, ind = b.query(self.feature, k=3) print dist, ind ind =", "Training import time tic = time.time() clf.fit(X_train,y_train) toc = time.time()", "Image.ANTIALIAS); # 转换为64x64 des 
= leargist.color_gist(im1); # 960 values feature", "@y:标签 @n:所有样本家族名称 @l:对应家族个数 ''' def prepareData2Matrix(self, X, y, n, l):", "cnt += 1 list_fams = n cache = [] no_imgs", "# 10-fold Cross Validation for i in range(kfold): train_indices =", "reload(sys) sys.setdefaultencoding( \"utf-8\" ) class ValidationResult(QtCore.QThread): finishSignal = QtCore.pyqtSignal(list) def", "self.filename = str(filename)#.encode('cp936') self.feature = '' ''' 获取训练结果 特征,标签,文件名称及相应的序号 '''", "= im.resize((64,64), Image.ANTIALIAS); # 转换为64x64 des = leargist.color_gist(im1); # 960", "# Testing y_predict = [] tic = time.time() y_predict =", "# Compute confusion matrix cm = [] cm = confusion_matrix(y_test,y_predict)", "10重 skf = StratifiedKFold(y,kfold) skfind = [None] * len(skf) cnt", "y, n ,l = self.getClassifyLabel() self.classifyImage(X, y, n) self.findMostSimilarImg(X, l)", "no_imgs.append(cache[0]) # print no_imgs # 输出所有家族包含文件个数 conf_mat = numpy.zeros((len(no_imgs), len(no_imgs)))", "number n_neighbors = 5; # better to have this at", "secs # Testing y_predict = [] tic = time.time() y_predict", "nimg, listf] self.finishSignal.emit(msg) class MalwareImageClass(QtCore.QThread): malwarSignal = QtCore.pyqtSignal(int, list) concluSignal", "StratifiedKFold from sklearn.metrics import confusion_matrix from sklearn.neighbors import KNeighborsClassifier from", "self).__init__(parent) self.filename = str(filename)#.encode('cp936') self.feature = '' ''' 获取训练结果 特征,标签,文件名称及相应的序号", "feature_X y = label_y n = number n_neighbors = 5;", "imgs.append(name[2]) self.concluSignal.emit(2, imgs) def run(self): X, y, n ,l =", "= knn.predict_proba(query_feature) msg = [num, classname, proba] self.malwarSignal.emit(1, msg) '''", "cache.append(l[l_list[3] - 1][1] + 1) no_imgs = cache[1:len(cache)] no_imgs.append(cache[0]) #", "= y[test_indices] # Training import time tic = time.time() clf.fit(X_train,y_train)", "标签 n = cPickle.load(open(\"./datafiles/img.p\",\"rb\")) # 标号 l = 
cPickle.load(open(\"./datafiles/imglabel.p\", \"rb\"))", "= numpy.load(\"./datafiles/img_features.npy\") # 特征 y = numpy.load(\"./datafiles/img_labels.npy\") # 标签 n", "int(knn.predict(query_feature)) classname = n[num] proba = knn.predict_proba(query_feature) msg = [num,", "# print rank for name in l: if rank ==", "l_list cache.append(l[l_list[3] - 1][1] + 1) no_imgs = cache[1:len(cache)] no_imgs.append(cache[0])", "总序号] return X, y, n ,l ''' 对图片进行分类 train@训练集特征 label@训练集标签", "标号 l = cPickle.load(open(\"./datafiles/imglabel.p\", \"rb\")) # [家族号, 家族中序号, 文件名, 总序号]", ",l ''' 对图片进行分类 train@训练集特征 label@训练集标签 ''' def classifyImage(self, feature_X, label_y,", "from sklearn.metrics import confusion_matrix from sklearn.neighbors import KNeighborsClassifier from sklearn.neighbors", "Compute confusion matrix cm = [] cm = confusion_matrix(y_test,y_predict) conf_mat", "特征 y = numpy.load(\"./datafiles/img_labels.npy\") # 标签 n = cPickle.load(open(\"./datafiles/img.p\",\"rb\")) #", "= time.time() y_predict = clf.predict(X_test) # output is labels and", "l: if rank == name[3]: # print name imgs.append(name[2]) self.concluSignal.emit(2,", "to have this at the start of the code knn", "y, n, l): n_samples, useless = X.shape p = range(n_samples)", "train_index in skf: skfind[cnt] = train_index cnt += 1 list_fams", "1) no_imgs = cache[1:len(cache)] no_imgs.append(cache[0]) # print no_imgs # 输出所有家族包含文件个数", "p = range(n_samples) random.seed(random.random()) random.shuffle(p) X, y = X[p], y[p]", "confusion_matrix from sklearn.neighbors import KNeighborsClassifier from sklearn.neighbors import BallTree from", "print name imgs.append(name[2]) self.concluSignal.emit(2, imgs) def run(self): X, y, n", "confusion_matrix(y_test,y_predict) conf_mat = conf_mat + cm return conf_mat, no_imgs, list_fams", "if 0 == l_list[1]: # print l[l_list[3] - 1] #", "= skfind[i][0] test_indices = skfind[i][1] clf = [] clf =", "cm return conf_mat, no_imgs, list_fams def run(self): print \"start draw\"", "X, y, n ,l 
= self.getClassifyLabel() self.classifyImage(X, y, n) self.findMostSimilarImg(X,", "kfold = 10 # 10重 skf = StratifiedKFold(y,kfold) skfind =", "no_imgs # 输出所有家族包含文件个数 conf_mat = numpy.zeros((len(no_imgs), len(no_imgs))) # 初始化矩阵 n_neighbors", "cross_validation from sklearn.utils import shuffle import sklearn import leargist import", "values feature = des[0:320]; # 生成灰阶图,只需要前320内容 query_feature = feature.reshape(1, -1)", "indices toc = time.time() print \"testing time = \", toc-tic", "Image from sklearn.cross_validation import StratifiedKFold from sklearn.metrics import confusion_matrix from", "= \", toc-tic # roughly 0.3 secs # Compute confusion", "proba = knn.predict_proba(query_feature) msg = [num, classname, proba] self.malwarSignal.emit(1, msg)", "n_samples, useless = X.shape p = range(n_samples) random.seed(random.random()) random.shuffle(p) X,", "parent=None): super(ValidationResult, self).__init__(parent) def getClassifyLabel(self): X = numpy.load(\"./datafiles/img_features.npy\") # 特征", "time.time() print \"training time= \", toc-tic # roughly 2.5 secs", "time = \", toc-tic # roughly 0.3 secs # Compute", "rank == name[3]: # print name imgs.append(name[2]) self.concluSignal.emit(2, imgs) def", "finishSignal = QtCore.pyqtSignal(list) def __init__(self, parent=None): super(ValidationResult, self).__init__(parent) def getClassifyLabel(self):", "label_y n = number n_neighbors = 5; # better to", "= X[test_indices] y_test = y[test_indices] # Training import time tic", "import StratifiedKFold from sklearn.metrics import confusion_matrix from sklearn.neighbors import KNeighborsClassifier", "class ValidationResult(QtCore.QThread): finishSignal = QtCore.pyqtSignal(list) def __init__(self, parent=None): super(ValidationResult, self).__init__(parent)", "for rank in ind: # print rank for name in", "= [] clf = KNeighborsClassifier(n_neighbors, weights='distance') X_train = X[train_indices] y_train", "对图片进行分类 train@训练集特征 label@训练集标签 ''' def classifyImage(self, feature_X, 
label_y, number): im", "def prepareData2Matrix(self, X, y, n, l): n_samples, useless = X.shape", "= skfind[i][1] clf = [] clf = KNeighborsClassifier(n_neighbors, weights='distance') X_train", "code knn = KNeighborsClassifier(n_neighbors, weights='distance') knn.fit(X, y) num = int(knn.predict(query_feature))", "rank in ind: # print rank for name in l:", "serial imgs = [] for rank in ind: # print", "= 0 for train_index in skf: skfind[cnt] = train_index cnt", "[] tic = time.time() y_predict = clf.predict(X_test) # output is", "in skf: skfind[cnt] = train_index cnt += 1 list_fams =", "feature_X, serial): X = feature_X b = BallTree(X) # 5个最相近的样本", "b.query(self.feature, k=3) print dist, ind ind = ind[0] # print", "if rank == name[3]: # print name imgs.append(name[2]) self.concluSignal.emit(2, imgs)", "classname, proba] self.malwarSignal.emit(1, msg) ''' balltrees寻找数据集中最相近的样本 返回距离值及样本标签号 ''' def findMostSimilarImg(self,", "random import sys reload(sys) sys.setdefaultencoding( \"utf-8\" ) class ValidationResult(QtCore.QThread): finishSignal", "= n cache = [] no_imgs = [] for l_list", "self.feature = '' ''' 获取训练结果 特征,标签,文件名称及相应的序号 ''' def getClassifyLabel(self): X", "== name[3]: # print name imgs.append(name[2]) self.concluSignal.emit(2, imgs) def run(self):", "this at the start of the code knn = KNeighborsClassifier(n_neighbors,", "= [] for l_list in l: if 0 == l_list[1]:", "@l:对应家族个数 ''' def prepareData2Matrix(self, X, y, n, l): n_samples, useless", "numpy.zeros((len(no_imgs), len(no_imgs))) # 初始化矩阵 n_neighbors = 5 # 10-fold Cross", "import random import sys reload(sys) sys.setdefaultencoding( \"utf-8\" ) class ValidationResult(QtCore.QThread):", "ind ind = ind[0] # print ind l = serial", "import shuffle import sklearn import leargist import cPickle import random", "better to have this at the start of the code", "# 标签 n = cPickle.load(open(\"./datafiles/img.p\",\"rb\")) # 标号 l = cPickle.load(open(\"./datafiles/imglabel.p\",", "draw\" X, y, n, l = self.getClassifyLabel() 
cm, nimg, listf", "= query_feature # 获取特征和标签 X = feature_X y = label_y", "+= 1 list_fams = n cache = [] no_imgs =", "random.shuffle(p) X, y = X[p], y[p] # 打乱数组 kfold =", "y_test = y[test_indices] # Training import time tic = time.time()", "= cache[1:len(cache)] no_imgs.append(cache[0]) # print no_imgs # 输出所有家族包含文件个数 conf_mat =", "sys.setdefaultencoding( \"utf-8\" ) class ValidationResult(QtCore.QThread): finishSignal = QtCore.pyqtSignal(list) def __init__(self,", "des = leargist.color_gist(im1); # 960 values feature = des[0:320]; #", "= [] no_imgs = [] for l_list in l: if", "label@训练集标签 ''' def classifyImage(self, feature_X, label_y, number): im = Image.open(self.filename)", "[None] * len(skf) cnt = 0 for train_index in skf:", "= cPickle.load(open(\"./datafiles/img.p\",\"rb\")) # 标号 l = cPickle.load(open(\"./datafiles/imglabel.p\", \"rb\")) # [家族号,", "StratifiedKFold(y,kfold) skfind = [None] * len(skf) cnt = 0 for", "confusion matrix cm = [] cm = confusion_matrix(y_test,y_predict) conf_mat =", "tic = time.time() clf.fit(X_train,y_train) toc = time.time() print \"training time=", "msg = [num, classname, proba] self.malwarSignal.emit(1, msg) ''' balltrees寻找数据集中最相近的样本 返回距离值及样本标签号", "* len(skf) cnt = 0 for train_index in skf: skfind[cnt]", "5个最相近的样本 dist, ind = b.query(self.feature, k=3) print dist, ind ind", "toc = time.time() print \"training time= \", toc-tic # roughly", "X, y, n, l): n_samples, useless = X.shape p =", "from sklearn.neighbors import BallTree from sklearn import cross_validation from sklearn.utils", "Validation for i in range(kfold): train_indices = skfind[i][0] test_indices =", "= time.time() print \"testing time = \", toc-tic # roughly", "''' def prepareData2Matrix(self, X, y, n, l): n_samples, useless =", "b = BallTree(X) # 5个最相近的样本 dist, ind = b.query(self.feature, k=3)", "= numpy.load(\"./datafiles/img_labels.npy\") # 标签 n = cPickle.load(open(\"./datafiles/img.p\",\"rb\")) # 标号 l", "ValidationResult(QtCore.QThread): finishSignal = 
QtCore.pyqtSignal(list) def __init__(self, parent=None): super(ValidationResult, self).__init__(parent) def", "''' 对图片进行分类 train@训练集特征 label@训练集标签 ''' def classifyImage(self, feature_X, label_y, number):", "import confusion_matrix from sklearn.neighbors import KNeighborsClassifier from sklearn.neighbors import BallTree", ",l ''' 准备绘制矩阵的数据 @X:特征矩阵 @y:标签 @n:所有样本家族名称 @l:对应家族个数 ''' def prepareData2Matrix(self,", "list_fams def run(self): print \"start draw\" X, y, n, l", "imgs) def run(self): X, y, n ,l = self.getClassifyLabel() self.classifyImage(X,", "print \"start draw\" X, y, n, l = self.getClassifyLabel() cm,", "toc-tic # roughly 0.3 secs # Compute confusion matrix cm", "= int(knn.predict(query_feature)) classname = n[num] proba = knn.predict_proba(query_feature) msg =", "import QtCore import os, glob, numpy, sys from PIL import", "y, n, l = self.getClassifyLabel() cm, nimg, listf = self.prepareData2Matrix(X,", "返回距离值及样本标签号 ''' def findMostSimilarImg(self, feature_X, serial): X = feature_X b", "toc-tic # roughly 2.5 secs # Testing y_predict = []", "cPickle.load(open(\"./datafiles/imglabel.p\", \"rb\")) # [家族号, 家族中序号, 文件名, 总序号] return X, y,", "print rank for name in l: if rank == name[3]:", "[家族号, 家族中序号, 文件名, 总序号] return X, y, n ,l '''", "n ,l ''' 对图片进行分类 train@训练集特征 label@训练集标签 ''' def classifyImage(self, feature_X," ]
[ "\"mygroup\", }, ) judge_command( \"XINFO GROUPS mystream\", {\"command\": \"XINFO\", \"stream_groups\":", "STREAMS mystream writers 0-0 0-0\", { \"command\": \"XREAD\", \"count_const\": \"COUNT\",", "}, ) judge_command( \"XINFO STREAM mystream FULL count 10\", {", "\"stream_id\": [\"1526985054069\", \"1526985055069-10\"], }, ) judge_command( \"XRANGE somestream 1526985054069 1526985055069-10", "\"command\": \"xadd\", \"key\": \"mystream\", \"sfield\": \"key\", \"svalue\": \"value\", \"stream_id\": \"123-123\",", "2\", { \"command\": \"XTRIM\", \"key\": \"mystream\", \"maxlen\": \"MAXLEN\", \"count\": \"2\",", "\"consumer\": \"Alice\", \"millisecond\": \"3600000\", \"stream_id\": \"1526569498055-0\", \"justid\": \"JUSTID\", }, )", "\"XREADGROUP GROUP mygroup1 Bob COUNT 1 BLOCK 100 NOACK STREAMS", "None) def test_xdel(judge_command): judge_command( \"XDEL mystream 1581165000000 1549611229000 1581060831000\", {\"command\":", "IDEL 300\", { \"command\": \"XCLAIM\", \"key\": \"mystream\", \"group\": \"mygroup\", \"consumer\":", "judge_command(\"XINFO consumers mystream mygroup GROUPS mystream\", None) judge_command(\"XINFO groups mystream", "spcify stream id judge_command( \"xadd mystream 123-123 key value\", {", "\"stream_id\": \"123-123\", }, ) judge_command( \"xadd mystream 123-123 key value", "\"XCLAIM mystream mygroup Alice 3600000 1526569498055-0 IDEL 300\", { \"command\":", "\"TIME\", \"timestamp\": \"123456789\", }, ) judge_command( \"XCLAIM mystream mygroup Alice", ") def test_xinfo(judge_command): judge_command( \"XINFO consumers mystream mygroup\", { \"command\":", "\"group\": \"mygroup1\", \"consumer\": \"Bob\", \"count_const\": \"COUNT\", \"count\": \"1\", \"block\": \"BLOCK\",", "\"mystream\", \"full_const\": \"FULL\", \"count_const\": \"count\", \"count\": \"10\", }, ) def", "# FIXME current grammar can't support multiple tokens # so", "\"GROUPS\", \"key\": \"mystream\"}, ) judge_command( \"XINFO STREAM mystream\", {\"command\": \"XINFO\",", 
"\"svalue\": \"value\", \"stream_id\": \"123-123\", }, ) judge_command( \"xadd mystream 123-123", "delconsumer mykey mygroup myconsumer\", { \"command\": \"XGROUP\", \"stream_delconsumer\": \"delconsumer\", \"key\":", ") judge_command(\"XGROUP destroy mykey\", None) judge_command(\"XGROUP DESTROY mykey mygroup $\",", "\"FULL\", }, ) judge_command( \"XINFO STREAM mystream FULL count 10\",", "\"*\", }, ) # test for MAXLEN option judge_command( \"xadd", "- + \", None) def test_xadd(judge_command): judge_command( \"xadd mystream MAXLEN", "\"group\": \"mygroup\", \"consumer\": \"Alice\", \"millisecond\": \"3600000\", \"stream_id\": \"1526569498055-0\", }, )", "\"mygroup\", \"consumer\": \"Alice\", \"millisecond\": \"3600000\", \"stream_id\": \"1526569498055-0\", \"force\": \"FORCE\", },", "\"XINFO\", \"stream_groups\": \"GROUPS\", \"key\": \"mystream\"}, ) judge_command( \"XINFO STREAM mystream\",", "\"XCLAIM mystream mygroup Alice 3600000 1526569498055-0 JUSTID\", { \"command\": \"XCLAIM\",", "None) def test_xgroup_destroy(judge_command): judge_command( \"XGROUP destroy mykey mygroup\", { \"command\":", "[\"-\", \"+\"], \"count\": \"10\", }, ) judge_command( \"XPENDING mystream group55", "\"XPENDING\", \"key\": \"mystream\", \"group\": \"group55\"}, ) judge_command( \"XPENDING mystream group55", "\"XGROUP\", \"stream_create\": \"CREATE\", \"key\": \"mykey\", \"group\": \"mygroup\", \"stream_id\": \"$\", },", "1000 * key value\", { \"command\": \"xadd\", \"key\": \"mystream\", \"maxlen\":", "value\", { \"command\": \"xadd\", \"key\": \"mystream\", \"maxlen\": \"MAXLEN\", \"approximately\": \"~\",", "{ \"command\": \"xadd\", \"key\": \"mystream\", \"maxlen\": \"MAXLEN\", \"approximately\": \"~\", \"count\":", "\"stream_id\": \"$\", }, ) # short of a parameter judge_command(\"XGROUP", "mystream group55 - + \", None) def test_xadd(judge_command): judge_command( \"xadd", "{\"command\": \"XTRIM\", \"key\": \"mystream\", \"maxlen\": \"MAXLEN\", \"count\": \"2\"}, ) 
judge_command(", "\"command\": \"XGROUP\", \"stream_delconsumer\": \"delconsumer\", \"key\": \"mykey\", \"group\": \"mygroup\", \"consumer\": \"myconsumer\",", "\"0-0\", }, ) judge_command( \"XREAD COUNT 2 BLOCK 1000 STREAMS", "mykey mygroup 123\", { \"command\": \"XGROUP\", \"stream_create\": \"CREATE\", \"key\": \"mykey\",", "\"STREAM\", \"key\": \"mystream\", \"full_const\": \"FULL\", }, ) judge_command( \"XINFO STREAM", "}, ) judge_command( \"XINFO GROUPS mystream\", {\"command\": \"XINFO\", \"stream_groups\": \"GROUPS\",", "\"XINFO\", \"stream\": \"STREAM\", \"key\": \"mystream\", \"full_const\": \"FULL\", \"count_const\": \"count\", \"count\":", "\"123123\", }, ) judge_command( \"XACK mystream group1 123123 111\", {\"command\":", "}, ) judge_command( \"XCLAIM mystream mygroup Alice 3600000 1526569498055-0 123", "\"force\": \"FORCE\", }, ) judge_command( \"XCLAIM mystream mygroup Alice 3600000", "\"FORCE\", }, ) judge_command( \"XCLAIM mystream mygroup Alice 3600000 1526569498055-0", "\"stream_id\": \"1526569498055-0\", \"idel\": \"IDEL\", }, ) judge_command( \"XCLAIM mystream mygroup", "}, ) judge_command( \"XCLAIM mystream mygroup Alice 3600000 1526569498055-0 JUSTID\",", "\"*\", }, ) # spcify stream id judge_command( \"xadd mystream", "\"help\": \"HELP\"}) judge_command(\"XINFO consumers mystream mygroup GROUPS mystream\", None) judge_command(\"XINFO", "FULL count 10\", { \"command\": \"XINFO\", \"stream\": \"STREAM\", \"key\": \"mystream\",", "\"key\": \"somestream\", \"stream_id\": [\"1526985054069\", \"1526985055069-10\"], \"count_const\": \"count\", \"count\": \"10\", },", "\"group1\", \"stream_id\": \"123123\", }, ) judge_command( \"XACK mystream group1 123123", "\"command\": \"XREADGROUP\", \"stream_group\": \"GROUP\", \"group\": \"mygroup1\", \"consumer\": \"Bob\", \"streams\": \"STREAMS\",", "\"XINFO STREAM mystream\", {\"command\": \"XINFO\", \"stream\": \"STREAM\", \"key\": \"mystream\"}, )", "\"time\": \"TIME\", \"timestamp\": \"123456789\", }, ) 
judge_command( \"XCLAIM mystream mygroup", "100 NOACK STREAMS key1 1 key2 2\", { \"command\": \"XREADGROUP\",", ") judge_command(\"XINFO HELP\", {\"command\": \"XINFO\", \"help\": \"HELP\"}) judge_command(\"XINFO consumers mystream", "\"key\": \"mykey\", \"group\": \"mygroup\", }, ) judge_command(\"XGROUP destroy mykey\", None)", "\"stream_setid\": \"SETID\", \"key\": \"mykey\", \"group\": \"mygroup\", \"stream_id\": \"$\", }, )", "\"XREADGROUP\", \"stream_group\": \"GROUP\", \"group\": \"mygroup1\", \"consumer\": \"Bob\", \"count_const\": \"COUNT\", \"count\":", "\"consumer\": \"myconsumer\", }, ) judge_command( \"XPENDING mystream group55 - +", "}, ) judge_command( \"XGROUP delconsumer mykey mygroup $\", { \"command\":", "value\", { \"command\": \"xadd\", \"key\": \"mystream\", \"sfield\": \"key\", \"svalue\": \"value\",", "}, ) judge_command( \"XCLAIM mystream mygroup Alice 3600000 1526569498055-0 retrycount", "count 10\", { \"command\": \"XRANGE\", \"key\": \"somestream\", \"stream_id\": [\"1526985054069\", \"1526985055069-10\"],", "None) judge_command(\"XGROUP CREATE mykey\", None) def test_xgroup_setid(judge_command): judge_command( \"XGROUP SETID", "\" XTRIM mystream MAXLEN ~ 2\", { \"command\": \"XTRIM\", \"key\":", "}, ) judge_command(\"XGROUP delconsumer mykey mygroup\", None) def test_xgroup_stream(judge_command): judge_command(", "\"key\": \"mystream\", \"full_const\": \"FULL\", \"count_const\": \"count\", \"count\": \"10\", }, )", "\"XPENDING mystream group55 myconsumer\", { \"command\": \"XPENDING\", \"key\": \"mystream\", \"group\":", "\"mykey\", \"group\": \"mygroup\", \"stream_id\": \"123\", }, ) judge_command( \"XGROUP SETID", ") judge_command( \"XCLAIM mystream mygroup Alice 3600000 1526569498055-0 123 456", "hello world\", { \"command\": \"xadd\", \"key\": \"mystream\", \"sfield\": \"hello\", \"svalue\":", "\"stream_id\": \"123\", }, ) judge_command( \"XGROUP CREATE mykey mygroup $\",", "XTRIM mystream MAXLEN ~ 2\", { \"command\": \"XTRIM\", 
\"key\": \"mystream\",", "{ \"command\": \"XPENDING\", \"key\": \"mystream\", \"group\": \"group55\", \"consumer\": \"myconsumer\", },", "recongized to keys. \"keys\": \"mystream writers 0-0\", \"stream_id\": \"0-0\", },", "\"Alice\", \"millisecond\": \"3600000\", \"stream_id\": \"789\", }, ) judge_command( \"XCLAIM mystream", "judge_command( \"XGROUP delconsumer mykey mygroup myconsumer\", { \"command\": \"XGROUP\", \"stream_delconsumer\":", "\"mystream\", \"maxlen\": \"MAXLEN\", \"count\": \"2\"}, ) judge_command( \" XTRIM mystream", "mygroup1 Bob STREAMS key1 1 key2 2\", { \"command\": \"XREADGROUP\",", "\"count\", \"count\": \"10\", }, ) def test_xgroup_create(judge_command): judge_command( \"XGROUP CREATE", "group55 - + \", None) def test_xadd(judge_command): judge_command( \"xadd mystream", "\"XGROUP\", \"stream_destroy\": \"destroy\", \"key\": \"mykey\", \"group\": \"mygroup\", }, ) judge_command(\"XGROUP", "judge_command( \"XCLAIM mystream mygroup Alice 3600000 1526569498055-0 JUSTID\", { \"command\":", "{ \"command\": \"XPENDING\", \"key\": \"mystream\", \"group\": \"group55\", \"stream_id\": [\"-\", \"+\"],", "\"111\"}, ) def test_xinfo(judge_command): judge_command( \"XINFO consumers mystream mygroup\", {", "\"key\": \"mystream\", \"sfield\": \"key\", \"svalue\": \"value\", \"stream_id\": \"*\", }, )", "\"$\", }, ) # short of a parameter judge_command(\"XGROUP CREATE", "\"key\": \"mystream\", \"group\": \"group55\", \"consumer\": \"myconsumer\", }, ) judge_command( \"XPENDING", "\"group\": \"group55\", \"stream_id\": [\"-\", \"+\"], \"count\": \"10\", }, ) judge_command(", "\"BLOCK\", \"millisecond\": \"1000\", \"stream_id\": \"0-0\", }, ) def test_xreadgroup(judge_command): judge_command(", "def test_xclaim(judge_command): judge_command( \"XCLAIM mystream mygroup Alice 3600000 1526569498055-0\", {", "\"2\", \"streams\": \"STREAMS\", # FIXME current grammar can't support multiple", "\"group1\", \"stream_id\": \"111\"}, ) def test_xinfo(judge_command): 
judge_command( \"XINFO consumers mystream", "judge_command(\"XPENDING mystream group55 - + \", None) def test_xadd(judge_command): judge_command(", "mykey mygroup $\", { \"command\": \"XGROUP\", \"stream_delconsumer\": \"delconsumer\", \"key\": \"mykey\",", ") judge_command( \"XINFO STREAM mystream FULL count 10\", { \"command\":", "\"mystream\", \"group\": \"group55\", \"stream_id\": [\"-\", \"+\"], \"count\": \"10\", \"consumer\": \"myconsumer\",", "~ 1000 * key value\", { \"command\": \"xadd\", \"key\": \"mystream\",", "MAXLEN ~ 2\", { \"command\": \"XTRIM\", \"key\": \"mystream\", \"maxlen\": \"MAXLEN\",", "1581165000000 1549611229000 1581060831000\", {\"command\": \"XDEL\", \"key\": \"mystream\", \"stream_id\": \"1581060831000\"}, )", "\"key\": \"mystream\", \"group\": \"group1\", \"stream_id\": \"111\"}, ) def test_xinfo(judge_command): judge_command(", "# test for MAXLEN option judge_command( \"xadd mystream MAXLEN 1000", "test_xtrim(judge_command): judge_command( \" XTRIM mystream MAXLEN 2\", {\"command\": \"XTRIM\", \"key\":", "\"millisecond\": \"100\", \"noack\": \"NOACK\", \"streams\": \"STREAMS\", \"keys\": \"key1 1 key2\",", "\"CREATE\", \"key\": \"mykey\", \"group\": \"mygroup\", \"stream_id\": \"$\", }, ) #", "judge_command( \"XCLAIM mystream mygroup Alice 3600000 1526569498055-0\", { \"command\": \"XCLAIM\",", "mygroup Alice 3600000 1526569498055-0 FORCE\", { \"command\": \"XCLAIM\", \"key\": \"mystream\",", "mygroup\", None) def test_xinfo_with_full(judge_command): judge_command( \"XINFO STREAM mystream FULL\", {", "delconsumer mykey mygroup\", None) def test_xgroup_stream(judge_command): judge_command( \"XACK mystream group1", "test_xrange(judge_command): judge_command( \"XRANGE somestream - +\", {\"command\": \"XRANGE\", \"key\": \"somestream\",", "\"key\": \"mykey\", \"group\": \"mygroup\", \"consumer\": \"$\", }, ) judge_command(\"XGROUP delconsumer", "mystream 1581165000000 1549611229000 1581060831000\", {\"command\": \"XDEL\", \"key\": 
\"mystream\", \"stream_id\": \"1581060831000\"},", ") # short of a parameter judge_command(\"XGROUP CREATE mykey mygroup\",", "judge_command( \"XGROUP delconsumer mykey mygroup $\", { \"command\": \"XGROUP\", \"stream_delconsumer\":", "\"+\"], \"count\": \"10\", \"consumer\": \"myconsumer\", }, ) judge_command(\"XPENDING mystream group55", "123-123 key value\", { \"command\": \"xadd\", \"key\": \"mystream\", \"sfield\": \"key\",", "\"millisecond\": \"3600000\", \"stream_id\": \"1526569498055-0\", \"time\": \"TIME\", \"timestamp\": \"123456789\", }, )", "\"~\", }, ) judge_command(\" XTRIM mystream\", None) def test_xdel(judge_command): judge_command(", "\"mystream\", \"group\": \"mygroup\", }, ) judge_command( \"XINFO GROUPS mystream\", {\"command\":", "{ \"command\": \"XRANGE\", \"key\": \"somestream\", \"stream_id\": [\"1526985054069\", \"1526985055069-10\"], }, )", "\"count_const\": \"count\", \"count\": \"10\", }, ) def test_xpending(judge_command): judge_command( \"XPENDING", "\"key\", \"svalue\": \"value\", \"stream_id\": \"123-123\", }, ) judge_command( \"xadd mystream", "\"mygroup\", \"consumer\": \"myconsumer\", }, ) judge_command( \"XGROUP delconsumer mykey mygroup", "judge_command( \"XCLAIM mystream mygroup Alice 3600000 1526569498055-0 123 456 789\",", "1 key2\", \"stream_id\": \"2\", }, ) judge_command(\"XREADGROUP GROUP group consumer\",", "123123\", { \"command\": \"XACK\", \"key\": \"mystream\", \"group\": \"group1\", \"stream_id\": \"123123\",", "mygroup myconsumer\", { \"command\": \"XGROUP\", \"stream_delconsumer\": \"delconsumer\", \"key\": \"mykey\", \"group\":", "\"XRANGE somestream 1526985054069 1526985055069-10\", { \"command\": \"XRANGE\", \"key\": \"somestream\", \"stream_id\":", "JUSTID\", { \"command\": \"XCLAIM\", \"key\": \"mystream\", \"group\": \"mygroup\", \"consumer\": \"Alice\",", "myconsumer\", { \"command\": \"XPENDING\", \"key\": \"mystream\", \"group\": \"group55\", \"stream_id\": [\"-\",", "value\", { \"command\": \"xadd\", 
\"key\": \"mystream\", \"maxlen\": \"MAXLEN\", \"count\": \"1000\",", "\"XACK\", \"key\": \"mystream\", \"group\": \"group1\", \"stream_id\": \"123123\", }, ) judge_command(", "None) def test_xgroup_delconsumer(judge_command): judge_command( \"XGROUP delconsumer mykey mygroup myconsumer\", {", "\"keys\": \"key1 1 key2\", \"stream_id\": \"2\", }, ) judge_command(\"XREADGROUP GROUP", "\"1000\", \"sfield\": \"key\", \"svalue\": \"value\", \"stream_id\": \"*\", }, ) judge_command(", "{\"command\": \"XPENDING\", \"key\": \"mystream\", \"group\": \"group55\"}, ) judge_command( \"XPENDING mystream", "current grammar can't support multiple tokens # so the ids", "\"STREAMS\", \"keys\": \"mystream writers 0-0\", \"block\": \"BLOCK\", \"millisecond\": \"1000\", \"stream_id\":", "}, ) judge_command( \"XGROUP SETID mykey mygroup $\", { \"command\":", "consumers mystream mygroup GROUPS mystream\", None) judge_command(\"XINFO groups mystream mygroup\",", "}, ) judge_command( \"XCLAIM mystream mygroup Alice 3600000 1526569498055-0 IDEL", "judge_command( \"XGROUP SETID mykey mygroup 123\", { \"command\": \"XGROUP\", \"stream_setid\":", "\"key\", \"svalue\": \"value\", \"stream_id\": \"*\", }, ) judge_command( \"xadd mystream", "destroy mykey mygroup\", { \"command\": \"XGROUP\", \"stream_destroy\": \"destroy\", \"key\": \"mykey\",", "test_xdel(judge_command): judge_command( \"XDEL mystream 1581165000000 1549611229000 1581060831000\", {\"command\": \"XDEL\", \"key\":", "\"block\": \"BLOCK\", \"millisecond\": \"100\", \"noack\": \"NOACK\", \"streams\": \"STREAMS\", \"keys\": \"key1", "+ 10 myconsumer\", { \"command\": \"XPENDING\", \"key\": \"mystream\", \"group\": \"group55\",", "\"command\": \"XRANGE\", \"key\": \"somestream\", \"stream_id\": [\"1526985054069\", \"1526985055069\"], }, ) judge_command(", "mystream\", {\"command\": \"XINFO\", \"stream\": \"STREAM\", \"key\": \"mystream\"}, ) judge_command(\"XINFO HELP\",", "group55\", {\"command\": \"XPENDING\", \"key\": 
\"mystream\", \"group\": \"group55\"}, ) judge_command( \"XPENDING", "\"mystream\", \"group\": \"group55\", \"consumer\": \"myconsumer\", }, ) judge_command( \"XPENDING mystream", "2\", {\"command\": \"XTRIM\", \"key\": \"mystream\", \"maxlen\": \"MAXLEN\", \"count\": \"2\"}, )", "judge_command( \"XCLAIM mystream mygroup Alice 3600000 1526569498055-0 retrycount 7\", {", "a parameter judge_command(\"XGROUP CREATE mykey mygroup\", None) judge_command(\"XGROUP CREATE mykey\",", "[\"-\", \"+\"], \"count\": \"10\", \"consumer\": \"myconsumer\", }, ) judge_command(\"XPENDING mystream", "somestream 1526985054069 1526985055069-10 count 10\", { \"command\": \"XRANGE\", \"key\": \"somestream\",", "judge_command(\"XGROUP DESTROY mykey mygroup $\", None) def test_xgroup_delconsumer(judge_command): judge_command( \"XGROUP", "\"command\": \"XGROUP\", \"stream_create\": \"CREATE\", \"key\": \"mykey\", \"group\": \"mygroup\", \"stream_id\": \"$\",", "\"myconsumer\", }, ) judge_command( \"XGROUP delconsumer mykey mygroup $\", {", "\"mykey\", \"group\": \"mygroup\", \"consumer\": \"$\", }, ) judge_command(\"XGROUP delconsumer mykey", "}, ) judge_command( \"XPENDING mystream group55 - + 10 myconsumer\",", "\"XTRIM\", \"key\": \"mystream\", \"maxlen\": \"MAXLEN\", \"count\": \"2\"}, ) judge_command( \"", "def test_xgroup_create(judge_command): judge_command( \"XGROUP CREATE mykey mygroup 123\", { \"command\":", "\"XCLAIM\", \"key\": \"mystream\", \"group\": \"mygroup\", \"consumer\": \"Alice\", \"millisecond\": \"3600000\", \"stream_id\":", "\"XPENDING\", \"key\": \"mystream\", \"group\": \"group55\", \"consumer\": \"myconsumer\", }, ) judge_command(", "\"noack\": \"NOACK\", \"streams\": \"STREAMS\", \"keys\": \"key1 1 key2\", \"stream_id\": \"2\",", "\"stream_id\": \"123\", }, ) judge_command( \"XGROUP SETID mykey mygroup $\",", "\"stream_destroy\": \"destroy\", \"key\": \"mykey\", \"group\": \"mygroup\", }, ) judge_command(\"XGROUP destroy", "111\", {\"command\": \"XACK\", \"key\": 
\"mystream\", \"group\": \"group1\", \"stream_id\": \"111\"}, )", "{ \"command\": \"xadd\", \"key\": \"mystream\", \"maxlen\": \"MAXLEN\", \"count\": \"1000\", \"sfield\":", "None) def test_xgroup_setid(judge_command): judge_command( \"XGROUP SETID mykey mygroup 123\", {", "\"count\": \"1000\", \"sfield\": \"key\", \"svalue\": \"value\", \"stream_id\": \"*\", }, )", "GROUPS mystream\", {\"command\": \"XINFO\", \"stream_groups\": \"GROUPS\", \"key\": \"mystream\"}, ) judge_command(", "\"millisecond\": \"3600000\", \"stream_id\": \"1526569498055-0\", \"force\": \"FORCE\", }, ) judge_command( \"XCLAIM", "\"xadd\", \"key\": \"mystream\", \"maxlen\": \"MAXLEN\", \"count\": \"1000\", \"sfield\": \"key\", \"svalue\":", "judge_command( \"XRANGE somestream 1526985054069 1526985055069-10 count 10\", { \"command\": \"XRANGE\",", "\"1526569498055-0\", \"force\": \"FORCE\", }, ) judge_command( \"XCLAIM mystream mygroup Alice", "\"mystream\", \"group\": \"mygroup\", \"consumer\": \"Alice\", \"millisecond\": \"3600000\", \"stream_id\": \"1526569498055-0\", \"justid\":", "\"command\": \"XGROUP\", \"stream_create\": \"CREATE\", \"key\": \"mykey\", \"group\": \"mygroup\", \"stream_id\": \"123\",", "\"mystream\", \"group\": \"mygroup\", \"consumer\": \"Alice\", \"millisecond\": \"3600000\", \"stream_id\": \"1526569498055-0\", },", "\"stream_id\": \"111\"}, ) def test_xinfo(judge_command): judge_command( \"XINFO consumers mystream mygroup\",", "writers 0-0 0-0\", { \"command\": \"XREAD\", \"count_const\": \"COUNT\", \"count\": \"2\",", "\"key1 1 key2\", \"stream_id\": \"2\", }, ) judge_command( \"XREADGROUP GROUP", "\"consumer\": \"Alice\", \"millisecond\": \"3600000\", \"stream_id\": \"1526569498055-0\", \"time\": \"TIME\", \"timestamp\": \"123456789\",", "\"value\", \"stream_id\": \"123-123\", }, ) judge_command( \"xadd mystream 123-123 key", "10\", { \"command\": \"XINFO\", \"stream\": \"STREAM\", \"key\": \"mystream\", \"full_const\": \"FULL\",", "SETID mykey mygroup $\", None) 
def test_xgroup_destroy(judge_command): judge_command( \"XGROUP destroy", "\"mystream\", \"group\": \"mygroup\", \"consumer\": \"Alice\", \"millisecond\": \"3600000\", \"stream_id\": \"1526569498055-0\", \"force\":", "writers 0-0\", \"block\": \"BLOCK\", \"millisecond\": \"1000\", \"stream_id\": \"0-0\", }, )", "\"count\": \"1\", \"block\": \"BLOCK\", \"millisecond\": \"100\", \"noack\": \"NOACK\", \"streams\": \"STREAMS\",", "- +\", {\"command\": \"XRANGE\", \"key\": \"somestream\", \"stream_id\": [\"-\", \"+\"]}, )", "\"command\": \"XPENDING\", \"key\": \"mystream\", \"group\": \"group55\", \"consumer\": \"myconsumer\", }, )", "judge_command( \"XPENDING mystream group55 myconsumer\", { \"command\": \"XPENDING\", \"key\": \"mystream\",", "\"XGROUP\", \"stream_delconsumer\": \"delconsumer\", \"key\": \"mykey\", \"group\": \"mygroup\", \"consumer\": \"$\", },", "STREAMS key1 1 key2 2\", { \"command\": \"XREADGROUP\", \"stream_group\": \"GROUP\",", "\"keys\": \"mystream writers 0-0\", \"block\": \"BLOCK\", \"millisecond\": \"1000\", \"stream_id\": \"0-0\",", "short of a parameter judge_command(\"XGROUP CREATE mykey mygroup\", None) judge_command(\"XGROUP", "judge_command(\"XGROUP CREATE mykey\", None) def test_xgroup_setid(judge_command): judge_command( \"XGROUP SETID mykey", "MAXLEN ~ 1000 * key value\", { \"command\": \"xadd\", \"key\":", ") judge_command( \"XCLAIM mystream mygroup Alice 3600000 1526569498055-0 TIME 123456789\",", "\"key1 1 key2\", \"stream_id\": \"2\", }, ) judge_command(\"XREADGROUP GROUP group", "mystream 123-123 key value\", { \"command\": \"xadd\", \"key\": \"mystream\", \"sfield\":", "def test_xtrim(judge_command): judge_command( \" XTRIM mystream MAXLEN 2\", {\"command\": \"XTRIM\",", "\"mygroup1\", \"consumer\": \"Bob\", \"streams\": \"STREAMS\", \"keys\": \"key1 1 key2\", \"stream_id\":", "test_xgroup_stream(judge_command): judge_command( \"XACK mystream group1 123123\", { \"command\": \"XACK\", \"key\":", "mykey\", None) def 
test_xgroup_setid(judge_command): judge_command( \"XGROUP SETID mykey mygroup 123\",", ") judge_command( \"XRANGE somestream 1526985054069 1526985055069\", { \"command\": \"XRANGE\", \"key\":", "\"key\": \"mystream\", \"group\": \"mygroup\", \"consumer\": \"Alice\", \"millisecond\": \"3600000\", \"stream_id\": \"1526569498055-0\",", "COUNT 2 BLOCK 1000 STREAMS mystream writers 0-0 0-0\", {", "}, ) judge_command( \"XGROUP CREATE mykey mygroup $\", { \"command\":", "\"maxlen\": \"MAXLEN\", \"approximately\": \"~\", \"count\": \"1000\", \"sfield\": \"key\", \"svalue\": \"value\",", "}, ) # two subcommand together shouldn't match judge_command(\"XGROUP CREATE", "\"sfield\": \"key\", \"svalue\": \"value\", \"stream_id\": \"*\", }, ) # spcify", "judge_command( \"xadd mystream 123-123 key value foo bar hello world\",", "def test_xgroup_delconsumer(judge_command): judge_command( \"XGROUP delconsumer mykey mygroup myconsumer\", { \"command\":", ") judge_command( \"XCLAIM mystream mygroup Alice 3600000 1526569498055-0 FORCE\", {", "\"key\": \"mystream\"}, ) judge_command( \"XINFO STREAM mystream\", {\"command\": \"XINFO\", \"stream\":", "mygroup\", { \"command\": \"XINFO\", \"stream_consumers\": \"consumers\", \"key\": \"mystream\", \"group\": \"mygroup\",", ") judge_command( \"XDEL mystream 1581165000000\", {\"command\": \"XDEL\", \"key\": \"mystream\", \"stream_id\":", "\"XCLAIM mystream mygroup Alice 3600000 1526569498055-0 FORCE\", { \"command\": \"XCLAIM\",", ") def test_xgroup_create(judge_command): judge_command( \"XGROUP CREATE mykey mygroup 123\", {", "1 key2\", \"stream_id\": \"2\", }, ) judge_command( \"XREADGROUP GROUP mygroup1", "mystream mygroup Alice 3600000 1526569498055-0 retrycount 7\", { \"command\": \"XCLAIM\",", "\"Alice\", \"millisecond\": \"3600000\", \"stream_id\": \"1526569498055-0\", \"time\": \"TIME\", \"timestamp\": \"123456789\", },", "\"CREATE\", \"key\": \"mykey\", \"group\": \"mygroup\", \"stream_id\": \"123\", }, ) judge_command(", 
"\"stream_id\": \"1526569498055-0\", \"force\": \"FORCE\", }, ) judge_command( \"XCLAIM mystream mygroup", "None) judge_command(\"XINFO groups mystream mygroup\", None) def test_xinfo_with_full(judge_command): judge_command( \"XINFO", "+ \", None) def test_xadd(judge_command): judge_command( \"xadd mystream MAXLEN ~", "\"group\": \"group1\", \"stream_id\": \"111\"}, ) def test_xinfo(judge_command): judge_command( \"XINFO consumers", "\"group\": \"mygroup1\", \"consumer\": \"Bob\", \"streams\": \"STREAMS\", \"keys\": \"key1 1 key2\",", "# short of a parameter judge_command(\"XGROUP CREATE mykey mygroup\", None)", "- + 10\", { \"command\": \"XPENDING\", \"key\": \"mystream\", \"group\": \"group55\",", "test_xinfo_with_full(judge_command): judge_command( \"XINFO STREAM mystream FULL\", { \"command\": \"XINFO\", \"stream\":", "\"full_const\": \"FULL\", }, ) judge_command( \"XINFO STREAM mystream FULL count", "def test_xread(judge_command): judge_command( \"XREAD COUNT 2 STREAMS mystream writers 0-0", "test_xgroup_create(judge_command): judge_command( \"XGROUP CREATE mykey mygroup 123\", { \"command\": \"XGROUP\",", "}, ) judge_command( \"XACK mystream group1 123123 111\", {\"command\": \"XACK\",", "XTRIM mystream\", None) def test_xdel(judge_command): judge_command( \"XDEL mystream 1581165000000 1549611229000", "\"mygroup\", \"consumer\": \"Alice\", \"millisecond\": \"3600000\", \"stream_id\": \"1526569498055-0\", }, ) judge_command(", "10\", { \"command\": \"XPENDING\", \"key\": \"mystream\", \"group\": \"group55\", \"stream_id\": [\"-\",", "\"Alice\", \"millisecond\": \"3600000\", \"stream_id\": \"1526569498055-0\", \"force\": \"FORCE\", }, ) judge_command(", "\"123\", }, ) judge_command( \"XGROUP CREATE mykey mygroup $\", {", "mygroup\", None) def test_xgroup_stream(judge_command): judge_command( \"XACK mystream group1 123123\", {", "\"group\": \"mygroup\", }, ) judge_command(\"XGROUP destroy mykey\", None) judge_command(\"XGROUP DESTROY", "\"group\": \"mygroup\", 
\"consumer\": \"Alice\", \"millisecond\": \"3600000\", \"stream_id\": \"789\", }, )", "def test_xadd(judge_command): judge_command( \"xadd mystream MAXLEN ~ 1000 * key", "[\"1526985054069\", \"1526985055069-10\"], }, ) judge_command( \"XRANGE somestream 1526985054069 1526985055069-10 count", ") judge_command( \"XREADGROUP GROUP mygroup1 Bob STREAMS key1 1 key2", "Alice 3600000 1526569498055-0 TIME 123456789\", { \"command\": \"XCLAIM\", \"key\": \"mystream\",", "mystream 123-123 key value foo bar hello world\", { \"command\":", "\"1526985055069-10\"], }, ) judge_command( \"XRANGE somestream 1526985054069 1526985055069-10 count 10\",", "\"stream_id\": [\"-\", \"+\"], \"count\": \"10\", \"consumer\": \"myconsumer\", }, ) judge_command(\"XPENDING", ") judge_command( \" XTRIM mystream MAXLEN ~ 2\", { \"command\":", "judge_command( \"XINFO consumers mystream mygroup\", { \"command\": \"XINFO\", \"stream_consumers\": \"consumers\",", "\"mygroup\", \"stream_id\": \"123\", }, ) judge_command( \"XGROUP CREATE mykey mygroup", "mystream FULL count 10\", { \"command\": \"XINFO\", \"stream\": \"STREAM\", \"key\":", "\"XGROUP SETID mykey mygroup $\", { \"command\": \"XGROUP\", \"stream_setid\": \"SETID\",", "\"retrycount\": \"retrycount\", \"count\": \"7\", }, ) judge_command( \"XCLAIM mystream mygroup", "\"millisecond\": [\"3600000\", \"300\"], \"stream_id\": \"1526569498055-0\", \"idel\": \"IDEL\", }, ) judge_command(", "\"group\": \"mygroup\", \"stream_id\": \"$\", }, ) # short of a", "\"millisecond\": \"3600000\", \"stream_id\": \"1526569498055-0\", \"retrycount\": \"retrycount\", \"count\": \"7\", }, )", "}, ) judge_command( \"XRANGE somestream 1526985054069 1526985055069-10 count 10\", {", "\"stream_id\": \"0-0\", }, ) def test_xreadgroup(judge_command): judge_command( \"XREADGROUP GROUP mygroup1", "\"mystream\", \"maxlen\": \"MAXLEN\", \"count\": \"2\", \"approximately\": \"~\", }, ) judge_command(\"", "\"$\", }, ) judge_command(\"XGROUP delconsumer mykey mygroup\", None) 
def test_xgroup_stream(judge_command):", "\"stream_group\": \"GROUP\", \"group\": \"mygroup1\", \"consumer\": \"Bob\", \"count_const\": \"COUNT\", \"count\": \"1\",", "groups mystream mygroup\", None) def test_xinfo_with_full(judge_command): judge_command( \"XINFO STREAM mystream", "\"retrycount\", \"count\": \"7\", }, ) judge_command( \"XCLAIM mystream mygroup Alice", "{\"command\": \"XACK\", \"key\": \"mystream\", \"group\": \"group1\", \"stream_id\": \"111\"}, ) def", "judge_command( \"XRANGE somestream - +\", {\"command\": \"XRANGE\", \"key\": \"somestream\", \"stream_id\":", "Alice 3600000 1526569498055-0 JUSTID\", { \"command\": \"XCLAIM\", \"key\": \"mystream\", \"group\":", "\"key\": \"somestream\", \"stream_id\": [\"1526985054069\", \"1526985055069-10\"], }, ) judge_command( \"XRANGE somestream", "{\"command\": \"XINFO\", \"stream_groups\": \"GROUPS\", \"key\": \"mystream\"}, ) judge_command( \"XINFO STREAM", "\"XINFO\", \"help\": \"HELP\"}) judge_command(\"XINFO consumers mystream mygroup GROUPS mystream\", None)", "mystream MAXLEN ~ 1000 * key value\", { \"command\": \"xadd\",", "judge_command( \"XINFO GROUPS mystream\", {\"command\": \"XINFO\", \"stream_groups\": \"GROUPS\", \"key\": \"mystream\"},", "MAXLEN option judge_command( \"xadd mystream MAXLEN 1000 * key value\",", "def test_xdel(judge_command): judge_command( \"XDEL mystream 1581165000000 1549611229000 1581060831000\", {\"command\": \"XDEL\",", "}, ) judge_command( \"XCLAIM mystream mygroup Alice 3600000 1526569498055-0 FORCE\",", "can't support multiple tokens # so the ids will be", "mystream mygroup GROUPS mystream\", None) judge_command(\"XINFO groups mystream mygroup\", None)", "+ 10\", { \"command\": \"XPENDING\", \"key\": \"mystream\", \"group\": \"group55\", \"stream_id\":", "TIME 123456789\", { \"command\": \"XCLAIM\", \"key\": \"mystream\", \"group\": \"mygroup\", \"consumer\":", "somestream - +\", {\"command\": \"XRANGE\", \"key\": \"somestream\", \"stream_id\": [\"-\", \"+\"]},", 
"FIXME current grammar can't support multiple tokens # so the", "\"group\": \"mygroup\", \"consumer\": \"Alice\", \"millisecond\": \"3600000\", \"stream_id\": \"1526569498055-0\", \"retrycount\": \"retrycount\",", "\"XRANGE somestream 1526985054069 1526985055069\", { \"command\": \"XRANGE\", \"key\": \"somestream\", \"stream_id\":", "\"XINFO\", \"stream_consumers\": \"consumers\", \"key\": \"mystream\", \"group\": \"mygroup\", }, ) judge_command(", "\"hello\", \"svalue\": \"world\", \"stream_id\": \"123-123\", }, ) def test_xtrim(judge_command): judge_command(", "\"mystream\", \"maxlen\": \"MAXLEN\", \"approximately\": \"~\", \"count\": \"1000\", \"sfield\": \"key\", \"svalue\":", "{ \"command\": \"XINFO\", \"stream\": \"STREAM\", \"key\": \"mystream\", \"full_const\": \"FULL\", },", "mystream mygroup Alice 3600000 1526569498055-0 IDEL 300\", { \"command\": \"XCLAIM\",", "0-0\", \"stream_id\": \"0-0\", }, ) judge_command( \"XREAD COUNT 2 BLOCK", "\"stream_setid\": \"SETID\", \"key\": \"mykey\", \"group\": \"mygroup\", \"stream_id\": \"123\", }, )", "def test_xreadgroup(judge_command): judge_command( \"XREADGROUP GROUP mygroup1 Bob COUNT 1 BLOCK", "\"MAXLEN\", \"approximately\": \"~\", \"count\": \"1000\", \"sfield\": \"key\", \"svalue\": \"value\", \"stream_id\":", "\"key\": \"mystream\", \"group\": \"group55\", \"stream_id\": [\"-\", \"+\"], \"count\": \"10\", \"consumer\":", "\"command\": \"XCLAIM\", \"key\": \"mystream\", \"group\": \"mygroup\", \"consumer\": \"Alice\", \"millisecond\": [\"3600000\",", "\"stream_id\": \"1526569498055-0\", \"time\": \"TIME\", \"timestamp\": \"123456789\", }, ) judge_command( \"XCLAIM", "\"mystream\", \"stream_id\": \"1581165000000\"}, ) def test_xclaim(judge_command): judge_command( \"XCLAIM mystream mygroup", "\"group55\", \"consumer\": \"myconsumer\", }, ) judge_command( \"XPENDING mystream group55 -", "judge_command(\"XGROUP CREATE mykey mygroup\", None) judge_command(\"XGROUP CREATE mykey\", None) def", "\"consumer\": 
\"Bob\", \"streams\": \"STREAMS\", \"keys\": \"key1 1 key2\", \"stream_id\": \"2\",", "}, ) judge_command( \"xadd mystream * key value\", { \"command\":", "XTRIM mystream MAXLEN 2\", {\"command\": \"XTRIM\", \"key\": \"mystream\", \"maxlen\": \"MAXLEN\",", "Bob COUNT 1 BLOCK 100 NOACK STREAMS key1 1 key2", "mygroup $\", { \"command\": \"XGROUP\", \"stream_setid\": \"SETID\", \"key\": \"mykey\", \"group\":", "\"mykey\", \"group\": \"mygroup\", \"stream_id\": \"$\", }, ) # short of", "+\", {\"command\": \"XRANGE\", \"key\": \"somestream\", \"stream_id\": [\"-\", \"+\"]}, ) judge_command(", "CREATE mykey mygroup $\", { \"command\": \"XGROUP\", \"stream_create\": \"CREATE\", \"key\":", "key2\", \"stream_id\": \"2\", }, ) judge_command(\"XREADGROUP GROUP group consumer\", None)", "\"count\": \"2\", \"streams\": \"STREAMS\", \"keys\": \"mystream writers 0-0\", \"block\": \"BLOCK\",", "myconsumer\", { \"command\": \"XGROUP\", \"stream_delconsumer\": \"delconsumer\", \"key\": \"mykey\", \"group\": \"mygroup\",", "\"consumer\": \"Alice\", \"millisecond\": [\"3600000\", \"300\"], \"stream_id\": \"1526569498055-0\", \"idel\": \"IDEL\", },", "3600000 1526569498055-0 JUSTID\", { \"command\": \"XCLAIM\", \"key\": \"mystream\", \"group\": \"mygroup\",", "{\"command\": \"XINFO\", \"help\": \"HELP\"}) judge_command(\"XINFO consumers mystream mygroup GROUPS mystream\",", "\"XPENDING\", \"key\": \"mystream\", \"group\": \"group55\", \"stream_id\": [\"-\", \"+\"], \"count\": \"10\",", "\"~\", \"count\": \"1000\", \"sfield\": \"key\", \"svalue\": \"value\", \"stream_id\": \"*\", },", ") judge_command( \"xadd mystream * key value\", { \"command\": \"xadd\",", "3600000 1526569498055-0 retrycount 7\", { \"command\": \"XCLAIM\", \"key\": \"mystream\", \"group\":", "\"XRANGE\", \"key\": \"somestream\", \"stream_id\": [\"1526985054069\", \"1526985055069-10\"], \"count_const\": \"count\", \"count\": \"10\",", "\"command\": \"XGROUP\", \"stream_setid\": \"SETID\", \"key\": \"mykey\", 
\"group\": \"mygroup\", \"stream_id\": \"123\",", "\"key\": \"mykey\", \"group\": \"mygroup\", \"consumer\": \"myconsumer\", }, ) judge_command( \"XGROUP", "\"stream_id\": \"123-123\", }, ) def test_xtrim(judge_command): judge_command( \" XTRIM mystream", "mystream mygroup Alice 3600000 1526569498055-0 TIME 123456789\", { \"command\": \"XCLAIM\",", "3600000 1526569498055-0 TIME 123456789\", { \"command\": \"XCLAIM\", \"key\": \"mystream\", \"group\":", "\"mystream writers 0-0\", \"stream_id\": \"0-0\", }, ) judge_command( \"XREAD COUNT", "\"xadd mystream MAXLEN 1000 * key value\", { \"command\": \"xadd\",", "\"GROUP\", \"group\": \"mygroup1\", \"consumer\": \"Bob\", \"streams\": \"STREAMS\", \"keys\": \"key1 1", "\"SETID\", \"key\": \"mykey\", \"group\": \"mygroup\", \"stream_id\": \"$\", }, ) #", "{ \"command\": \"XGROUP\", \"stream_delconsumer\": \"delconsumer\", \"key\": \"mykey\", \"group\": \"mygroup\", \"consumer\":", "# spcify stream id judge_command( \"xadd mystream 123-123 key value\",", ") def test_xread(judge_command): judge_command( \"XREAD COUNT 2 STREAMS mystream writers", "\"maxlen\": \"MAXLEN\", \"count\": \"2\", \"approximately\": \"~\", }, ) judge_command(\" XTRIM", "CREATE mykey mygroup 123\", { \"command\": \"XGROUP\", \"stream_create\": \"CREATE\", \"key\":", "\"streams\": \"STREAMS\", \"keys\": \"key1 1 key2\", \"stream_id\": \"2\", }, )", ") # test for MAXLEN option judge_command( \"xadd mystream MAXLEN", "}, ) judge_command(\"XGROUP destroy mykey\", None) judge_command(\"XGROUP DESTROY mykey mygroup", "test for MAXLEN option judge_command( \"xadd mystream MAXLEN 1000 *", "mykey mygroup\", None) judge_command(\"XGROUP CREATE mykey\", None) def test_xgroup_setid(judge_command): judge_command(", "\"timestamp\": \"123456789\", }, ) judge_command( \"XCLAIM mystream mygroup Alice 3600000", "\"XACK mystream group1 123123 111\", {\"command\": \"XACK\", \"key\": \"mystream\", \"group\":", "\"stream_id\": \"1526569498055-0\", }, ) judge_command( 
\"XCLAIM mystream mygroup Alice 3600000", "\"XINFO\", \"stream\": \"STREAM\", \"key\": \"mystream\", \"full_const\": \"FULL\", }, ) judge_command(", "0-0\", \"block\": \"BLOCK\", \"millisecond\": \"1000\", \"stream_id\": \"0-0\", }, ) def", "\"key\": \"mykey\", \"group\": \"mygroup\", \"stream_id\": \"$\", }, ) # short", "ids will be recongized to keys. \"keys\": \"mystream writers 0-0\",", "mystream\", None) judge_command(\"XINFO groups mystream mygroup\", None) def test_xinfo_with_full(judge_command): judge_command(", "123456789\", { \"command\": \"XCLAIM\", \"key\": \"mystream\", \"group\": \"mygroup\", \"consumer\": \"Alice\",", "\"XTRIM\", \"key\": \"mystream\", \"maxlen\": \"MAXLEN\", \"count\": \"2\", \"approximately\": \"~\", },", "\"3600000\", \"stream_id\": \"1526569498055-0\", \"time\": \"TIME\", \"timestamp\": \"123456789\", }, ) judge_command(", "\"JUSTID\", }, ) def test_xread(judge_command): judge_command( \"XREAD COUNT 2 STREAMS", "\"stream_id\": [\"-\", \"+\"]}, ) judge_command( \"XRANGE somestream 1526985054069 1526985055069\", {", "\"count_const\": \"COUNT\", \"count\": \"1\", \"block\": \"BLOCK\", \"millisecond\": \"100\", \"noack\": \"NOACK\",", "\"delconsumer\", \"key\": \"mykey\", \"group\": \"mygroup\", \"consumer\": \"$\", }, ) judge_command(\"XGROUP", "\"10\", }, ) def test_xgroup_create(judge_command): judge_command( \"XGROUP CREATE mykey mygroup", "\"mystream\"}, ) judge_command(\"XINFO HELP\", {\"command\": \"XINFO\", \"help\": \"HELP\"}) judge_command(\"XINFO consumers", "\"group\": \"group55\"}, ) judge_command( \"XPENDING mystream group55 myconsumer\", { \"command\":", "None) def test_xinfo_with_full(judge_command): judge_command( \"XINFO STREAM mystream FULL\", { \"command\":", "}, ) judge_command( \"XRANGE somestream 1526985054069 1526985055069-10\", { \"command\": \"XRANGE\",", ") judge_command( \"xadd mystream 123-123 key value foo bar hello", "\"1000\", \"sfield\": \"key\", \"svalue\": \"value\", \"stream_id\": \"*\", }, ) #", 
"\"XDEL\", \"key\": \"mystream\", \"stream_id\": \"1581165000000\"}, ) def test_xclaim(judge_command): judge_command( \"XCLAIM", "\"count\": \"2\", \"streams\": \"STREAMS\", # FIXME current grammar can't support", "multiple tokens # so the ids will be recongized to", "None) def test_xadd(judge_command): judge_command( \"xadd mystream MAXLEN ~ 1000 *", "}, ) def test_xtrim(judge_command): judge_command( \" XTRIM mystream MAXLEN 2\",", ") judge_command( \"XINFO STREAM mystream\", {\"command\": \"XINFO\", \"stream\": \"STREAM\", \"key\":", "judge_command( \"XINFO STREAM mystream FULL count 10\", { \"command\": \"XINFO\",", ") judge_command( \"XPENDING mystream group55 - + 10\", { \"command\":", "1549611229000 1581060831000\", {\"command\": \"XDEL\", \"key\": \"mystream\", \"stream_id\": \"1581060831000\"}, ) judge_command(", "judge_command( \"xadd mystream MAXLEN ~ 1000 * key value\", {", "}, ) # test for MAXLEN option judge_command( \"xadd mystream", ") judge_command( \"XGROUP SETID mykey mygroup $\", { \"command\": \"XGROUP\",", "\"Alice\", \"millisecond\": \"3600000\", \"stream_id\": \"1526569498055-0\", \"justid\": \"JUSTID\", }, ) def", "mygroup 123\", { \"command\": \"XGROUP\", \"stream_setid\": \"SETID\", \"key\": \"mykey\", \"group\":", "{ \"command\": \"XREADGROUP\", \"stream_group\": \"GROUP\", \"group\": \"mygroup1\", \"consumer\": \"Bob\", \"streams\":", "\"sfield\": \"key\", \"svalue\": \"value\", \"stream_id\": \"*\", }, ) judge_command( \"xadd", "\"group\": \"mygroup\", \"stream_id\": \"123\", }, ) judge_command( \"XGROUP SETID mykey", "None) def test_xgroup_stream(judge_command): judge_command( \"XACK mystream group1 123123\", { \"command\":", "\"123\", }, ) judge_command( \"XGROUP SETID mykey mygroup $\", {", "\"1581165000000\"}, ) def test_xclaim(judge_command): judge_command( \"XCLAIM mystream mygroup Alice 3600000", "\"idel\": \"IDEL\", }, ) judge_command( \"XCLAIM mystream mygroup Alice 3600000", "DESTROY mykey mygroup $\", None) def 
test_xgroup_delconsumer(judge_command): judge_command( \"XGROUP delconsumer", "\"Alice\", \"millisecond\": \"3600000\", \"stream_id\": \"1526569498055-0\", \"retrycount\": \"retrycount\", \"count\": \"7\", },", "\"XDEL mystream 1581165000000\", {\"command\": \"XDEL\", \"key\": \"mystream\", \"stream_id\": \"1581165000000\"}, )", "\"XGROUP\", \"stream_setid\": \"SETID\", \"key\": \"mykey\", \"group\": \"mygroup\", \"stream_id\": \"$\", },", "\"count\": \"2\", \"approximately\": \"~\", }, ) judge_command(\" XTRIM mystream\", None)", "\"millisecond\": \"3600000\", \"stream_id\": \"1526569498055-0\", \"justid\": \"JUSTID\", }, ) def test_xread(judge_command):", "judge_command( \"XGROUP CREATE mykey mygroup $\", { \"command\": \"XGROUP\", \"stream_create\":", "\"3600000\", \"stream_id\": \"789\", }, ) judge_command( \"XCLAIM mystream mygroup Alice", "\"key\": \"somestream\", \"stream_id\": [\"1526985054069\", \"1526985055069\"], }, ) judge_command( \"XRANGE somestream", "mygroup Alice 3600000 1526569498055-0\", { \"command\": \"XCLAIM\", \"key\": \"mystream\", \"group\":", "\"XRANGE\", \"key\": \"somestream\", \"stream_id\": [\"1526985054069\", \"1526985055069\"], }, ) judge_command( \"XRANGE", "\"command\": \"XGROUP\", \"stream_destroy\": \"destroy\", \"key\": \"mykey\", \"group\": \"mygroup\", }, )", "value foo bar hello world\", { \"command\": \"xadd\", \"key\": \"mystream\",", "\"STREAMS\", # FIXME current grammar can't support multiple tokens #", "GROUPS mystream\", None) judge_command(\"XINFO groups mystream mygroup\", None) def test_xinfo_with_full(judge_command):", "\"1526569498055-0\", \"idel\": \"IDEL\", }, ) judge_command( \"XCLAIM mystream mygroup Alice", "GROUP mygroup1 Bob STREAMS key1 1 key2 2\", { \"command\":", "STREAM mystream\", {\"command\": \"XINFO\", \"stream\": \"STREAM\", \"key\": \"mystream\"}, ) judge_command(\"XINFO", "\"xadd mystream 123-123 key value\", { \"command\": \"xadd\", \"key\": \"mystream\",", "\"group\": \"group55\", \"consumer\": 
\"myconsumer\", }, ) judge_command( \"XPENDING mystream group55", "\"mystream\", \"sfield\": \"key\", \"svalue\": \"value\", \"stream_id\": \"*\", }, ) #", "mygroup Alice 3600000 1526569498055-0 JUSTID\", { \"command\": \"XCLAIM\", \"key\": \"mystream\",", "\"mygroup\", \"consumer\": \"Alice\", \"millisecond\": \"3600000\", \"stream_id\": \"789\", }, ) judge_command(", "123 456 789\", { \"command\": \"XCLAIM\", \"key\": \"mystream\", \"group\": \"mygroup\",", "test_xclaim(judge_command): judge_command( \"XCLAIM mystream mygroup Alice 3600000 1526569498055-0\", { \"command\":", "\"XGROUP delconsumer mykey mygroup myconsumer\", { \"command\": \"XGROUP\", \"stream_delconsumer\": \"delconsumer\",", "\"stream_create\": \"CREATE\", \"key\": \"mykey\", \"group\": \"mygroup\", \"stream_id\": \"123\", }, )", ") def test_xclaim(judge_command): judge_command( \"XCLAIM mystream mygroup Alice 3600000 1526569498055-0\",", "\"keys\": \"key1 1 key2\", \"stream_id\": \"2\", }, ) judge_command( \"XREADGROUP", "$\", { \"command\": \"XGROUP\", \"stream_setid\": \"SETID\", \"key\": \"mykey\", \"group\": \"mygroup\",", "}, ) def test_xgroup_create(judge_command): judge_command( \"XGROUP CREATE mykey mygroup 123\",", "\"consumer\": \"$\", }, ) judge_command(\"XGROUP delconsumer mykey mygroup\", None) def", "}, ) # spcify stream id judge_command( \"xadd mystream 123-123", "2\", { \"command\": \"XREADGROUP\", \"stream_group\": \"GROUP\", \"group\": \"mygroup1\", \"consumer\": \"Bob\",", "mygroup Alice 3600000 1526569498055-0 TIME 123456789\", { \"command\": \"XCLAIM\", \"key\":", "subcommand together shouldn't match judge_command(\"XGROUP CREATE mykey mygroup 123 SETID", "\"count\", \"count\": \"10\", }, ) def test_xpending(judge_command): judge_command( \"XPENDING mystream", "mystream group1 123123\", { \"command\": \"XACK\", \"key\": \"mystream\", \"group\": \"group1\",", "789\", { \"command\": \"XCLAIM\", \"key\": \"mystream\", \"group\": \"mygroup\", \"consumer\": \"Alice\",", 
"test_xadd(judge_command): judge_command( \"xadd mystream MAXLEN ~ 1000 * key value\",", "key value\", { \"command\": \"xadd\", \"key\": \"mystream\", \"maxlen\": \"MAXLEN\", \"approximately\":", "\"somestream\", \"stream_id\": [\"1526985054069\", \"1526985055069\"], }, ) judge_command( \"XRANGE somestream 1526985054069", "def test_xgroup_stream(judge_command): judge_command( \"XACK mystream group1 123123\", { \"command\": \"XACK\",", "{ \"command\": \"XINFO\", \"stream_consumers\": \"consumers\", \"key\": \"mystream\", \"group\": \"mygroup\", },", "\"mystream\", \"sfield\": \"hello\", \"svalue\": \"world\", \"stream_id\": \"123-123\", }, ) def", "\"100\", \"noack\": \"NOACK\", \"streams\": \"STREAMS\", \"keys\": \"key1 1 key2\", \"stream_id\":", "# so the ids will be recongized to keys. \"keys\":", "mykey mygroup 123\", { \"command\": \"XGROUP\", \"stream_setid\": \"SETID\", \"key\": \"mykey\",", "to keys. \"keys\": \"mystream writers 0-0\", \"stream_id\": \"0-0\", }, )", "def test_xgroup_destroy(judge_command): judge_command( \"XGROUP destroy mykey mygroup\", { \"command\": \"XGROUP\",", "bar hello world\", { \"command\": \"xadd\", \"key\": \"mystream\", \"sfield\": \"hello\",", "{\"command\": \"XDEL\", \"key\": \"mystream\", \"stream_id\": \"1581060831000\"}, ) judge_command( \"XDEL mystream", "300\", { \"command\": \"XCLAIM\", \"key\": \"mystream\", \"group\": \"mygroup\", \"consumer\": \"Alice\",", "\"stream_id\": \"*\", }, ) # test for MAXLEN option judge_command(", ") judge_command( \"XCLAIM mystream mygroup Alice 3600000 1526569498055-0 JUSTID\", {", "\"XINFO\", \"stream\": \"STREAM\", \"key\": \"mystream\"}, ) judge_command(\"XINFO HELP\", {\"command\": \"XINFO\",", "\"mykey\", \"group\": \"mygroup\", \"stream_id\": \"$\", }, ) # two subcommand", "\"mystream\", \"group\": \"group1\", \"stream_id\": \"111\"}, ) def test_xinfo(judge_command): judge_command( \"XINFO", "mystream MAXLEN 1000 * key value\", { \"command\": \"xadd\", \"key\":", "\"maxlen\": 
\"MAXLEN\", \"count\": \"2\"}, ) judge_command( \" XTRIM mystream MAXLEN", "10\", { \"command\": \"XRANGE\", \"key\": \"somestream\", \"stream_id\": [\"1526985054069\", \"1526985055069-10\"], \"count_const\":", "}, ) judge_command( \"XCLAIM mystream mygroup Alice 3600000 1526569498055-0 TIME", "\"key\", \"svalue\": \"value\", \"stream_id\": \"*\", }, ) # spcify stream", "\"key\": \"mystream\", \"group\": \"mygroup\", }, ) judge_command( \"XINFO GROUPS mystream\",", "judge_command( \"xadd mystream * key value\", { \"command\": \"xadd\", \"key\":", "\"MAXLEN\", \"count\": \"2\", \"approximately\": \"~\", }, ) judge_command(\" XTRIM mystream\",", "\"stream_id\": \"0-0\", }, ) judge_command( \"XREAD COUNT 2 BLOCK 1000", "\"BLOCK\", \"millisecond\": \"100\", \"noack\": \"NOACK\", \"streams\": \"STREAMS\", \"keys\": \"key1 1", "\"XRANGE somestream - +\", {\"command\": \"XRANGE\", \"key\": \"somestream\", \"stream_id\": [\"-\",", "1526569498055-0 FORCE\", { \"command\": \"XCLAIM\", \"key\": \"mystream\", \"group\": \"mygroup\", \"consumer\":", "GROUP mygroup1 Bob COUNT 1 BLOCK 100 NOACK STREAMS key1", "\"count\": \"2\"}, ) judge_command( \" XTRIM mystream MAXLEN ~ 2\",", ") judge_command( \"XCLAIM mystream mygroup Alice 3600000 1526569498055-0 IDEL 300\",", "def test_xinfo(judge_command): judge_command( \"XINFO consumers mystream mygroup\", { \"command\": \"XINFO\",", "judge_command( \"XREAD COUNT 2 BLOCK 1000 STREAMS mystream writers 0-0", "{ \"command\": \"XRANGE\", \"key\": \"somestream\", \"stream_id\": [\"1526985054069\", \"1526985055069-10\"], \"count_const\": \"count\",", "\"XGROUP\", \"stream_delconsumer\": \"delconsumer\", \"key\": \"mykey\", \"group\": \"mygroup\", \"consumer\": \"myconsumer\", },", "1 BLOCK 100 NOACK STREAMS key1 1 key2 2\", {", "judge_command( \" XTRIM mystream MAXLEN ~ 2\", { \"command\": \"XTRIM\",", "\"HELP\"}) judge_command(\"XINFO consumers mystream mygroup GROUPS mystream\", None) judge_command(\"XINFO groups", "\"mystream writers 0-0\", 
\"block\": \"BLOCK\", \"millisecond\": \"1000\", \"stream_id\": \"0-0\", },", "\"keys\": \"mystream writers 0-0\", \"stream_id\": \"0-0\", }, ) judge_command( \"XREAD", "MAXLEN 2\", {\"command\": \"XTRIM\", \"key\": \"mystream\", \"maxlen\": \"MAXLEN\", \"count\": \"2\"},", "\"command\": \"xadd\", \"key\": \"mystream\", \"maxlen\": \"MAXLEN\", \"count\": \"1000\", \"sfield\": \"key\",", "{ \"command\": \"xadd\", \"key\": \"mystream\", \"sfield\": \"key\", \"svalue\": \"value\", \"stream_id\":", "\"STREAM\", \"key\": \"mystream\"}, ) judge_command(\"XINFO HELP\", {\"command\": \"XINFO\", \"help\": \"HELP\"})", "mygroup Alice 3600000 1526569498055-0 IDEL 300\", { \"command\": \"XCLAIM\", \"key\":", "grammar can't support multiple tokens # so the ids will", "\"key\": \"mystream\", \"group\": \"group55\"}, ) judge_command( \"XPENDING mystream group55 myconsumer\",", "STREAM mystream FULL count 10\", { \"command\": \"XINFO\", \"stream\": \"STREAM\",", "\"mygroup\", }, ) judge_command(\"XGROUP destroy mykey\", None) judge_command(\"XGROUP DESTROY mykey", "mystream group55 myconsumer\", { \"command\": \"XPENDING\", \"key\": \"mystream\", \"group\": \"group55\",", "test_xgroup_delconsumer(judge_command): judge_command( \"XGROUP delconsumer mykey mygroup myconsumer\", { \"command\": \"XGROUP\",", "\"XREADGROUP GROUP mygroup1 Bob STREAMS key1 1 key2 2\", {", "\"mystream\", \"group\": \"mygroup\", \"consumer\": \"Alice\", \"millisecond\": [\"3600000\", \"300\"], \"stream_id\": \"1526569498055-0\",", "\"approximately\": \"~\", \"count\": \"1000\", \"sfield\": \"key\", \"svalue\": \"value\", \"stream_id\": \"*\",", "1000 STREAMS mystream writers 0-0 0-0\", { \"command\": \"XREAD\", \"count_const\":", "judge_command( \"XINFO STREAM mystream\", {\"command\": \"XINFO\", \"stream\": \"STREAM\", \"key\": \"mystream\"},", "\"mygroup\", \"stream_id\": \"$\", }, ) # two subcommand together shouldn't", "mygroup $\", None) def test_xgroup_delconsumer(judge_command): judge_command( 
\"XGROUP delconsumer mykey mygroup", "\"stream_delconsumer\": \"delconsumer\", \"key\": \"mykey\", \"group\": \"mygroup\", \"consumer\": \"$\", }, )", "\"group\": \"mygroup\", \"consumer\": \"Alice\", \"millisecond\": \"3600000\", \"stream_id\": \"1526569498055-0\", \"force\": \"FORCE\",", "\"key\": \"mystream\"}, ) judge_command(\"XINFO HELP\", {\"command\": \"XINFO\", \"help\": \"HELP\"}) judge_command(\"XINFO", "test_xreadgroup(judge_command): judge_command( \"XREADGROUP GROUP mygroup1 Bob COUNT 1 BLOCK 100", "\"stream_create\": \"CREATE\", \"key\": \"mykey\", \"group\": \"mygroup\", \"stream_id\": \"$\", }, )", "\"3600000\", \"stream_id\": \"1526569498055-0\", \"retrycount\": \"retrycount\", \"count\": \"7\", }, ) judge_command(", "\"mygroup\", \"consumer\": \"Alice\", \"millisecond\": [\"3600000\", \"300\"], \"stream_id\": \"1526569498055-0\", \"idel\": \"IDEL\",", ") judge_command(\" XTRIM mystream\", None) def test_xdel(judge_command): judge_command( \"XDEL mystream", "\"Alice\", \"millisecond\": \"3600000\", \"stream_id\": \"1526569498055-0\", }, ) judge_command( \"XCLAIM mystream", "stream id judge_command( \"xadd mystream 123-123 key value\", { \"command\":", "mystream mygroup Alice 3600000 1526569498055-0 JUSTID\", { \"command\": \"XCLAIM\", \"key\":", "judge_command( \"XRANGE somestream 1526985054069 1526985055069\", { \"command\": \"XRANGE\", \"key\": \"somestream\",", "mykey mygroup\", None) def test_xgroup_stream(judge_command): judge_command( \"XACK mystream group1 123123\",", "judge_command( \"XINFO STREAM mystream FULL\", { \"command\": \"XINFO\", \"stream\": \"STREAM\",", "\"consumer\": \"myconsumer\", }, ) judge_command( \"XGROUP delconsumer mykey mygroup $\",", "\"count\": \"10\", }, ) def test_xpending(judge_command): judge_command( \"XPENDING mystream group55\",", "\"key\": \"mystream\", \"full_const\": \"FULL\", }, ) judge_command( \"XINFO STREAM mystream", "support multiple tokens # so the ids will be recongized", "\"count\": \"10\", }, ) 
judge_command( \"XPENDING mystream group55 - +", "FORCE\", { \"command\": \"XCLAIM\", \"key\": \"mystream\", \"group\": \"mygroup\", \"consumer\": \"Alice\",", "\"2\", \"streams\": \"STREAMS\", \"keys\": \"mystream writers 0-0\", \"block\": \"BLOCK\", \"millisecond\":", "\"IDEL\", }, ) judge_command( \"XCLAIM mystream mygroup Alice 3600000 1526569498055-0", "shouldn't match judge_command(\"XGROUP CREATE mykey mygroup 123 SETID mykey mygroup", "key value\", { \"command\": \"xadd\", \"key\": \"mystream\", \"sfield\": \"key\", \"svalue\":", ") def test_xreadgroup(judge_command): judge_command( \"XREADGROUP GROUP mygroup1 Bob COUNT 1", "\"3600000\", \"stream_id\": \"1526569498055-0\", }, ) judge_command( \"XCLAIM mystream mygroup Alice", "\"consumer\": \"myconsumer\", }, ) judge_command(\"XPENDING mystream group55 - + \",", "\"XACK mystream group1 123123\", { \"command\": \"XACK\", \"key\": \"mystream\", \"group\":", "Alice 3600000 1526569498055-0 retrycount 7\", { \"command\": \"XCLAIM\", \"key\": \"mystream\",", "\"mystream\", \"group\": \"mygroup\", \"consumer\": \"Alice\", \"millisecond\": \"3600000\", \"stream_id\": \"1526569498055-0\", \"time\":", "test_xread(judge_command): judge_command( \"XREAD COUNT 2 STREAMS mystream writers 0-0 0-0\",", ") def test_xtrim(judge_command): judge_command( \" XTRIM mystream MAXLEN 2\", {\"command\":", "\"mystream\", \"stream_id\": \"1581060831000\"}, ) judge_command( \"XDEL mystream 1581165000000\", {\"command\": \"XDEL\",", "\"XCLAIM mystream mygroup Alice 3600000 1526569498055-0\", { \"command\": \"XCLAIM\", \"key\":", "\"streams\": \"STREAMS\", # FIXME current grammar can't support multiple tokens", "judge_command( \"XREADGROUP GROUP mygroup1 Bob COUNT 1 BLOCK 100 NOACK", "\"command\": \"XGROUP\", \"stream_setid\": \"SETID\", \"key\": \"mykey\", \"group\": \"mygroup\", \"stream_id\": \"$\",", "\"group55\", \"stream_id\": [\"-\", \"+\"], \"count\": \"10\", \"consumer\": \"myconsumer\", }, )", "\"stream_id\": \"2\", }, ) 
judge_command( \"XREADGROUP GROUP mygroup1 Bob STREAMS", "myconsumer\", { \"command\": \"XPENDING\", \"key\": \"mystream\", \"group\": \"group55\", \"consumer\": \"myconsumer\",", "\"consumer\": \"Alice\", \"millisecond\": \"3600000\", \"stream_id\": \"789\", }, ) judge_command( \"XCLAIM", "\"svalue\": \"value\", \"stream_id\": \"*\", }, ) judge_command( \"xadd mystream *", "so the ids will be recongized to keys. \"keys\": \"mystream", "writers 0-0\", \"stream_id\": \"0-0\", }, ) judge_command( \"XREAD COUNT 2", "1581165000000\", {\"command\": \"XDEL\", \"key\": \"mystream\", \"stream_id\": \"1581165000000\"}, ) def test_xclaim(judge_command):", "7\", { \"command\": \"XCLAIM\", \"key\": \"mystream\", \"group\": \"mygroup\", \"consumer\": \"Alice\",", "\"2\"}, ) judge_command( \" XTRIM mystream MAXLEN ~ 2\", {", "\"group\": \"group55\", \"stream_id\": [\"-\", \"+\"], \"count\": \"10\", \"consumer\": \"myconsumer\", },", "\"world\", \"stream_id\": \"123-123\", }, ) def test_xtrim(judge_command): judge_command( \" XTRIM", ") judge_command(\"XPENDING mystream group55 - + \", None) def test_xadd(judge_command):", "mystream MAXLEN ~ 2\", { \"command\": \"XTRIM\", \"key\": \"mystream\", \"maxlen\":", "judge_command( \"XDEL mystream 1581165000000 1549611229000 1581060831000\", {\"command\": \"XDEL\", \"key\": \"mystream\",", "\"mystream\", \"sfield\": \"key\", \"svalue\": \"value\", \"stream_id\": \"123-123\", }, ) judge_command(", "mygroup 123 SETID mykey mygroup $\", None) def test_xgroup_destroy(judge_command): judge_command(", "\", None) def test_xadd(judge_command): judge_command( \"xadd mystream MAXLEN ~ 1000", "1526569498055-0 retrycount 7\", { \"command\": \"XCLAIM\", \"key\": \"mystream\", \"group\": \"mygroup\",", "judge_command( \"XACK mystream group1 123123 111\", {\"command\": \"XACK\", \"key\": \"mystream\",", "CREATE mykey mygroup\", None) judge_command(\"XGROUP CREATE mykey\", None) def test_xgroup_setid(judge_command):", "\"command\": \"XINFO\", 
\"stream\": \"STREAM\", \"key\": \"mystream\", \"full_const\": \"FULL\", }, )", "\"mykey\", \"group\": \"mygroup\", \"consumer\": \"myconsumer\", }, ) judge_command( \"XGROUP delconsumer", "group1 123123 111\", {\"command\": \"XACK\", \"key\": \"mystream\", \"group\": \"group1\", \"stream_id\":", "\"key\": \"mystream\", \"maxlen\": \"MAXLEN\", \"count\": \"2\", \"approximately\": \"~\", }, )", "123\", { \"command\": \"XGROUP\", \"stream_create\": \"CREATE\", \"key\": \"mykey\", \"group\": \"mygroup\",", "test_xgroup_setid(judge_command): judge_command( \"XGROUP SETID mykey mygroup 123\", { \"command\": \"XGROUP\",", "}, ) def test_xpending(judge_command): judge_command( \"XPENDING mystream group55\", {\"command\": \"XPENDING\",", "\"key\": \"mystream\", \"stream_id\": \"1581060831000\"}, ) judge_command( \"XDEL mystream 1581165000000\", {\"command\":", "mygroup GROUPS mystream\", None) judge_command(\"XINFO groups mystream mygroup\", None) def", "\"1581060831000\"}, ) judge_command( \"XDEL mystream 1581165000000\", {\"command\": \"XDEL\", \"key\": \"mystream\",", "mygroup $\", None) def test_xgroup_destroy(judge_command): judge_command( \"XGROUP destroy mykey mygroup\",", "test_xinfo(judge_command): judge_command( \"XINFO consumers mystream mygroup\", { \"command\": \"XINFO\", \"stream_consumers\":", "\"key\": \"mystream\", \"sfield\": \"hello\", \"svalue\": \"world\", \"stream_id\": \"123-123\", }, )", "consumers mystream mygroup\", { \"command\": \"XINFO\", \"stream_consumers\": \"consumers\", \"key\": \"mystream\",", "\"stream_id\": \"*\", }, ) # spcify stream id judge_command( \"xadd", "\"key\", \"svalue\": \"value\", \"stream_id\": \"*\", }, ) # test for", ") judge_command( \"XPENDING mystream group55 myconsumer\", { \"command\": \"XPENDING\", \"key\":", "\"count\": \"10\", \"consumer\": \"myconsumer\", }, ) judge_command(\"XPENDING mystream group55 -", "\"svalue\": \"world\", \"stream_id\": \"123-123\", }, ) def test_xtrim(judge_command): judge_command( \"", 
"1526569498055-0 IDEL 300\", { \"command\": \"XCLAIM\", \"key\": \"mystream\", \"group\": \"mygroup\",", "\"XINFO STREAM mystream FULL\", { \"command\": \"XINFO\", \"stream\": \"STREAM\", \"key\":", "\"streams\": \"STREAMS\", \"keys\": \"mystream writers 0-0\", \"block\": \"BLOCK\", \"millisecond\": \"1000\",", "}, ) judge_command( \"XREADGROUP GROUP mygroup1 Bob STREAMS key1 1", "\"1526985055069\"], }, ) judge_command( \"XRANGE somestream 1526985054069 1526985055069-10\", { \"command\":", "judge_command( \"XACK mystream group1 123123\", { \"command\": \"XACK\", \"key\": \"mystream\",", "mystream group55 - + 10 myconsumer\", { \"command\": \"XPENDING\", \"key\":", "judge_command( \"XPENDING mystream group55\", {\"command\": \"XPENDING\", \"key\": \"mystream\", \"group\": \"group55\"},", "\"mystream\"}, ) judge_command( \"XINFO STREAM mystream\", {\"command\": \"XINFO\", \"stream\": \"STREAM\",", "\"command\": \"XRANGE\", \"key\": \"somestream\", \"stream_id\": [\"1526985054069\", \"1526985055069-10\"], \"count_const\": \"count\", \"count\":", "\"command\": \"XTRIM\", \"key\": \"mystream\", \"maxlen\": \"MAXLEN\", \"count\": \"2\", \"approximately\": \"~\",", "COUNT 2 STREAMS mystream writers 0-0 0-0\", { \"command\": \"XREAD\",", "\"command\": \"XACK\", \"key\": \"mystream\", \"group\": \"group1\", \"stream_id\": \"123123\", }, )", "mygroup $\", { \"command\": \"XGROUP\", \"stream_delconsumer\": \"delconsumer\", \"key\": \"mykey\", \"group\":", "}, ) judge_command( \"XPENDING mystream group55 - + 10\", {", "\"millisecond\": \"3600000\", \"stream_id\": \"789\", }, ) judge_command( \"XCLAIM mystream mygroup", "\"XPENDING mystream group55 - + 10\", { \"command\": \"XPENDING\", \"key\":", "# two subcommand together shouldn't match judge_command(\"XGROUP CREATE mykey mygroup", "MAXLEN 1000 * key value\", { \"command\": \"xadd\", \"key\": \"mystream\",", ") judge_command( \"XACK mystream group1 123123 111\", {\"command\": \"XACK\", \"key\":", "\"XDEL\", \"key\": 
\"mystream\", \"stream_id\": \"1581060831000\"}, ) judge_command( \"XDEL mystream 1581165000000\",", "mykey\", None) judge_command(\"XGROUP DESTROY mykey mygroup $\", None) def test_xgroup_delconsumer(judge_command):", "\"1526569498055-0\", \"justid\": \"JUSTID\", }, ) def test_xread(judge_command): judge_command( \"XREAD COUNT", "\"XGROUP CREATE mykey mygroup $\", { \"command\": \"XGROUP\", \"stream_create\": \"CREATE\",", "\"xadd\", \"key\": \"mystream\", \"sfield\": \"key\", \"svalue\": \"value\", \"stream_id\": \"123-123\", },", "judge_command( \"XDEL mystream 1581165000000\", {\"command\": \"XDEL\", \"key\": \"mystream\", \"stream_id\": \"1581165000000\"},", "\"XREAD COUNT 2 BLOCK 1000 STREAMS mystream writers 0-0 0-0\",", "\"1000\", \"stream_id\": \"0-0\", }, ) def test_xreadgroup(judge_command): judge_command( \"XREADGROUP GROUP", "\"stream_id\": \"1526569498055-0\", \"justid\": \"JUSTID\", }, ) def test_xread(judge_command): judge_command( \"XREAD", "delconsumer mykey mygroup $\", { \"command\": \"XGROUP\", \"stream_delconsumer\": \"delconsumer\", \"key\":", "\"stream_id\": \"123123\", }, ) judge_command( \"XACK mystream group1 123123 111\",", "key value\", { \"command\": \"xadd\", \"key\": \"mystream\", \"maxlen\": \"MAXLEN\", \"count\":", "\"group\": \"mygroup\", \"consumer\": \"Alice\", \"millisecond\": \"3600000\", \"stream_id\": \"1526569498055-0\", \"justid\": \"JUSTID\",", "judge_command( \"xadd mystream 123-123 key value\", { \"command\": \"xadd\", \"key\":", "\"mygroup\", \"consumer\": \"Alice\", \"millisecond\": \"3600000\", \"stream_id\": \"1526569498055-0\", \"retrycount\": \"retrycount\", \"count\":", "{ \"command\": \"XREADGROUP\", \"stream_group\": \"GROUP\", \"group\": \"mygroup1\", \"consumer\": \"Bob\", \"count_const\":", "\"XREAD\", \"count_const\": \"COUNT\", \"count\": \"2\", \"streams\": \"STREAMS\", # FIXME current", "\"GROUP\", \"group\": \"mygroup1\", \"consumer\": \"Bob\", \"count_const\": \"COUNT\", \"count\": \"1\", \"block\":", 
"$\", { \"command\": \"XGROUP\", \"stream_create\": \"CREATE\", \"key\": \"mykey\", \"group\": \"mygroup\",", "1526569498055-0 123 456 789\", { \"command\": \"XCLAIM\", \"key\": \"mystream\", \"group\":", "mystream mygroup Alice 3600000 1526569498055-0 123 456 789\", { \"command\":", "[\"3600000\", \"300\"], \"stream_id\": \"1526569498055-0\", \"idel\": \"IDEL\", }, ) judge_command( \"XCLAIM", "\"stream_id\": \"1526569498055-0\", \"retrycount\": \"retrycount\", \"count\": \"7\", }, ) judge_command( \"XCLAIM", "\"key\": \"mykey\", \"group\": \"mygroup\", \"stream_id\": \"$\", }, ) # two", "\"0-0\", }, ) def test_xreadgroup(judge_command): judge_command( \"XREADGROUP GROUP mygroup1 Bob", "group55 myconsumer\", { \"command\": \"XPENDING\", \"key\": \"mystream\", \"group\": \"group55\", \"consumer\":", "key2 2\", { \"command\": \"XREADGROUP\", \"stream_group\": \"GROUP\", \"group\": \"mygroup1\", \"consumer\":", "judge_command(\"XINFO groups mystream mygroup\", None) def test_xinfo_with_full(judge_command): judge_command( \"XINFO STREAM", "{\"command\": \"XRANGE\", \"key\": \"somestream\", \"stream_id\": [\"-\", \"+\"]}, ) judge_command( \"XRANGE", "\"group\": \"mygroup\", \"stream_id\": \"123\", }, ) judge_command( \"XGROUP CREATE mykey", "mykey mygroup $\", None) def test_xgroup_delconsumer(judge_command): judge_command( \"XGROUP delconsumer mykey", "\"xadd mystream MAXLEN ~ 1000 * key value\", { \"command\":", "mystream group55\", {\"command\": \"XPENDING\", \"key\": \"mystream\", \"group\": \"group55\"}, ) judge_command(", "\"1526569498055-0\", \"time\": \"TIME\", \"timestamp\": \"123456789\", }, ) judge_command( \"XCLAIM mystream", "judge_command(\"XGROUP CREATE mykey mygroup 123 SETID mykey mygroup $\", None)", "\"300\"], \"stream_id\": \"1526569498055-0\", \"idel\": \"IDEL\", }, ) judge_command( \"XCLAIM mystream", "\"mystream\", \"maxlen\": \"MAXLEN\", \"count\": \"1000\", \"sfield\": \"key\", \"svalue\": \"value\", \"stream_id\":", "1526569498055-0 TIME 
123456789\", { \"command\": \"XCLAIM\", \"key\": \"mystream\", \"group\": \"mygroup\",", "\"COUNT\", \"count\": \"1\", \"block\": \"BLOCK\", \"millisecond\": \"100\", \"noack\": \"NOACK\", \"streams\":", "\"approximately\": \"~\", }, ) judge_command(\" XTRIM mystream\", None) def test_xdel(judge_command):", "\"XRANGE\", \"key\": \"somestream\", \"stream_id\": [\"-\", \"+\"]}, ) judge_command( \"XRANGE somestream", "\"Bob\", \"streams\": \"STREAMS\", \"keys\": \"key1 1 key2\", \"stream_id\": \"2\", },", "mygroup Alice 3600000 1526569498055-0 retrycount 7\", { \"command\": \"XCLAIM\", \"key\":", "\"justid\": \"JUSTID\", }, ) def test_xread(judge_command): judge_command( \"XREAD COUNT 2", "\"group\": \"mygroup\", \"consumer\": \"Alice\", \"millisecond\": [\"3600000\", \"300\"], \"stream_id\": \"1526569498055-0\", \"idel\":", ") # two subcommand together shouldn't match judge_command(\"XGROUP CREATE mykey", "\"group\": \"mygroup\", \"consumer\": \"myconsumer\", }, ) judge_command( \"XGROUP delconsumer mykey", "\"10\", }, ) def test_xpending(judge_command): judge_command( \"XPENDING mystream group55\", {\"command\":", "\"destroy\", \"key\": \"mykey\", \"group\": \"mygroup\", }, ) judge_command(\"XGROUP destroy mykey\",", "123-123 key value foo bar hello world\", { \"command\": \"xadd\",", "mygroup 123\", { \"command\": \"XGROUP\", \"stream_create\": \"CREATE\", \"key\": \"mykey\", \"group\":", "judge_command( \"XPENDING mystream group55 - + 10\", { \"command\": \"XPENDING\",", "1526985055069\", { \"command\": \"XRANGE\", \"key\": \"somestream\", \"stream_id\": [\"1526985054069\", \"1526985055069\"], },", "\"command\": \"XINFO\", \"stream\": \"STREAM\", \"key\": \"mystream\", \"full_const\": \"FULL\", \"count_const\": \"count\",", "\"sfield\": \"key\", \"svalue\": \"value\", \"stream_id\": \"123-123\", }, ) judge_command( \"xadd", "of a parameter judge_command(\"XGROUP CREATE mykey mygroup\", None) judge_command(\"XGROUP CREATE", "key2\", \"stream_id\": \"2\", }, ) 
judge_command( \"XREADGROUP GROUP mygroup1 Bob", "~ 2\", { \"command\": \"XTRIM\", \"key\": \"mystream\", \"maxlen\": \"MAXLEN\", \"count\":", "\"1\", \"block\": \"BLOCK\", \"millisecond\": \"100\", \"noack\": \"NOACK\", \"streams\": \"STREAMS\", \"keys\":", "10 myconsumer\", { \"command\": \"XPENDING\", \"key\": \"mystream\", \"group\": \"group55\", \"stream_id\":", ") judge_command( \"XPENDING mystream group55 - + 10 myconsumer\", {", "somestream 1526985054069 1526985055069\", { \"command\": \"XRANGE\", \"key\": \"somestream\", \"stream_id\": [\"1526985054069\",", "\"millisecond\": \"1000\", \"stream_id\": \"0-0\", }, ) def test_xreadgroup(judge_command): judge_command( \"XREADGROUP", "Bob STREAMS key1 1 key2 2\", { \"command\": \"XREADGROUP\", \"stream_group\":", "judge_command(\" XTRIM mystream\", None) def test_xdel(judge_command): judge_command( \"XDEL mystream 1581165000000", "\"key\": \"mystream\", \"maxlen\": \"MAXLEN\", \"count\": \"2\"}, ) judge_command( \" XTRIM", "\"stream\": \"STREAM\", \"key\": \"mystream\", \"full_const\": \"FULL\", }, ) judge_command( \"XINFO", "world\", { \"command\": \"xadd\", \"key\": \"mystream\", \"sfield\": \"hello\", \"svalue\": \"world\",", "will be recongized to keys. 
\"keys\": \"mystream writers 0-0\", \"stream_id\":", "\"key\": \"mystream\", \"maxlen\": \"MAXLEN\", \"approximately\": \"~\", \"count\": \"1000\", \"sfield\": \"key\",", "\"maxlen\": \"MAXLEN\", \"count\": \"1000\", \"sfield\": \"key\", \"svalue\": \"value\", \"stream_id\": \"*\",", "\"sfield\": \"key\", \"svalue\": \"value\", \"stream_id\": \"*\", }, ) # test", "\"mykey\", \"group\": \"mygroup\", }, ) judge_command(\"XGROUP destroy mykey\", None) judge_command(\"XGROUP", "mygroup Alice 3600000 1526569498055-0 123 456 789\", { \"command\": \"XCLAIM\",", "\"stream_id\": \"1581165000000\"}, ) def test_xclaim(judge_command): judge_command( \"XCLAIM mystream mygroup Alice", "3600000 1526569498055-0 FORCE\", { \"command\": \"XCLAIM\", \"key\": \"mystream\", \"group\": \"mygroup\",", "COUNT 1 BLOCK 100 NOACK STREAMS key1 1 key2 2\",", "FULL\", { \"command\": \"XINFO\", \"stream\": \"STREAM\", \"key\": \"mystream\", \"full_const\": \"FULL\",", "\"xadd mystream 123-123 key value foo bar hello world\", {", "* key value\", { \"command\": \"xadd\", \"key\": \"mystream\", \"maxlen\": \"MAXLEN\",", "3600000 1526569498055-0 123 456 789\", { \"command\": \"XCLAIM\", \"key\": \"mystream\",", "judge_command( \"XCLAIM mystream mygroup Alice 3600000 1526569498055-0 IDEL 300\", {", "mykey mygroup $\", { \"command\": \"XGROUP\", \"stream_setid\": \"SETID\", \"key\": \"mykey\",", "\"mystream\", \"full_const\": \"FULL\", }, ) judge_command( \"XINFO STREAM mystream FULL", "\"value\", \"stream_id\": \"*\", }, ) # spcify stream id judge_command(", "\"stream_id\": [\"1526985054069\", \"1526985055069\"], }, ) judge_command( \"XRANGE somestream 1526985054069 1526985055069-10\",", "mygroup\", None) judge_command(\"XGROUP CREATE mykey\", None) def test_xgroup_setid(judge_command): judge_command( \"XGROUP", "\"command\": \"XGROUP\", \"stream_delconsumer\": \"delconsumer\", \"key\": \"mykey\", \"group\": \"mygroup\", \"consumer\": \"$\",", "\"+\"], \"count\": \"10\", }, ) judge_command( 
\"XPENDING mystream group55 -", "group55 - + 10\", { \"command\": \"XPENDING\", \"key\": \"mystream\", \"group\":", "[\"1526985054069\", \"1526985055069-10\"], \"count_const\": \"count\", \"count\": \"10\", }, ) def test_xgroup_create(judge_command):", "\"delconsumer\", \"key\": \"mykey\", \"group\": \"mygroup\", \"consumer\": \"myconsumer\", }, ) judge_command(", "\"3600000\", \"stream_id\": \"1526569498055-0\", \"justid\": \"JUSTID\", }, ) def test_xread(judge_command): judge_command(", "1526569498055-0 JUSTID\", { \"command\": \"XCLAIM\", \"key\": \"mystream\", \"group\": \"mygroup\", \"consumer\":", "\"XGROUP CREATE mykey mygroup 123\", { \"command\": \"XGROUP\", \"stream_create\": \"CREATE\",", "\"stream_consumers\": \"consumers\", \"key\": \"mystream\", \"group\": \"mygroup\", }, ) judge_command( \"XINFO", "{ \"command\": \"XRANGE\", \"key\": \"somestream\", \"stream_id\": [\"1526985054069\", \"1526985055069\"], }, )", "\"millisecond\": \"3600000\", \"stream_id\": \"1526569498055-0\", }, ) judge_command( \"XCLAIM mystream mygroup", "mystream MAXLEN 2\", {\"command\": \"XTRIM\", \"key\": \"mystream\", \"maxlen\": \"MAXLEN\", \"count\":", "mykey mygroup $\", None) def test_xgroup_destroy(judge_command): judge_command( \"XGROUP destroy mykey", "\"count\": \"10\", }, ) def test_xgroup_create(judge_command): judge_command( \"XGROUP CREATE mykey", "STREAM mystream FULL\", { \"command\": \"XINFO\", \"stream\": \"STREAM\", \"key\": \"mystream\",", "\"$\", }, ) # two subcommand together shouldn't match judge_command(\"XGROUP", "mygroup1 Bob COUNT 1 BLOCK 100 NOACK STREAMS key1 1", ") judge_command(\"XGROUP delconsumer mykey mygroup\", None) def test_xgroup_stream(judge_command): judge_command( \"XACK", "\"key\": \"somestream\", \"stream_id\": [\"-\", \"+\"]}, ) judge_command( \"XRANGE somestream 1526985054069", "mykey mygroup\", { \"command\": \"XGROUP\", \"stream_destroy\": \"destroy\", \"key\": \"mykey\", \"group\":", "0-0 0-0\", { \"command\": \"XREAD\", 
\"count_const\": \"COUNT\", \"count\": \"2\", \"streams\":", "\"2\", \"approximately\": \"~\", }, ) judge_command(\" XTRIM mystream\", None) def", "two subcommand together shouldn't match judge_command(\"XGROUP CREATE mykey mygroup 123", "\"svalue\": \"value\", \"stream_id\": \"*\", }, ) # test for MAXLEN", "SETID mykey mygroup 123\", { \"command\": \"XGROUP\", \"stream_setid\": \"SETID\", \"key\":", "\"XINFO consumers mystream mygroup\", { \"command\": \"XINFO\", \"stream_consumers\": \"consumers\", \"key\":", "\"group\": \"mygroup\", }, ) judge_command( \"XINFO GROUPS mystream\", {\"command\": \"XINFO\",", "judge_command(\"XGROUP destroy mykey\", None) judge_command(\"XGROUP DESTROY mykey mygroup $\", None)", "judge_command( \"XRANGE somestream 1526985054069 1526985055069-10\", { \"command\": \"XRANGE\", \"key\": \"somestream\",", "\"XPENDING mystream group55 - + 10 myconsumer\", { \"command\": \"XPENDING\",", "\"group\": \"mygroup\", \"stream_id\": \"$\", }, ) # two subcommand together", "\"key\": \"mystream\", \"sfield\": \"key\", \"svalue\": \"value\", \"stream_id\": \"123-123\", }, )", "\"stream\": \"STREAM\", \"key\": \"mystream\"}, ) judge_command(\"XINFO HELP\", {\"command\": \"XINFO\", \"help\":", "SETID mykey mygroup $\", { \"command\": \"XGROUP\", \"stream_setid\": \"SETID\", \"key\":", "\"3600000\", \"stream_id\": \"1526569498055-0\", \"force\": \"FORCE\", }, ) judge_command( \"XCLAIM mystream", "}, ) judge_command( \"XREAD COUNT 2 BLOCK 1000 STREAMS mystream", "be recongized to keys. 
\"keys\": \"mystream writers 0-0\", \"stream_id\": \"0-0\",", "\"key\": \"mystream\", \"stream_id\": \"1581165000000\"}, ) def test_xclaim(judge_command): judge_command( \"XCLAIM mystream", "\"somestream\", \"stream_id\": [\"-\", \"+\"]}, ) judge_command( \"XRANGE somestream 1526985054069 1526985055069\",", "\"1526569498055-0\", }, ) judge_command( \"XCLAIM mystream mygroup Alice 3600000 1526569498055-0", "\"consumers\", \"key\": \"mystream\", \"group\": \"mygroup\", }, ) judge_command( \"XINFO GROUPS", "\"key\": \"mykey\", \"group\": \"mygroup\", \"stream_id\": \"123\", }, ) judge_command( \"XGROUP", "\"XREAD\", \"count_const\": \"COUNT\", \"count\": \"2\", \"streams\": \"STREAMS\", \"keys\": \"mystream writers", "\"stream_id\": \"1581060831000\"}, ) judge_command( \"XDEL mystream 1581165000000\", {\"command\": \"XDEL\", \"key\":", "mystream mygroup\", None) def test_xinfo_with_full(judge_command): judge_command( \"XINFO STREAM mystream FULL\",", "mystream * key value\", { \"command\": \"xadd\", \"key\": \"mystream\", \"sfield\":", "\"group\": \"group1\", \"stream_id\": \"123123\", }, ) judge_command( \"XACK mystream group1", "\"myconsumer\", }, ) judge_command( \"XPENDING mystream group55 - + 10\",", "\"count_const\": \"COUNT\", \"count\": \"2\", \"streams\": \"STREAMS\", \"keys\": \"mystream writers 0-0\",", "\"mystream\", \"group\": \"group55\"}, ) judge_command( \"XPENDING mystream group55 myconsumer\", {", "\"stream_group\": \"GROUP\", \"group\": \"mygroup1\", \"consumer\": \"Bob\", \"streams\": \"STREAMS\", \"keys\": \"key1", "\"mystream\", \"group\": \"mygroup\", \"consumer\": \"Alice\", \"millisecond\": \"3600000\", \"stream_id\": \"789\", },", "Alice 3600000 1526569498055-0 FORCE\", { \"command\": \"XCLAIM\", \"key\": \"mystream\", \"group\":", "\"group\": \"mygroup\", \"consumer\": \"Alice\", \"millisecond\": \"3600000\", \"stream_id\": \"1526569498055-0\", \"time\": \"TIME\",", "\"mygroup1\", \"consumer\": \"Bob\", \"count_const\": \"COUNT\", \"count\": 
\"1\", \"block\": \"BLOCK\", \"millisecond\":", "\"command\": \"XPENDING\", \"key\": \"mystream\", \"group\": \"group55\", \"stream_id\": [\"-\", \"+\"], \"count\":", "\"1526985055069-10\"], \"count_const\": \"count\", \"count\": \"10\", }, ) def test_xgroup_create(judge_command): judge_command(", "mystream mygroup\", { \"command\": \"XINFO\", \"stream_consumers\": \"consumers\", \"key\": \"mystream\", \"group\":", "$\", { \"command\": \"XGROUP\", \"stream_delconsumer\": \"delconsumer\", \"key\": \"mykey\", \"group\": \"mygroup\",", "mystream\", {\"command\": \"XINFO\", \"stream_groups\": \"GROUPS\", \"key\": \"mystream\"}, ) judge_command( \"XINFO", "\"stream_groups\": \"GROUPS\", \"key\": \"mystream\"}, ) judge_command( \"XINFO STREAM mystream\", {\"command\":", "\"MAXLEN\", \"count\": \"2\"}, ) judge_command( \" XTRIM mystream MAXLEN ~", "NOACK STREAMS key1 1 key2 2\", { \"command\": \"XREADGROUP\", \"stream_group\":", "1581060831000\", {\"command\": \"XDEL\", \"key\": \"mystream\", \"stream_id\": \"1581060831000\"}, ) judge_command( \"XDEL", "def test_xpending(judge_command): judge_command( \"XPENDING mystream group55\", {\"command\": \"XPENDING\", \"key\": \"mystream\",", "}, ) judge_command(\"XPENDING mystream group55 - + \", None) def", "judge_command( \"XREAD COUNT 2 STREAMS mystream writers 0-0 0-0\", {", "* key value\", { \"command\": \"xadd\", \"key\": \"mystream\", \"sfield\": \"key\",", "\"full_const\": \"FULL\", \"count_const\": \"count\", \"count\": \"10\", }, ) def test_xpending(judge_command):", "{\"command\": \"XINFO\", \"stream\": \"STREAM\", \"key\": \"mystream\"}, ) judge_command(\"XINFO HELP\", {\"command\":", "\"count\": \"7\", }, ) judge_command( \"XCLAIM mystream mygroup Alice 3600000", "mystream FULL\", { \"command\": \"XINFO\", \"stream\": \"STREAM\", \"key\": \"mystream\", \"full_const\":", "group1 123123\", { \"command\": \"XACK\", \"key\": \"mystream\", \"group\": \"group1\", \"stream_id\":", "BLOCK 100 NOACK STREAMS key1 1 key2 2\", { 
\"command\":", "group55 - + 10 myconsumer\", { \"command\": \"XPENDING\", \"key\": \"mystream\",", "\"Alice\", \"millisecond\": [\"3600000\", \"300\"], \"stream_id\": \"1526569498055-0\", \"idel\": \"IDEL\", }, )", "mystream group1 123123 111\", {\"command\": \"XACK\", \"key\": \"mystream\", \"group\": \"group1\",", "{ \"command\": \"XCLAIM\", \"key\": \"mystream\", \"group\": \"mygroup\", \"consumer\": \"Alice\", \"millisecond\":", "mygroup $\", { \"command\": \"XGROUP\", \"stream_create\": \"CREATE\", \"key\": \"mykey\", \"group\":", "{ \"command\": \"XACK\", \"key\": \"mystream\", \"group\": \"group1\", \"stream_id\": \"123123\", },", "test_xgroup_destroy(judge_command): judge_command( \"XGROUP destroy mykey mygroup\", { \"command\": \"XGROUP\", \"stream_destroy\":", "match judge_command(\"XGROUP CREATE mykey mygroup 123 SETID mykey mygroup $\",", "\"command\": \"XINFO\", \"stream_consumers\": \"consumers\", \"key\": \"mystream\", \"group\": \"mygroup\", }, )", "\"123-123\", }, ) def test_xtrim(judge_command): judge_command( \" XTRIM mystream MAXLEN", "count 10\", { \"command\": \"XINFO\", \"stream\": \"STREAM\", \"key\": \"mystream\", \"full_const\":", "\"command\": \"XREAD\", \"count_const\": \"COUNT\", \"count\": \"2\", \"streams\": \"STREAMS\", # FIXME", "[\"-\", \"+\"]}, ) judge_command( \"XRANGE somestream 1526985054069 1526985055069\", { \"command\":", "\"XGROUP delconsumer mykey mygroup $\", { \"command\": \"XGROUP\", \"stream_delconsumer\": \"delconsumer\",", ") judge_command( \"XREAD COUNT 2 BLOCK 1000 STREAMS mystream writers", "\"SETID\", \"key\": \"mykey\", \"group\": \"mygroup\", \"stream_id\": \"123\", }, ) judge_command(", "{ \"command\": \"XGROUP\", \"stream_setid\": \"SETID\", \"key\": \"mykey\", \"group\": \"mygroup\", \"stream_id\":", "together shouldn't match judge_command(\"XGROUP CREATE mykey mygroup 123 SETID mykey", "judge_command(\"XINFO HELP\", {\"command\": \"XINFO\", \"help\": \"HELP\"}) judge_command(\"XINFO consumers mystream 
mygroup", "}, ) def test_xread(judge_command): judge_command( \"XREAD COUNT 2 STREAMS mystream", "mystream mygroup Alice 3600000 1526569498055-0 FORCE\", { \"command\": \"XCLAIM\", \"key\":", "retrycount 7\", { \"command\": \"XCLAIM\", \"key\": \"mystream\", \"group\": \"mygroup\", \"consumer\":", "BLOCK 1000 STREAMS mystream writers 0-0 0-0\", { \"command\": \"XREAD\",", "\"mykey\", \"group\": \"mygroup\", \"stream_id\": \"123\", }, ) judge_command( \"XGROUP CREATE", "\"somestream\", \"stream_id\": [\"1526985054069\", \"1526985055069-10\"], \"count_const\": \"count\", \"count\": \"10\", }, )", "\"mystream\", \"group\": \"group1\", \"stream_id\": \"123123\", }, ) judge_command( \"XACK mystream", "\"stream_id\": [\"-\", \"+\"], \"count\": \"10\", }, ) judge_command( \"XPENDING mystream", "judge_command( \"XREADGROUP GROUP mygroup1 Bob STREAMS key1 1 key2 2\",", "\"command\": \"xadd\", \"key\": \"mystream\", \"sfield\": \"key\", \"svalue\": \"value\", \"stream_id\": \"*\",", "\"command\": \"xadd\", \"key\": \"mystream\", \"sfield\": \"hello\", \"svalue\": \"world\", \"stream_id\": \"123-123\",", "\"2\", }, ) judge_command( \"XREADGROUP GROUP mygroup1 Bob STREAMS key1", "\"XACK\", \"key\": \"mystream\", \"group\": \"group1\", \"stream_id\": \"111\"}, ) def test_xinfo(judge_command):", "def test_xrange(judge_command): judge_command( \"XRANGE somestream - +\", {\"command\": \"XRANGE\", \"key\":", "{\"command\": \"XDEL\", \"key\": \"mystream\", \"stream_id\": \"1581165000000\"}, ) def test_xclaim(judge_command): judge_command(", "\"consumer\": \"Alice\", \"millisecond\": \"3600000\", \"stream_id\": \"1526569498055-0\", \"retrycount\": \"retrycount\", \"count\": \"7\",", "\"key\": \"mystream\", \"maxlen\": \"MAXLEN\", \"count\": \"1000\", \"sfield\": \"key\", \"svalue\": \"value\",", "\"123456789\", }, ) judge_command( \"XCLAIM mystream mygroup Alice 3600000 1526569498055-0", "\"XREAD COUNT 2 STREAMS mystream writers 0-0 0-0\", { \"command\":", "\"xadd\", \"key\": 
\"mystream\", \"sfield\": \"hello\", \"svalue\": \"world\", \"stream_id\": \"123-123\", },", "\"mygroup\", \"stream_id\": \"$\", }, ) # short of a parameter", "$\", None) def test_xgroup_destroy(judge_command): judge_command( \"XGROUP destroy mykey mygroup\", {", "\"123-123\", }, ) judge_command( \"xadd mystream 123-123 key value foo", "foo bar hello world\", { \"command\": \"xadd\", \"key\": \"mystream\", \"sfield\":", "\"XCLAIM mystream mygroup Alice 3600000 1526569498055-0 TIME 123456789\", { \"command\":", "$\", None) def test_xgroup_delconsumer(judge_command): judge_command( \"XGROUP delconsumer mykey mygroup myconsumer\",", "mystream 1581165000000\", {\"command\": \"XDEL\", \"key\": \"mystream\", \"stream_id\": \"1581165000000\"}, ) def", "\"value\", \"stream_id\": \"*\", }, ) # test for MAXLEN option", "\"command\": \"XREAD\", \"count_const\": \"COUNT\", \"count\": \"2\", \"streams\": \"STREAMS\", \"keys\": \"mystream", "judge_command(\"XGROUP delconsumer mykey mygroup\", None) def test_xgroup_stream(judge_command): judge_command( \"XACK mystream", "\"XGROUP\", \"stream_create\": \"CREATE\", \"key\": \"mykey\", \"group\": \"mygroup\", \"stream_id\": \"123\", },", "\"XDEL mystream 1581165000000 1549611229000 1581060831000\", {\"command\": \"XDEL\", \"key\": \"mystream\", \"stream_id\":", "1526569498055-0\", { \"command\": \"XCLAIM\", \"key\": \"mystream\", \"group\": \"mygroup\", \"consumer\": \"Alice\",", "\"command\": \"XREADGROUP\", \"stream_group\": \"GROUP\", \"group\": \"mygroup1\", \"consumer\": \"Bob\", \"count_const\": \"COUNT\",", "\"XINFO GROUPS mystream\", {\"command\": \"XINFO\", \"stream_groups\": \"GROUPS\", \"key\": \"mystream\"}, )", "2 BLOCK 1000 STREAMS mystream writers 0-0 0-0\", { \"command\":", "HELP\", {\"command\": \"XINFO\", \"help\": \"HELP\"}) judge_command(\"XINFO consumers mystream mygroup GROUPS", "def test_xgroup_setid(judge_command): judge_command( \"XGROUP SETID mykey mygroup 123\", { \"command\":", "{ \"command\": \"XINFO\", 
\"stream\": \"STREAM\", \"key\": \"mystream\", \"full_const\": \"FULL\", \"count_const\":", ") judge_command( \"XGROUP CREATE mykey mygroup $\", { \"command\": \"XGROUP\",", "\"XGROUP\", \"stream_setid\": \"SETID\", \"key\": \"mykey\", \"group\": \"mygroup\", \"stream_id\": \"123\", },", "3600000 1526569498055-0\", { \"command\": \"XCLAIM\", \"key\": \"mystream\", \"group\": \"mygroup\", \"consumer\":", "judge_command( \"XPENDING mystream group55 - + 10 myconsumer\", { \"command\":", "judge_command( \"XCLAIM mystream mygroup Alice 3600000 1526569498055-0 FORCE\", { \"command\":", "\"block\": \"BLOCK\", \"millisecond\": \"1000\", \"stream_id\": \"0-0\", }, ) def test_xreadgroup(judge_command):", "\"10\", }, ) judge_command( \"XPENDING mystream group55 - + 10", "for MAXLEN option judge_command( \"xadd mystream MAXLEN 1000 * key", "\"XCLAIM mystream mygroup Alice 3600000 1526569498055-0 retrycount 7\", { \"command\":", "\"stream_delconsumer\": \"delconsumer\", \"key\": \"mykey\", \"group\": \"mygroup\", \"consumer\": \"myconsumer\", }, )", "\"key\": \"mystream\", \"group\": \"mygroup\", \"consumer\": \"Alice\", \"millisecond\": [\"3600000\", \"300\"], \"stream_id\":", "def test_xinfo_with_full(judge_command): judge_command( \"XINFO STREAM mystream FULL\", { \"command\": \"XINFO\",", "\"mygroup\", \"consumer\": \"$\", }, ) judge_command(\"XGROUP delconsumer mykey mygroup\", None)", ") judge_command( \"XRANGE somestream 1526985054069 1526985055069-10\", { \"command\": \"XRANGE\", \"key\":", "\"+\"]}, ) judge_command( \"XRANGE somestream 1526985054069 1526985055069\", { \"command\": \"XRANGE\",", "\"command\": \"XCLAIM\", \"key\": \"mystream\", \"group\": \"mygroup\", \"consumer\": \"Alice\", \"millisecond\": \"3600000\",", "{ \"command\": \"xadd\", \"key\": \"mystream\", \"sfield\": \"hello\", \"svalue\": \"world\", \"stream_id\":", "3600000 1526569498055-0 IDEL 300\", { \"command\": \"XCLAIM\", \"key\": \"mystream\", \"group\":", "\"stream_id\": [\"1526985054069\", 
\"1526985055069-10\"], \"count_const\": \"count\", \"count\": \"10\", }, ) def", "1526985055069-10 count 10\", { \"command\": \"XRANGE\", \"key\": \"somestream\", \"stream_id\": [\"1526985054069\",", "mykey mygroup 123 SETID mykey mygroup $\", None) def test_xgroup_destroy(judge_command):", "\"mygroup\", \"consumer\": \"Alice\", \"millisecond\": \"3600000\", \"stream_id\": \"1526569498055-0\", \"justid\": \"JUSTID\", },", "\"command\": \"xadd\", \"key\": \"mystream\", \"maxlen\": \"MAXLEN\", \"approximately\": \"~\", \"count\": \"1000\",", ") judge_command( \"XCLAIM mystream mygroup Alice 3600000 1526569498055-0 retrycount 7\",", "\"XREADGROUP\", \"stream_group\": \"GROUP\", \"group\": \"mygroup1\", \"consumer\": \"Bob\", \"streams\": \"STREAMS\", \"keys\":", "\"mygroup\", \"stream_id\": \"123\", }, ) judge_command( \"XGROUP SETID mykey mygroup", "id judge_command( \"xadd mystream 123-123 key value\", { \"command\": \"xadd\",", "}, ) judge_command(\" XTRIM mystream\", None) def test_xdel(judge_command): judge_command( \"XDEL", "parameter judge_command(\"XGROUP CREATE mykey mygroup\", None) judge_command(\"XGROUP CREATE mykey\", None)", "\"myconsumer\", }, ) judge_command(\"XPENDING mystream group55 - + \", None)", ") judge_command( \"XGROUP delconsumer mykey mygroup $\", { \"command\": \"XGROUP\",", "\"stream_id\": \"789\", }, ) judge_command( \"XCLAIM mystream mygroup Alice 3600000", "\"somestream\", \"stream_id\": [\"1526985054069\", \"1526985055069-10\"], }, ) judge_command( \"XRANGE somestream 1526985054069", "\"NOACK\", \"streams\": \"STREAMS\", \"keys\": \"key1 1 key2\", \"stream_id\": \"2\", },", "Alice 3600000 1526569498055-0 123 456 789\", { \"command\": \"XCLAIM\", \"key\":", "\"svalue\": \"value\", \"stream_id\": \"*\", }, ) # spcify stream id", "\"XCLAIM\", \"key\": \"mystream\", \"group\": \"mygroup\", \"consumer\": \"Alice\", \"millisecond\": [\"3600000\", \"300\"],", "\"XRANGE\", \"key\": \"somestream\", \"stream_id\": [\"1526985054069\", 
\"1526985055069-10\"], }, ) judge_command( \"XRANGE", "1526985054069 1526985055069-10\", { \"command\": \"XRANGE\", \"key\": \"somestream\", \"stream_id\": [\"1526985054069\", \"1526985055069-10\"],", "CREATE mykey mygroup 123 SETID mykey mygroup $\", None) def", "\"key\": \"mystream\", \"group\": \"group55\", \"stream_id\": [\"-\", \"+\"], \"count\": \"10\", },", "456 789\", { \"command\": \"XCLAIM\", \"key\": \"mystream\", \"group\": \"mygroup\", \"consumer\":", "\"7\", }, ) judge_command( \"XCLAIM mystream mygroup Alice 3600000 1526569498055-0", "\"group55\"}, ) judge_command( \"XPENDING mystream group55 myconsumer\", { \"command\": \"XPENDING\",", "\"group55\", \"stream_id\": [\"-\", \"+\"], \"count\": \"10\", }, ) judge_command( \"XPENDING", "\"STREAMS\", \"keys\": \"key1 1 key2\", \"stream_id\": \"2\", }, ) judge_command(\"XREADGROUP", "\"XPENDING mystream group55\", {\"command\": \"XPENDING\", \"key\": \"mystream\", \"group\": \"group55\"}, )", "\"group\": \"mygroup\", \"consumer\": \"$\", }, ) judge_command(\"XGROUP delconsumer mykey mygroup\",", "\"consumer\": \"Alice\", \"millisecond\": \"3600000\", \"stream_id\": \"1526569498055-0\", \"force\": \"FORCE\", }, )", "\"xadd\", \"key\": \"mystream\", \"sfield\": \"key\", \"svalue\": \"value\", \"stream_id\": \"*\", },", "Alice 3600000 1526569498055-0 IDEL 300\", { \"command\": \"XCLAIM\", \"key\": \"mystream\",", "judge_command( \"XGROUP destroy mykey mygroup\", { \"command\": \"XGROUP\", \"stream_destroy\": \"destroy\",", "- + 10 myconsumer\", { \"command\": \"XPENDING\", \"key\": \"mystream\", \"group\":", "\"xadd\", \"key\": \"mystream\", \"maxlen\": \"MAXLEN\", \"approximately\": \"~\", \"count\": \"1000\", \"sfield\":", "\"consumer\": \"Bob\", \"count_const\": \"COUNT\", \"count\": \"1\", \"block\": \"BLOCK\", \"millisecond\": \"100\",", "\"FULL\", \"count_const\": \"count\", \"count\": \"10\", }, ) def test_xpending(judge_command): judge_command(", "\"mystream\", \"group\": \"group55\", \"stream_id\": 
[\"-\", \"+\"], \"count\": \"10\", }, )", "\"mygroup\", \"consumer\": \"Alice\", \"millisecond\": \"3600000\", \"stream_id\": \"1526569498055-0\", \"time\": \"TIME\", \"timestamp\":", "mystream\", None) def test_xdel(judge_command): judge_command( \"XDEL mystream 1581165000000 1549611229000 1581060831000\",", "mystream writers 0-0 0-0\", { \"command\": \"XREAD\", \"count_const\": \"COUNT\", \"count\":", "\"STREAMS\", \"keys\": \"key1 1 key2\", \"stream_id\": \"2\", }, ) judge_command(", "\"STREAM\", \"key\": \"mystream\", \"full_const\": \"FULL\", \"count_const\": \"count\", \"count\": \"10\", },", "\"COUNT\", \"count\": \"2\", \"streams\": \"STREAMS\", # FIXME current grammar can't", "}, ) def test_xreadgroup(judge_command): judge_command( \"XREADGROUP GROUP mygroup1 Bob COUNT", "judge_command( \"XGROUP CREATE mykey mygroup 123\", { \"command\": \"XGROUP\", \"stream_create\":", "\"count_const\": \"count\", \"count\": \"10\", }, ) def test_xgroup_create(judge_command): judge_command( \"XGROUP", "judge_command( \"XCLAIM mystream mygroup Alice 3600000 1526569498055-0 TIME 123456789\", {", "0-0\", { \"command\": \"XREAD\", \"count_const\": \"COUNT\", \"count\": \"2\", \"streams\": \"STREAMS\",", "[\"1526985054069\", \"1526985055069\"], }, ) judge_command( \"XRANGE somestream 1526985054069 1526985055069-10\", {", "destroy mykey\", None) judge_command(\"XGROUP DESTROY mykey mygroup $\", None) def", "mykey mygroup $\", { \"command\": \"XGROUP\", \"stream_create\": \"CREATE\", \"key\": \"mykey\",", "\"stream_id\": \"*\", }, ) judge_command( \"xadd mystream * key value\",", "mystream mygroup Alice 3600000 1526569498055-0\", { \"command\": \"XCLAIM\", \"key\": \"mystream\",", "1526985054069 1526985055069-10 count 10\", { \"command\": \"XRANGE\", \"key\": \"somestream\", \"stream_id\":", "CREATE mykey\", None) def test_xgroup_setid(judge_command): judge_command( \"XGROUP SETID mykey mygroup", "1 key2 2\", { \"command\": \"XREADGROUP\", \"stream_group\": \"GROUP\", \"group\": 
\"mygroup1\",", "keys. \"keys\": \"mystream writers 0-0\", \"stream_id\": \"0-0\", }, ) judge_command(", "mystream group55 - + 10\", { \"command\": \"XPENDING\", \"key\": \"mystream\",", "\" XTRIM mystream MAXLEN 2\", {\"command\": \"XTRIM\", \"key\": \"mystream\", \"maxlen\":", "\"key\": \"mystream\", \"group\": \"group1\", \"stream_id\": \"123123\", }, ) judge_command( \"XACK", ") judge_command( \"XRANGE somestream 1526985054069 1526985055069-10 count 10\", { \"command\":", "2 STREAMS mystream writers 0-0 0-0\", { \"command\": \"XREAD\", \"count_const\":", "test_xpending(judge_command): judge_command( \"XPENDING mystream group55\", {\"command\": \"XPENDING\", \"key\": \"mystream\", \"group\":", "}, ) # short of a parameter judge_command(\"XGROUP CREATE mykey", "Alice 3600000 1526569498055-0\", { \"command\": \"XCLAIM\", \"key\": \"mystream\", \"group\": \"mygroup\",", "\"COUNT\", \"count\": \"2\", \"streams\": \"STREAMS\", \"keys\": \"mystream writers 0-0\", \"block\":", "\"sfield\": \"hello\", \"svalue\": \"world\", \"stream_id\": \"123-123\", }, ) def test_xtrim(judge_command):", "key1 1 key2 2\", { \"command\": \"XREADGROUP\", \"stream_group\": \"GROUP\", \"group\":", "\"789\", }, ) judge_command( \"XCLAIM mystream mygroup Alice 3600000 1526569498055-0", "key value foo bar hello world\", { \"command\": \"xadd\", \"key\":", "1526985055069-10\", { \"command\": \"XRANGE\", \"key\": \"somestream\", \"stream_id\": [\"1526985054069\", \"1526985055069-10\"], },", "{ \"command\": \"XGROUP\", \"stream_destroy\": \"destroy\", \"key\": \"mykey\", \"group\": \"mygroup\", },", "None) judge_command(\"XGROUP DESTROY mykey mygroup $\", None) def test_xgroup_delconsumer(judge_command): judge_command(", "tokens # so the ids will be recongized to keys.", "{ \"command\": \"XGROUP\", \"stream_create\": \"CREATE\", \"key\": \"mykey\", \"group\": \"mygroup\", \"stream_id\":", "judge_command( \"xadd mystream MAXLEN 1000 * key value\", { \"command\":", "\"key\": \"mystream\", 
\"group\": \"mygroup\", \"consumer\": \"Alice\", \"millisecond\": \"3600000\", \"stream_id\": \"789\",", "{ \"command\": \"XREAD\", \"count_const\": \"COUNT\", \"count\": \"2\", \"streams\": \"STREAMS\", #", "mykey mygroup myconsumer\", { \"command\": \"XGROUP\", \"stream_delconsumer\": \"delconsumer\", \"key\": \"mykey\",", "\"command\": \"XRANGE\", \"key\": \"somestream\", \"stream_id\": [\"1526985054069\", \"1526985055069-10\"], }, ) judge_command(", "\"XRANGE somestream 1526985054069 1526985055069-10 count 10\", { \"command\": \"XRANGE\", \"key\":", "\"XGROUP destroy mykey mygroup\", { \"command\": \"XGROUP\", \"stream_destroy\": \"destroy\", \"key\":", "}, ) judge_command( \"xadd mystream 123-123 key value foo bar", "\"XCLAIM mystream mygroup Alice 3600000 1526569498055-0 123 456 789\", {", "\"XGROUP SETID mykey mygroup 123\", { \"command\": \"XGROUP\", \"stream_setid\": \"SETID\",", "\"mystream\", \"group\": \"mygroup\", \"consumer\": \"Alice\", \"millisecond\": \"3600000\", \"stream_id\": \"1526569498055-0\", \"retrycount\":", "judge_command( \" XTRIM mystream MAXLEN 2\", {\"command\": \"XTRIM\", \"key\": \"mystream\",", "the ids will be recongized to keys. 
\"keys\": \"mystream writers", "\"count_const\": \"COUNT\", \"count\": \"2\", \"streams\": \"STREAMS\", # FIXME current grammar", "\"stream_id\": \"$\", }, ) # two subcommand together shouldn't match", "\"stream\": \"STREAM\", \"key\": \"mystream\", \"full_const\": \"FULL\", \"count_const\": \"count\", \"count\": \"10\",", "\"10\", \"consumer\": \"myconsumer\", }, ) judge_command(\"XPENDING mystream group55 - +", "judge_command( \"XGROUP SETID mykey mygroup $\", { \"command\": \"XGROUP\", \"stream_setid\":", "somestream 1526985054069 1526985055069-10\", { \"command\": \"XRANGE\", \"key\": \"somestream\", \"stream_id\": [\"1526985054069\",", "\"*\", }, ) judge_command( \"xadd mystream * key value\", {", "\"1526569498055-0\", \"retrycount\": \"retrycount\", \"count\": \"7\", }, ) judge_command( \"XCLAIM mystream", "123123 111\", {\"command\": \"XACK\", \"key\": \"mystream\", \"group\": \"group1\", \"stream_id\": \"111\"},", "\"consumer\": \"Alice\", \"millisecond\": \"3600000\", \"stream_id\": \"1526569498055-0\", }, ) judge_command( \"XCLAIM", "\"XINFO STREAM mystream FULL count 10\", { \"command\": \"XINFO\", \"stream\":", "{ \"command\": \"XTRIM\", \"key\": \"mystream\", \"maxlen\": \"MAXLEN\", \"count\": \"2\", \"approximately\":", "\"xadd mystream * key value\", { \"command\": \"xadd\", \"key\": \"mystream\",", ") # spcify stream id judge_command( \"xadd mystream 123-123 key", "\"MAXLEN\", \"count\": \"1000\", \"sfield\": \"key\", \"svalue\": \"value\", \"stream_id\": \"*\", },", "123\", { \"command\": \"XGROUP\", \"stream_setid\": \"SETID\", \"key\": \"mykey\", \"group\": \"mygroup\",", "option judge_command( \"xadd mystream MAXLEN 1000 * key value\", {", "\"value\", \"stream_id\": \"*\", }, ) judge_command( \"xadd mystream * key", "{ \"command\": \"XREAD\", \"count_const\": \"COUNT\", \"count\": \"2\", \"streams\": \"STREAMS\", \"keys\":", "1526985054069 1526985055069\", { \"command\": \"XRANGE\", \"key\": \"somestream\", \"stream_id\": [\"1526985054069\", 
\"1526985055069\"],", "\"Bob\", \"count_const\": \"COUNT\", \"count\": \"1\", \"block\": \"BLOCK\", \"millisecond\": \"100\", \"noack\":", ") def test_xpending(judge_command): judge_command( \"XPENDING mystream group55\", {\"command\": \"XPENDING\", \"key\":", "123 SETID mykey mygroup $\", None) def test_xgroup_destroy(judge_command): judge_command( \"XGROUP", "mygroup\", { \"command\": \"XGROUP\", \"stream_destroy\": \"destroy\", \"key\": \"mykey\", \"group\": \"mygroup\",", ") judge_command( \"XINFO GROUPS mystream\", {\"command\": \"XINFO\", \"stream_groups\": \"GROUPS\", \"key\":" ]
[ "def test_main(self): with patch('find_forks.find_forks.find_forks', return_value=None) as find_forks_mock: main() sent_args =", "in test_interesting.py.\"\"\" response_mock = MagicMock() response_mock.read = Mock(return_value=json_response) if PY3:", "else: response_mock.code = 200 response_mock.info = Mock(return_value=(('link', '<https://api.github.com/repos/frost-nzcr4/find_forks/forks?page=2>; rel=\"next\", '", "None, 'repo': None, 'no_fetch': False}) find_forks_mock.assert_called_once_with(**sent_args) # Test __version__ exceptions.", "'<https://api.github.com/repos/frost-nzcr4/find_forks/forks?page=2>; rel=\"next\", ' '<https://api.github.com/repos/frost-nzcr4/find_forks/forks?page=3>; rel=\"last\"'), )) return response_mock def make_test(self,", "99, 'start_page': 3 } url = 'https://api.github.com/repos/frost-nzcr4/find_forks/forks?per_page=%s&page=%s' % (sent_args['per_page'], sent_args['start_page'])", "'repo': None, 'no_fetch': False}) find_forks_mock.assert_called_once_with(**sent_args) # Test __version__ exceptions. 
find_forks_mock", "\"\"\"To run this test you'll need to prepare git first,", "'https://api.github.com/repos/frost-nzcr4/find_forks/forks?per_page=%s&page=%s' % (sent_args['per_page'], sent_args['start_page']) with patch('find_forks.git_wrapper.subprocess.call', return_value=None) as call_mock: with", "disable=no-self-use from __future__ import absolute_import, division, print_function, unicode_literals from os", "absolute_import, division, print_function, unicode_literals from os import path import unittest", "self.assertIsNone(add_forks(url)) class FindForksTest(FindForksCommon): def test_add_forks(self): self.assertIsNone(add_forks('httttps://unavailable!url')) with open(path.join(BASEPATH, 'fixture/response.json'), 'rb')", "import unittest from six import PY3 from find_forks.__init__ import CONFIG", "rel=\"last\"') else: response_mock.code = 200 response_mock.info = Mock(return_value=(('link', '<https://api.github.com/repos/frost-nzcr4/find_forks/forks?page=2>; rel=\"next\",", "patch, MagicMock, Mock # pylint: disable=no-name-in-module else: from mock import", "= 'https://api.github.com/repos/frost-nzcr4/find_forks/forks?per_page=%s&page=%s' % (sent_args['per_page'], sent_args['start_page']) with patch('find_forks.git_wrapper.subprocess.call', return_value=None) as call_mock:", "= MagicMock() response_mock.read = Mock(return_value=json_response) if PY3: response_mock.status = 200", "(sent_args['per_page'], sent_args['start_page']) with patch('find_forks.git_wrapper.subprocess.call', return_value=None) as call_mock: with patch('find_forks.find_forks.add_forks', return_value=None)", "https://github.com/yagmort/symfony1.git git remote add test-origin-3 git@github.com:tjerkw/Android-SlideExpandableListView.git \"\"\" user, repo =", "'find_forks') user, repo = determine_names('test-origin-1') self.assertEqual(user, 'frost-nzcr4') self.assertEqual(repo, 'webmoney') user,", "patch('find_forks.git_wrapper.subprocess.call', return_value=None): 
self.assertEqual(add_forks(url), 'https://api.github.com/repos/frost-nzcr4/find_forks/forks?page=2') urlopen_mock.assert_called_once_with(url, timeout=6) if PY3: response_mock.status =", "200 response_mock.info = Mock(return_value=(('link', '<https://api.github.com/repos/frost-nzcr4/find_forks/forks?page=2>; rel=\"next\", ' '<https://api.github.com/repos/frost-nzcr4/find_forks/forks?page=3>; rel=\"last\"'), ))", "find_forks, main from .__init__ import BASEPATH if PY3: from unittest.mock", "test you'll need to prepare git first, run: git remote", "with patch('find_forks.find_forks.find_forks', return_value=None) as find_forks_mock: main() sent_args = CONFIG.copy() sent_args.update({'user':", "return_value=None): self.assertEqual(add_forks(url), 'https://api.github.com/repos/frost-nzcr4/find_forks/forks?page=2') urlopen_mock.assert_called_once_with(url, timeout=6) if PY3: response_mock.status = 404", "import patch, MagicMock, Mock class FindForksCommon(unittest.TestCase): @staticmethod def make_mock(json_response): \"\"\"Used", "patch('find_forks.git_wrapper.subprocess.call', return_value=None) as call_mock: with patch('find_forks.find_forks.add_forks', return_value=None) as add_forks_mock: find_forks(**sent_args)", "PY3: from unittest.mock import patch, MagicMock, Mock # pylint: disable=no-name-in-module", "if PY3: response_mock.status = 200 response_mock.getheader = Mock(return_value='<https://api.github.com/repos/frost-nzcr4/find_forks/forks?page=2>; rel=\"next\", '", "response_mock = self.make_mock(json_response) self.make_test(response_mock) def test_determine_names(self): \"\"\"To run this test", "MagicMock(side_effect=SystemError()) del find_forks_mock.__version__ modules = { 'find_forks.__init__': find_forks_mock } with", "from .__init__ import BASEPATH if PY3: from unittest.mock import patch,", ".__init__ import BASEPATH if PY3: from unittest.mock import patch, MagicMock,", "\"\"\"Used in test_interesting.py.\"\"\" response_mock = MagicMock() 
response_mock.read = Mock(return_value=json_response) if", "response_mock.code = 200 response_mock.info = Mock(return_value=(('link', '<https://api.github.com/repos/frost-nzcr4/find_forks/forks?page=2>; rel=\"next\", ' '<https://api.github.com/repos/frost-nzcr4/find_forks/forks?page=3>;", "\"\"\"Used in test_interesting.py.\"\"\" url = 'https://github.com/frost-nzcr4/find_forks' with patch('find_forks.find_forks.urllib.request.urlopen', return_value=response_mock) as", "remote add test-origin-3 git@github.com:tjerkw/Android-SlideExpandableListView.git \"\"\" user, repo = determine_names() self.assertEqual(user,", "find_forks.find_forks import add_forks, determine_names, find_forks, main from .__init__ import BASEPATH", "FindForksTest(FindForksCommon): def test_add_forks(self): self.assertIsNone(add_forks('httttps://unavailable!url')) with open(path.join(BASEPATH, 'fixture/response.json'), 'rb') as fixture:", "json_response = fixture.read() response_mock = self.make_mock(json_response) self.make_test(response_mock) def test_determine_names(self): \"\"\"To", "git@github.com:tjerkw/Android-SlideExpandableListView.git \"\"\" user, repo = determine_names() self.assertEqual(user, 'frost-nzcr4') self.assertEqual(repo, 'find_forks')", "= 200 response_mock.info = Mock(return_value=(('link', '<https://api.github.com/repos/frost-nzcr4/find_forks/forks?page=2>; rel=\"next\", ' '<https://api.github.com/repos/frost-nzcr4/find_forks/forks?page=3>; rel=\"last\"'),", "patch('find_forks.find_forks.find_forks', return_value=None) as find_forks_mock: main() sent_args = CONFIG.copy() sent_args.update({'user': None,", "self.assertEqual(repo, 'find_forks') user, repo = determine_names('test-origin-1') self.assertEqual(user, 'frost-nzcr4') self.assertEqual(repo, 'webmoney')", "= determine_names('test-origin-2') self.assertEqual(user, 'yagmort') self.assertEqual(repo, 'symfony1') user, repo = determine_names('test-origin-3')", "patch('find_forks.find_forks.urllib.request.urlopen', 
return_value=response_mock) as urlopen_mock: with patch('find_forks.git_wrapper.subprocess.call', return_value=None): self.assertEqual(add_forks(url), 'https://api.github.com/repos/frost-nzcr4/find_forks/forks?page=2') urlopen_mock.assert_called_once_with(url,", "url = 'https://api.github.com/repos/frost-nzcr4/find_forks/forks?per_page=%s&page=%s' % (sent_args['per_page'], sent_args['start_page']) with patch('find_forks.git_wrapper.subprocess.call', return_value=None) as", "\"\"\"test_find_fork.\"\"\" # pylint: disable=no-self-use from __future__ import absolute_import, division, print_function,", "'<https://api.github.com/repos/frost-nzcr4/find_forks/forks?page=3>; rel=\"last\"'), )) return response_mock def make_test(self, response_mock): \"\"\"Used in", "repo = determine_names('test-origin-1') self.assertEqual(user, 'frost-nzcr4') self.assertEqual(repo, 'webmoney') user, repo =", "add_forks_mock: find_forks(**sent_args) add_forks_mock.assert_called_once_with(url) call_mock.assert_called_once() def test_main(self): with patch('find_forks.find_forks.find_forks', return_value=None) as", "print_function, unicode_literals from os import path import unittest from six", "# pylint: disable=no-name-in-module else: from mock import patch, MagicMock, Mock", "repo = determine_names('name-with-an-error') def test_find_forks(self): sent_args = { 'per_page': 99,", "from six import PY3 from find_forks.__init__ import CONFIG from find_forks.find_forks", "import path import unittest from six import PY3 from find_forks.__init__", "test-origin-1 https://github.com/frost-nzcr4/find_forks.git git remote add test-origin-2 https://github.com/yagmort/symfony1.git git remote add", "self.make_mock(json_response) self.make_test(response_mock) def test_determine_names(self): \"\"\"To run this test you'll need", "= Mock(return_value=json_response) if PY3: response_mock.status = 200 response_mock.getheader = 
Mock(return_value='<https://api.github.com/repos/frost-nzcr4/find_forks/forks?page=2>;", "add test-origin-2 https://github.com/yagmort/symfony1.git git remote add test-origin-3 git@github.com:tjerkw/Android-SlideExpandableListView.git \"\"\" user,", "as urlopen_mock: with patch('find_forks.git_wrapper.subprocess.call', return_value=None): self.assertEqual(add_forks(url), 'https://api.github.com/repos/frost-nzcr4/find_forks/forks?page=2') urlopen_mock.assert_called_once_with(url, timeout=6) if", "determine_names('test-origin-3') self.assertEqual(user, 'tjerkw') self.assertEqual(repo, 'Android-SlideExpandableListView') with self.assertRaises(RuntimeError): user, repo =", "= MagicMock(side_effect=SystemError()) del find_forks_mock.__version__ modules = { 'find_forks.__init__': find_forks_mock }", "self.assertIsNone(add_forks('httttps://unavailable!url')) with open(path.join(BASEPATH, 'fixture/response.json'), 'rb') as fixture: json_response = fixture.read()", "response_mock = MagicMock() response_mock.read = Mock(return_value=json_response) if PY3: response_mock.status =", "PY3 from find_forks.__init__ import CONFIG from find_forks.find_forks import add_forks, determine_names,", "= { 'per_page': 99, 'start_page': 3 } url = 'https://api.github.com/repos/frost-nzcr4/find_forks/forks?per_page=%s&page=%s'", "__version__ exceptions. find_forks_mock = MagicMock(side_effect=SystemError()) del find_forks_mock.__version__ modules = {", "'no_fetch': False}) find_forks_mock.assert_called_once_with(**sent_args) # Test __version__ exceptions. 
find_forks_mock = MagicMock(side_effect=SystemError())", "import CONFIG from find_forks.find_forks import add_forks, determine_names, find_forks, main from", "response_mock.status = 404 else: response_mock.code = 404 self.assertIsNone(add_forks(url)) class FindForksTest(FindForksCommon):", "determine_names('test-origin-1') self.assertEqual(user, 'frost-nzcr4') self.assertEqual(repo, 'webmoney') user, repo = determine_names('test-origin-2') self.assertEqual(user,", "test_main(self): with patch('find_forks.find_forks.find_forks', return_value=None) as find_forks_mock: main() sent_args = CONFIG.copy()", "del find_forks_mock.__version__ modules = { 'find_forks.__init__': find_forks_mock } with patch.dict('sys.modules',", "open(path.join(BASEPATH, 'fixture/response.json'), 'rb') as fixture: json_response = fixture.read() response_mock =", "= Mock(return_value='<https://api.github.com/repos/frost-nzcr4/find_forks/forks?page=2>; rel=\"next\", ' '<https://api.github.com/repos/frost-nzcr4/find_forks/forks?page=3>; rel=\"last\"') else: response_mock.code = 200", "urlopen_mock: with patch('find_forks.git_wrapper.subprocess.call', return_value=None): self.assertEqual(add_forks(url), 'https://api.github.com/repos/frost-nzcr4/find_forks/forks?page=2') urlopen_mock.assert_called_once_with(url, timeout=6) if PY3:", "git remote add test-origin-1 https://github.com/frost-nzcr4/find_forks.git git remote add test-origin-2 https://github.com/yagmort/symfony1.git", "'webmoney') user, repo = determine_names('test-origin-2') self.assertEqual(user, 'yagmort') self.assertEqual(repo, 'symfony1') user,", "\"\"\" user, repo = determine_names() self.assertEqual(user, 'frost-nzcr4') self.assertEqual(repo, 'find_forks') user,", "'rb') as fixture: json_response = fixture.read() response_mock = self.make_mock(json_response) self.make_test(response_mock)", "six import PY3 from find_forks.__init__ import CONFIG from find_forks.find_forks import", "find_forks_mock.__version__ modules = { 
'find_forks.__init__': find_forks_mock } with patch.dict('sys.modules', modules):", "from mock import patch, MagicMock, Mock class FindForksCommon(unittest.TestCase): @staticmethod def", "determine_names('name-with-an-error') def test_find_forks(self): sent_args = { 'per_page': 99, 'start_page': 3", "find_forks_mock.assert_called_once_with(**sent_args) # Test __version__ exceptions. find_forks_mock = MagicMock(side_effect=SystemError()) del find_forks_mock.__version__", "response_mock.status = 200 response_mock.getheader = Mock(return_value='<https://api.github.com/repos/frost-nzcr4/find_forks/forks?page=2>; rel=\"next\", ' '<https://api.github.com/repos/frost-nzcr4/find_forks/forks?page=3>; rel=\"last\"')", "from find_forks.__init__ import CONFIG from find_forks.find_forks import add_forks, determine_names, find_forks,", "repo = determine_names('test-origin-3') self.assertEqual(user, 'tjerkw') self.assertEqual(repo, 'Android-SlideExpandableListView') with self.assertRaises(RuntimeError): user,", "first, run: git remote add test-origin-1 https://github.com/frost-nzcr4/find_forks.git git remote add", "BASEPATH if PY3: from unittest.mock import patch, MagicMock, Mock #", "return response_mock def make_test(self, response_mock): \"\"\"Used in test_interesting.py.\"\"\" url =", "def make_mock(json_response): \"\"\"Used in test_interesting.py.\"\"\" response_mock = MagicMock() response_mock.read =", "utf-8 \"\"\"test_find_fork.\"\"\" # pylint: disable=no-self-use from __future__ import absolute_import, division,", "add test-origin-1 https://github.com/frost-nzcr4/find_forks.git git remote add test-origin-2 https://github.com/yagmort/symfony1.git git remote", "with patch('find_forks.find_forks.urllib.request.urlopen', return_value=response_mock) as urlopen_mock: with patch('find_forks.git_wrapper.subprocess.call', return_value=None): self.assertEqual(add_forks(url), 'https://api.github.com/repos/frost-nzcr4/find_forks/forks?page=2')", "test-origin-3 
git@github.com:tjerkw/Android-SlideExpandableListView.git \"\"\" user, repo = determine_names() self.assertEqual(user, 'frost-nzcr4') self.assertEqual(repo,", "= determine_names('name-with-an-error') def test_find_forks(self): sent_args = { 'per_page': 99, 'start_page':", "modules = { 'find_forks.__init__': find_forks_mock } with patch.dict('sys.modules', modules): self.assertRaises(ImportError,", "response_mock.info = Mock(return_value=(('link', '<https://api.github.com/repos/frost-nzcr4/find_forks/forks?page=2>; rel=\"next\", ' '<https://api.github.com/repos/frost-nzcr4/find_forks/forks?page=3>; rel=\"last\"'), )) return", "False}) find_forks_mock.assert_called_once_with(**sent_args) # Test __version__ exceptions. find_forks_mock = MagicMock(side_effect=SystemError()) del", "in test_interesting.py.\"\"\" url = 'https://github.com/frost-nzcr4/find_forks' with patch('find_forks.find_forks.urllib.request.urlopen', return_value=response_mock) as urlopen_mock:", "pylint: disable=no-self-use from __future__ import absolute_import, division, print_function, unicode_literals from", "os import path import unittest from six import PY3 from", "response_mock.read = Mock(return_value=json_response) if PY3: response_mock.status = 200 response_mock.getheader =", "'Android-SlideExpandableListView') with self.assertRaises(RuntimeError): user, repo = determine_names('name-with-an-error') def test_find_forks(self): sent_args", "sent_args = CONFIG.copy() sent_args.update({'user': None, 'repo': None, 'no_fetch': False}) find_forks_mock.assert_called_once_with(**sent_args)", "as find_forks_mock: main() sent_args = CONFIG.copy() sent_args.update({'user': None, 'repo': None,", "division, print_function, unicode_literals from os import path import unittest from", "git remote add test-origin-2 https://github.com/yagmort/symfony1.git git remote add test-origin-3 git@github.com:tjerkw/Android-SlideExpandableListView.git", "main() sent_args = CONFIG.copy() sent_args.update({'user': None, 
'repo': None, 'no_fetch': False})", "return_value=None) as add_forks_mock: find_forks(**sent_args) add_forks_mock.assert_called_once_with(url) call_mock.assert_called_once() def test_main(self): with patch('find_forks.find_forks.find_forks',", "unittest from six import PY3 from find_forks.__init__ import CONFIG from", "sent_args = { 'per_page': 99, 'start_page': 3 } url =", ")) return response_mock def make_test(self, response_mock): \"\"\"Used in test_interesting.py.\"\"\" url", "'start_page': 3 } url = 'https://api.github.com/repos/frost-nzcr4/find_forks/forks?per_page=%s&page=%s' % (sent_args['per_page'], sent_args['start_page']) with", "unicode_literals from os import path import unittest from six import", "need to prepare git first, run: git remote add test-origin-1", "'per_page': 99, 'start_page': 3 } url = 'https://api.github.com/repos/frost-nzcr4/find_forks/forks?per_page=%s&page=%s' % (sent_args['per_page'],", "find_forks_mock = MagicMock(side_effect=SystemError()) del find_forks_mock.__version__ modules = { 'find_forks.__init__': find_forks_mock", "Mock # pylint: disable=no-name-in-module else: from mock import patch, MagicMock,", "# pylint: disable=no-self-use from __future__ import absolute_import, division, print_function, unicode_literals", "import absolute_import, division, print_function, unicode_literals from os import path import", "as fixture: json_response = fixture.read() response_mock = self.make_mock(json_response) self.make_test(response_mock) def", "https://github.com/frost-nzcr4/find_forks.git git remote add test-origin-2 https://github.com/yagmort/symfony1.git git remote add test-origin-3", "Test __version__ exceptions. 
find_forks_mock = MagicMock(side_effect=SystemError()) del find_forks_mock.__version__ modules =", "import PY3 from find_forks.__init__ import CONFIG from find_forks.find_forks import add_forks,", "import add_forks, determine_names, find_forks, main from .__init__ import BASEPATH if", "Mock class FindForksCommon(unittest.TestCase): @staticmethod def make_mock(json_response): \"\"\"Used in test_interesting.py.\"\"\" response_mock", "url = 'https://github.com/frost-nzcr4/find_forks' with patch('find_forks.find_forks.urllib.request.urlopen', return_value=response_mock) as urlopen_mock: with patch('find_forks.git_wrapper.subprocess.call',", "= 404 else: response_mock.code = 404 self.assertIsNone(add_forks(url)) class FindForksTest(FindForksCommon): def", "test_determine_names(self): \"\"\"To run this test you'll need to prepare git", "% (sent_args['per_page'], sent_args['start_page']) with patch('find_forks.git_wrapper.subprocess.call', return_value=None) as call_mock: with patch('find_forks.find_forks.add_forks',", "sent_args['start_page']) with patch('find_forks.git_wrapper.subprocess.call', return_value=None) as call_mock: with patch('find_forks.find_forks.add_forks', return_value=None) as", "sent_args.update({'user': None, 'repo': None, 'no_fetch': False}) find_forks_mock.assert_called_once_with(**sent_args) # Test __version__", "= determine_names() self.assertEqual(user, 'frost-nzcr4') self.assertEqual(repo, 'find_forks') user, repo = determine_names('test-origin-1')", "'<https://api.github.com/repos/frost-nzcr4/find_forks/forks?page=3>; rel=\"last\"') else: response_mock.code = 200 response_mock.info = Mock(return_value=(('link', '<https://api.github.com/repos/frost-nzcr4/find_forks/forks?page=2>;", "def test_find_forks(self): sent_args = { 'per_page': 99, 'start_page': 3 }", "} url = 'https://api.github.com/repos/frost-nzcr4/find_forks/forks?per_page=%s&page=%s' % (sent_args['per_page'], sent_args['start_page']) with 
patch('find_forks.git_wrapper.subprocess.call', return_value=None)", "user, repo = determine_names('test-origin-1') self.assertEqual(user, 'frost-nzcr4') self.assertEqual(repo, 'webmoney') user, repo", "self.assertEqual(add_forks(url), 'https://api.github.com/repos/frost-nzcr4/find_forks/forks?page=2') urlopen_mock.assert_called_once_with(url, timeout=6) if PY3: response_mock.status = 404 else:", "add test-origin-3 git@github.com:tjerkw/Android-SlideExpandableListView.git \"\"\" user, repo = determine_names() self.assertEqual(user, 'frost-nzcr4')", "as add_forks_mock: find_forks(**sent_args) add_forks_mock.assert_called_once_with(url) call_mock.assert_called_once() def test_main(self): with patch('find_forks.find_forks.find_forks', return_value=None)", "coding: utf-8 \"\"\"test_find_fork.\"\"\" # pylint: disable=no-self-use from __future__ import absolute_import,", "pylint: disable=no-name-in-module else: from mock import patch, MagicMock, Mock class", "self.assertEqual(user, 'frost-nzcr4') self.assertEqual(repo, 'webmoney') user, repo = determine_names('test-origin-2') self.assertEqual(user, 'yagmort')", "import patch, MagicMock, Mock # pylint: disable=no-name-in-module else: from mock", "'frost-nzcr4') self.assertEqual(repo, 'webmoney') user, repo = determine_names('test-origin-2') self.assertEqual(user, 'yagmort') self.assertEqual(repo,", "import BASEPATH if PY3: from unittest.mock import patch, MagicMock, Mock", "user, repo = determine_names() self.assertEqual(user, 'frost-nzcr4') self.assertEqual(repo, 'find_forks') user, repo", "self.assertEqual(user, 'frost-nzcr4') self.assertEqual(repo, 'find_forks') user, repo = determine_names('test-origin-1') self.assertEqual(user, 'frost-nzcr4')", "fixture.read() response_mock = self.make_mock(json_response) self.make_test(response_mock) def test_determine_names(self): \"\"\"To run this", "main from .__init__ import BASEPATH if PY3: from unittest.mock import", "= 200 response_mock.getheader = 
Mock(return_value='<https://api.github.com/repos/frost-nzcr4/find_forks/forks?page=2>; rel=\"next\", ' '<https://api.github.com/repos/frost-nzcr4/find_forks/forks?page=3>; rel=\"last\"') else:", "test_find_forks(self): sent_args = { 'per_page': 99, 'start_page': 3 } url", "test_interesting.py.\"\"\" response_mock = MagicMock() response_mock.read = Mock(return_value=json_response) if PY3: response_mock.status", "from __future__ import absolute_import, division, print_function, unicode_literals from os import", "determine_names('test-origin-2') self.assertEqual(user, 'yagmort') self.assertEqual(repo, 'symfony1') user, repo = determine_names('test-origin-3') self.assertEqual(user,", "{ 'per_page': 99, 'start_page': 3 } url = 'https://api.github.com/repos/frost-nzcr4/find_forks/forks?per_page=%s&page=%s' %", "from unittest.mock import patch, MagicMock, Mock # pylint: disable=no-name-in-module else:", "make_test(self, response_mock): \"\"\"Used in test_interesting.py.\"\"\" url = 'https://github.com/frost-nzcr4/find_forks' with patch('find_forks.find_forks.urllib.request.urlopen',", "disable=no-name-in-module else: from mock import patch, MagicMock, Mock class FindForksCommon(unittest.TestCase):", "= determine_names('test-origin-3') self.assertEqual(user, 'tjerkw') self.assertEqual(repo, 'Android-SlideExpandableListView') with self.assertRaises(RuntimeError): user, repo", "rel=\"last\"'), )) return response_mock def make_test(self, response_mock): \"\"\"Used in test_interesting.py.\"\"\"", "CONFIG from find_forks.find_forks import add_forks, determine_names, find_forks, main from .__init__", "from find_forks.find_forks import add_forks, determine_names, find_forks, main from .__init__ import", "call_mock.assert_called_once() def test_main(self): with patch('find_forks.find_forks.find_forks', return_value=None) as find_forks_mock: main() sent_args", "return_value=None) as find_forks_mock: main() sent_args = CONFIG.copy() sent_args.update({'user': None, 'repo':", 
"'symfony1') user, repo = determine_names('test-origin-3') self.assertEqual(user, 'tjerkw') self.assertEqual(repo, 'Android-SlideExpandableListView') with", "make_mock(json_response): \"\"\"Used in test_interesting.py.\"\"\" response_mock = MagicMock() response_mock.read = Mock(return_value=json_response)", "'https://github.com/frost-nzcr4/find_forks' with patch('find_forks.find_forks.urllib.request.urlopen', return_value=response_mock) as urlopen_mock: with patch('find_forks.git_wrapper.subprocess.call', return_value=None): self.assertEqual(add_forks(url),", "remote add test-origin-2 https://github.com/yagmort/symfony1.git git remote add test-origin-3 git@github.com:tjerkw/Android-SlideExpandableListView.git \"\"\"", "MagicMock, Mock # pylint: disable=no-name-in-module else: from mock import patch,", "response_mock.getheader = Mock(return_value='<https://api.github.com/repos/frost-nzcr4/find_forks/forks?page=2>; rel=\"next\", ' '<https://api.github.com/repos/frost-nzcr4/find_forks/forks?page=3>; rel=\"last\"') else: response_mock.code =", "'yagmort') self.assertEqual(repo, 'symfony1') user, repo = determine_names('test-origin-3') self.assertEqual(user, 'tjerkw') self.assertEqual(repo,", "from os import path import unittest from six import PY3", "unittest.mock import patch, MagicMock, Mock # pylint: disable=no-name-in-module else: from", "200 response_mock.getheader = Mock(return_value='<https://api.github.com/repos/frost-nzcr4/find_forks/forks?page=2>; rel=\"next\", ' '<https://api.github.com/repos/frost-nzcr4/find_forks/forks?page=3>; rel=\"last\"') else: response_mock.code", "with patch('find_forks.git_wrapper.subprocess.call', return_value=None): self.assertEqual(add_forks(url), 'https://api.github.com/repos/frost-nzcr4/find_forks/forks?page=2') urlopen_mock.assert_called_once_with(url, timeout=6) if PY3: response_mock.status", "@staticmethod def make_mock(json_response): \"\"\"Used in test_interesting.py.\"\"\" response_mock = MagicMock() response_mock.read", 
"404 self.assertIsNone(add_forks(url)) class FindForksTest(FindForksCommon): def test_add_forks(self): self.assertIsNone(add_forks('httttps://unavailable!url')) with open(path.join(BASEPATH, 'fixture/response.json'),", "repo = determine_names('test-origin-2') self.assertEqual(user, 'yagmort') self.assertEqual(repo, 'symfony1') user, repo =", "repo = determine_names() self.assertEqual(user, 'frost-nzcr4') self.assertEqual(repo, 'find_forks') user, repo =", "self.assertRaises(RuntimeError): user, repo = determine_names('name-with-an-error') def test_find_forks(self): sent_args = {", "mock import patch, MagicMock, Mock class FindForksCommon(unittest.TestCase): @staticmethod def make_mock(json_response):", "test_add_forks(self): self.assertIsNone(add_forks('httttps://unavailable!url')) with open(path.join(BASEPATH, 'fixture/response.json'), 'rb') as fixture: json_response =", "# Test __version__ exceptions. find_forks_mock = MagicMock(side_effect=SystemError()) del find_forks_mock.__version__ modules", "class FindForksTest(FindForksCommon): def test_add_forks(self): self.assertIsNone(add_forks('httttps://unavailable!url')) with open(path.join(BASEPATH, 'fixture/response.json'), 'rb') as", "3 } url = 'https://api.github.com/repos/frost-nzcr4/find_forks/forks?per_page=%s&page=%s' % (sent_args['per_page'], sent_args['start_page']) with patch('find_forks.git_wrapper.subprocess.call',", "= fixture.read() response_mock = self.make_mock(json_response) self.make_test(response_mock) def test_determine_names(self): \"\"\"To run", "determine_names() self.assertEqual(user, 'frost-nzcr4') self.assertEqual(repo, 'find_forks') user, repo = determine_names('test-origin-1') self.assertEqual(user,", "patch('find_forks.find_forks.add_forks', return_value=None) as add_forks_mock: find_forks(**sent_args) add_forks_mock.assert_called_once_with(url) call_mock.assert_called_once() def test_main(self): with", "with patch('find_forks.find_forks.add_forks', return_value=None) as add_forks_mock: 
find_forks(**sent_args) add_forks_mock.assert_called_once_with(url) call_mock.assert_called_once() def test_main(self):", "PY3: response_mock.status = 200 response_mock.getheader = Mock(return_value='<https://api.github.com/repos/frost-nzcr4/find_forks/forks?page=2>; rel=\"next\", ' '<https://api.github.com/repos/frost-nzcr4/find_forks/forks?page=3>;", "def test_add_forks(self): self.assertIsNone(add_forks('httttps://unavailable!url')) with open(path.join(BASEPATH, 'fixture/response.json'), 'rb') as fixture: json_response", "'fixture/response.json'), 'rb') as fixture: json_response = fixture.read() response_mock = self.make_mock(json_response)", "self.assertEqual(user, 'tjerkw') self.assertEqual(repo, 'Android-SlideExpandableListView') with self.assertRaises(RuntimeError): user, repo = determine_names('name-with-an-error')", "return_value=None) as call_mock: with patch('find_forks.find_forks.add_forks', return_value=None) as add_forks_mock: find_forks(**sent_args) add_forks_mock.assert_called_once_with(url)", "else: from mock import patch, MagicMock, Mock class FindForksCommon(unittest.TestCase): @staticmethod", "run: git remote add test-origin-1 https://github.com/frost-nzcr4/find_forks.git git remote add test-origin-2", "= Mock(return_value=(('link', '<https://api.github.com/repos/frost-nzcr4/find_forks/forks?page=2>; rel=\"next\", ' '<https://api.github.com/repos/frost-nzcr4/find_forks/forks?page=3>; rel=\"last\"'), )) return response_mock", "remote add test-origin-1 https://github.com/frost-nzcr4/find_forks.git git remote add test-origin-2 https://github.com/yagmort/symfony1.git git", "find_forks_mock: main() sent_args = CONFIG.copy() sent_args.update({'user': None, 'repo': None, 'no_fetch':", "test_interesting.py.\"\"\" url = 'https://github.com/frost-nzcr4/find_forks' with patch('find_forks.find_forks.urllib.request.urlopen', return_value=response_mock) as urlopen_mock: with", 
"Mock(return_value='<https://api.github.com/repos/frost-nzcr4/find_forks/forks?page=2>; rel=\"next\", ' '<https://api.github.com/repos/frost-nzcr4/find_forks/forks?page=3>; rel=\"last\"') else: response_mock.code = 200 response_mock.info", "exceptions. find_forks_mock = MagicMock(side_effect=SystemError()) del find_forks_mock.__version__ modules = { 'find_forks.__init__':", "Mock(return_value=(('link', '<https://api.github.com/repos/frost-nzcr4/find_forks/forks?page=2>; rel=\"next\", ' '<https://api.github.com/repos/frost-nzcr4/find_forks/forks?page=3>; rel=\"last\"'), )) return response_mock def", "if PY3: response_mock.status = 404 else: response_mock.code = 404 self.assertIsNone(add_forks(url))", "run this test you'll need to prepare git first, run:", "fixture: json_response = fixture.read() response_mock = self.make_mock(json_response) self.make_test(response_mock) def test_determine_names(self):", "= self.make_mock(json_response) self.make_test(response_mock) def test_determine_names(self): \"\"\"To run this test you'll", "user, repo = determine_names('test-origin-3') self.assertEqual(user, 'tjerkw') self.assertEqual(repo, 'Android-SlideExpandableListView') with self.assertRaises(RuntimeError):", "find_forks.__init__ import CONFIG from find_forks.find_forks import add_forks, determine_names, find_forks, main", "call_mock: with patch('find_forks.find_forks.add_forks', return_value=None) as add_forks_mock: find_forks(**sent_args) add_forks_mock.assert_called_once_with(url) call_mock.assert_called_once() def", "PY3: response_mock.status = 404 else: response_mock.code = 404 self.assertIsNone(add_forks(url)) class", "to prepare git first, run: git remote add test-origin-1 https://github.com/frost-nzcr4/find_forks.git", "this test you'll need to prepare git first, run: git", "with self.assertRaises(RuntimeError): user, repo = determine_names('name-with-an-error') def test_find_forks(self): sent_args =", "' 
'<https://api.github.com/repos/frost-nzcr4/find_forks/forks?page=3>; rel=\"last\"') else: response_mock.code = 200 response_mock.info = Mock(return_value=(('link',", "with open(path.join(BASEPATH, 'fixture/response.json'), 'rb') as fixture: json_response = fixture.read() response_mock", "return_value=response_mock) as urlopen_mock: with patch('find_forks.git_wrapper.subprocess.call', return_value=None): self.assertEqual(add_forks(url), 'https://api.github.com/repos/frost-nzcr4/find_forks/forks?page=2') urlopen_mock.assert_called_once_with(url, timeout=6)", "self.assertEqual(repo, 'Android-SlideExpandableListView') with self.assertRaises(RuntimeError): user, repo = determine_names('name-with-an-error') def test_find_forks(self):", "'https://api.github.com/repos/frost-nzcr4/find_forks/forks?page=2') urlopen_mock.assert_called_once_with(url, timeout=6) if PY3: response_mock.status = 404 else: response_mock.code", "user, repo = determine_names('test-origin-2') self.assertEqual(user, 'yagmort') self.assertEqual(repo, 'symfony1') user, repo", "response_mock.code = 404 self.assertIsNone(add_forks(url)) class FindForksTest(FindForksCommon): def test_add_forks(self): self.assertIsNone(add_forks('httttps://unavailable!url')) with", "None, 'no_fetch': False}) find_forks_mock.assert_called_once_with(**sent_args) # Test __version__ exceptions. 
find_forks_mock =", "rel=\"next\", ' '<https://api.github.com/repos/frost-nzcr4/find_forks/forks?page=3>; rel=\"last\"') else: response_mock.code = 200 response_mock.info =", "as call_mock: with patch('find_forks.find_forks.add_forks', return_value=None) as add_forks_mock: find_forks(**sent_args) add_forks_mock.assert_called_once_with(url) call_mock.assert_called_once()", "'frost-nzcr4') self.assertEqual(repo, 'find_forks') user, repo = determine_names('test-origin-1') self.assertEqual(user, 'frost-nzcr4') self.assertEqual(repo,", "path import unittest from six import PY3 from find_forks.__init__ import", "else: response_mock.code = 404 self.assertIsNone(add_forks(url)) class FindForksTest(FindForksCommon): def test_add_forks(self): self.assertIsNone(add_forks('httttps://unavailable!url'))", "def test_determine_names(self): \"\"\"To run this test you'll need to prepare", "test-origin-2 https://github.com/yagmort/symfony1.git git remote add test-origin-3 git@github.com:tjerkw/Android-SlideExpandableListView.git \"\"\" user, repo", "determine_names, find_forks, main from .__init__ import BASEPATH if PY3: from", "add_forks_mock.assert_called_once_with(url) call_mock.assert_called_once() def test_main(self): with patch('find_forks.find_forks.find_forks', return_value=None) as find_forks_mock: main()", "find_forks(**sent_args) add_forks_mock.assert_called_once_with(url) call_mock.assert_called_once() def test_main(self): with patch('find_forks.find_forks.find_forks', return_value=None) as find_forks_mock:", "<filename>tests/test_find_forks/test_find_forks.py # coding: utf-8 \"\"\"test_find_fork.\"\"\" # pylint: disable=no-self-use from __future__", "user, repo = determine_names('name-with-an-error') def test_find_forks(self): sent_args = { 'per_page':", "__future__ import absolute_import, division, print_function, unicode_literals from os import path", "# coding: utf-8 \"\"\"test_find_fork.\"\"\" # pylint: disable=no-self-use from __future__ import", 
"self.assertEqual(repo, 'symfony1') user, repo = determine_names('test-origin-3') self.assertEqual(user, 'tjerkw') self.assertEqual(repo, 'Android-SlideExpandableListView')", "class FindForksCommon(unittest.TestCase): @staticmethod def make_mock(json_response): \"\"\"Used in test_interesting.py.\"\"\" response_mock =", "FindForksCommon(unittest.TestCase): @staticmethod def make_mock(json_response): \"\"\"Used in test_interesting.py.\"\"\" response_mock = MagicMock()", "response_mock def make_test(self, response_mock): \"\"\"Used in test_interesting.py.\"\"\" url = 'https://github.com/frost-nzcr4/find_forks'", "MagicMock() response_mock.read = Mock(return_value=json_response) if PY3: response_mock.status = 200 response_mock.getheader", "with patch('find_forks.git_wrapper.subprocess.call', return_value=None) as call_mock: with patch('find_forks.find_forks.add_forks', return_value=None) as add_forks_mock:", "prepare git first, run: git remote add test-origin-1 https://github.com/frost-nzcr4/find_forks.git git", "' '<https://api.github.com/repos/frost-nzcr4/find_forks/forks?page=3>; rel=\"last\"'), )) return response_mock def make_test(self, response_mock): \"\"\"Used", "MagicMock, Mock class FindForksCommon(unittest.TestCase): @staticmethod def make_mock(json_response): \"\"\"Used in test_interesting.py.\"\"\"", "CONFIG.copy() sent_args.update({'user': None, 'repo': None, 'no_fetch': False}) find_forks_mock.assert_called_once_with(**sent_args) # Test", "if PY3: from unittest.mock import patch, MagicMock, Mock # pylint:", "= 'https://github.com/frost-nzcr4/find_forks' with patch('find_forks.find_forks.urllib.request.urlopen', return_value=response_mock) as urlopen_mock: with patch('find_forks.git_wrapper.subprocess.call', return_value=None):", "self.assertEqual(repo, 'webmoney') user, repo = determine_names('test-origin-2') self.assertEqual(user, 'yagmort') self.assertEqual(repo, 'symfony1')", "= 404 self.assertIsNone(add_forks(url)) class 
FindForksTest(FindForksCommon): def test_add_forks(self): self.assertIsNone(add_forks('httttps://unavailable!url')) with open(path.join(BASEPATH,", "= { 'find_forks.__init__': find_forks_mock } with patch.dict('sys.modules', modules): self.assertRaises(ImportError, main)", "self.assertEqual(user, 'yagmort') self.assertEqual(repo, 'symfony1') user, repo = determine_names('test-origin-3') self.assertEqual(user, 'tjerkw')", "rel=\"next\", ' '<https://api.github.com/repos/frost-nzcr4/find_forks/forks?page=3>; rel=\"last\"'), )) return response_mock def make_test(self, response_mock):", "patch, MagicMock, Mock class FindForksCommon(unittest.TestCase): @staticmethod def make_mock(json_response): \"\"\"Used in", "= determine_names('test-origin-1') self.assertEqual(user, 'frost-nzcr4') self.assertEqual(repo, 'webmoney') user, repo = determine_names('test-origin-2')", "self.make_test(response_mock) def test_determine_names(self): \"\"\"To run this test you'll need to", "404 else: response_mock.code = 404 self.assertIsNone(add_forks(url)) class FindForksTest(FindForksCommon): def test_add_forks(self):", "response_mock): \"\"\"Used in test_interesting.py.\"\"\" url = 'https://github.com/frost-nzcr4/find_forks' with patch('find_forks.find_forks.urllib.request.urlopen', return_value=response_mock)", "timeout=6) if PY3: response_mock.status = 404 else: response_mock.code = 404", "git first, run: git remote add test-origin-1 https://github.com/frost-nzcr4/find_forks.git git remote", "'tjerkw') self.assertEqual(repo, 'Android-SlideExpandableListView') with self.assertRaises(RuntimeError): user, repo = determine_names('name-with-an-error') def", "you'll need to prepare git first, run: git remote add", "def make_test(self, response_mock): \"\"\"Used in test_interesting.py.\"\"\" url = 'https://github.com/frost-nzcr4/find_forks' with", "= CONFIG.copy() sent_args.update({'user': None, 'repo': None, 'no_fetch': False}) find_forks_mock.assert_called_once_with(**sent_args) #", "git 
remote add test-origin-3 git@github.com:tjerkw/Android-SlideExpandableListView.git \"\"\" user, repo = determine_names()", "Mock(return_value=json_response) if PY3: response_mock.status = 200 response_mock.getheader = Mock(return_value='<https://api.github.com/repos/frost-nzcr4/find_forks/forks?page=2>; rel=\"next\",", "add_forks, determine_names, find_forks, main from .__init__ import BASEPATH if PY3:", "urlopen_mock.assert_called_once_with(url, timeout=6) if PY3: response_mock.status = 404 else: response_mock.code =" ]
[ "if self.dist_fip_count == 0: #remove default route entry device =", "get_floating_ips(self): \"\"\"Filter Floating IPs to be hosted on this agent.\"\"\"", "fip_cidr): \"\"\"Remove floating IP from FIP namespace.\"\"\" floating_ip = fip_cidr.split('/')[0]", "= ('POSTROUTING', '! -i %(interface_name)s ' '! -o %(interface_name)s -m", "snat_idx) except Exception: LOG.exception(_LE('DVR: removed snat failed')) def get_gw_port_host(self): host", "the router if not port: return try: # TODO(mrsmith): optimize", "# first step is to move the deletion of the", "None: self.rtr_fip_subnet = self.fip_ns.local_subnets.allocate( self.router_id) rtr_2_fip, fip_2_rtr = self.rtr_fip_subnet.get_pair() fip_ns_name", "rtr_2_fip_name = self.fip_ns.get_rtr_ext_device_name(self.router_id) fip_2_rtr_name = self.fip_ns.get_int_device_name(self.router_id) if self.rtr_fip_subnet is None:", "for p in snat_ports if p['fixed_ips'][0]['subnet_id'] == subnet_id] if match_port:", "# check if this is the last FIP for this", "self.snat_namespace def delete_snat_namespace(self): # TODO(mlavalle): in the near future, this", "- update FIP namespace ip_cidr = common_utils.ip_to_cidr(fip['floating_ip_address']) self.floating_ip_added_dist(fip, ip_cidr) return", "self.get_gw_port_host() == self.host) if not is_this_snat_host: return ns_name = dvr_snat_ns.SnatNamespace.get_snat_ns_name(self.router['id'])", "3.0 compatibility snat_idx = binascii.crc32(ip_cidr) & 0xffffffff # xor-fold the", "\"\"\"Filter Floating Agent GW port for the external network.\"\"\" fip_ports", "in compliance with the License. 
You may obtain # a", "sn_port['ip_cidr'], sn_port['mac_address'], interface_name, dvr_snat_ns.SNAT_INT_DEV_PREFIX) self._set_subnet_arp_info(port) def _dvr_internal_network_removed(self, port): if not", "= ip_lib.IPRule(namespace=self.ns_name) ns_ipd = ip_lib.IPDevice(sn_int, namespace=self.ns_name) ns_ipd.route.delete_gateway(gateway, table=snat_idx) ns_ipr.rule.delete(ip_cidr, snat_idx,", "i['host'] == self.host] def get_snat_interfaces(self): return self.router.get(l3_constants.SNAT_ROUTER_INTF_KEY, []) def get_snat_int_device_name(self,", "that creates a gateway for a dvr. The first step", "= (snat_idx >> 30) ^ (snat_idx & MASK_30) if snat_idx", "def get_floating_ips(self): \"\"\"Filter Floating IPs to be hosted on this", "that another router could # come in and want to", "entry into router namespace for the subnet.\"\"\" port = self._get_internal_port(subnet_id)", "\"\"\"Adds rules and routes for SNAT redirection.\"\"\" try: ip_cidr =", "# code in the L3 agent that removes an external", "[]) def get_snat_int_device_name(self, port_id): long_name = dvr_snat_ns.SNAT_INT_DEV_PREFIX + port_id return", "ip_lib.IPDevice(sn_int, namespace=self.ns_name) ns_ipd.route.delete_gateway(gateway, table=snat_idx) ns_ipr.rule.delete(ip_cidr, snat_idx, snat_idx) except Exception: LOG.exception(_LE('DVR:", "optimize the calls below for bulk calls interface_name = self.get_internal_device_name(port['id'])", "for i in floating_ips if i['host'] == self.host] def get_snat_interfaces(self):", "self.rtr_fip_subnet = self.fip_ns.local_subnets.allocate( self.router_id) rtr_2_fip, fip_2_rtr = self.rtr_fip_subnet.get_pair() fip_ns_name =", "super(DvrRouter, self).__init__(*args, **kwargs) self.agent = agent self.host = host self.floating_ips_dict", "subnet_id, 'add') def _map_internal_interfaces(self, int_port, snat_ports): \"\"\"Return the SNAT port", "neutron.agent.linux import ip_lib from neutron.common import constants as l3_constants from", "info retrieved from Plugin for existing 
ports.\"\"\" if 'id' not", "= common_utils.ip_to_cidr(fip['floating_ip_address']) self.floating_ip_added_dist(fip, ip_cidr) return l3_constants.FLOATINGIP_STATUS_ACTIVE def remove_floating_ip(self, device, ip_cidr):", "ip_lib.send_garp_for_proxyarp(fip_ns_name, interface_name, floating_ip, self.agent_conf.send_arp_for_ha) # update internal structures self.dist_fip_count =", "32 bits or less but more than the system generated", "for DVR. Remove all the rules. This is safe because", "MASK_30) if snat_idx < 32768: snat_idx = snat_idx + MASK_30", "IP to FIP namespace.\"\"\" floating_ip = fip['floating_ip_address'] fixed_ip = fip['fixed_ip_address']", "is for Python 2.6 and 3.0 compatibility snat_idx = binascii.crc32(ip_cidr)", "a sign that dvr needs two router classes. is_this_snat_host =", "\"\"\"Remove floating IP from FIP namespace.\"\"\" floating_ip = fip_cidr.split('/')[0] rtr_2_fip_name", "I think I'd like a # semaphore to sync creation/deletion", "def _map_internal_interfaces(self, int_port, snat_ports): \"\"\"Return the SNAT port for the", "cidr. 
For IPv6 generate a crc32 bit hash and xor-fold", "want to start using this namespace while this is #", "interface_name = self.get_internal_device_name(port['id']) device = ip_lib.IPDevice(interface_name, namespace=self.ns_name) if operation ==", "another router could # come in and want to start", "in port['subnet']: return subnet_id = port['subnet']['id'] # TODO(Carl) Can we", "to in writing, software # distributed under the License is", "sn_port['mac_address'], interface_name, dvr_snat_ns.SNAT_INT_DEV_PREFIX) self._set_subnet_arp_info(port) def _dvr_internal_network_removed(self, port): if not self.ex_gw_port:", "fip_2_rtr = self.rtr_fip_subnet.get_pair() fip_ns_name = self.fip_ns.get_name() if floating_ip in self.floating_ips_dict:", "'delete': device.neigh.delete(ip, mac) except Exception: with excutils.save_and_reraise_exception(): LOG.exception(_LE(\"DVR: Failed updating", "= None self.dist_fip_count = None self.snat_namespace = None def get_floating_ips(self):", "or agreed to in writing, software # distributed under the", "update internal structures self.dist_fip_count = self.dist_fip_count + 1 def floating_ip_removed_dist(self,", "fixed_ip = int_port['fixed_ips'][0] subnet_id = fixed_ip['subnet_id'] match_port = [p for", "sn_int): \"\"\"Adds rules and routes for SNAT redirection.\"\"\" try: ip_cidr", "interface_name, action): \"\"\"Configures NAT rules for Floating IPs for DVR.", "= self.fip_ns.local_subnets.allocate( self.router_id) rtr_2_fip, fip_2_rtr = self.rtr_fip_subnet.get_pair() fip_ns_name = self.fip_ns.get_name()", "port['subnet']: return subnet_id = port['subnet']['id'] # TODO(Carl) Can we eliminate", "this router self.dist_fip_count = self.dist_fip_count - 1 if self.dist_fip_count ==", "Apache License, Version 2.0 (the \"License\"); you may # not", "to sync creation/deletion of this namespace. 
self.fip_ns.delete() self.fip_ns = None", "GW port for the external network.\"\"\" fip_ports = self.router.get(l3_constants.FLOATINGIP_AGENT_INTF_KEY, [])", "floating_ip, self.agent_conf.send_arp_for_ha) # update internal structures self.dist_fip_count = self.dist_fip_count +", "%s\", self.router['id']) return host def internal_network_added(self, port): super(DvrRouter, self).internal_network_added(port) ex_gw_port", "fip_ns_name = self.fip_ns.get_name() if floating_ip in self.floating_ips_dict: rule_pr = self.floating_ips_dict[floating_ip]", "router namespace for the subnet.\"\"\" port = self._get_internal_port(subnet_id) # update", "update arp entry only if the subnet is attached to", "remove_floating_ip(self, device, ip_cidr): super(DvrRouter, self).remove_floating_ip(device, ip_cidr) self.floating_ip_removed_dist(ip_cidr) def create_snat_namespace(self): #", "(c) 2015 Openstack Foundation # # Licensed under the Apache", "License, Version 2.0 (the \"License\"); you may # not use", "binascii import netaddr from oslo_log import log as logging from", "and interface_name: rule = ('POSTROUTING', '! -i %(interface_name)s ' '!", "not use this file except in compliance with the License.", "router's NAT rules will be in their own namespace. \"\"\"", "interface_name}) self.iptables_manager.ipv4['nat'].add_rule(*rule) self.iptables_manager.apply() def floating_ip_added_dist(self, fip, fip_cidr): \"\"\"Add floating IP", "rule in fip namespace fip_ns_name = self.fip_ns.get_name() rtr_2_fip, _ =", "6: # the crc32 & 0xffffffff is for Python 2.6", "ip, mac, subnet_id, operation): \"\"\"Add or delete arp entry into", "is attached to the router if not port: return try:", "# come in and want to start using this namespace", "& 0xffffffff # xor-fold the hash to reserve upper range", "rules will be in their own namespace. 
\"\"\" self.iptables_manager.ipv4['nat'].empty_chain('POSTROUTING') self.iptables_manager.ipv4['nat'].empty_chain('snat')", "(self.agent_conf.agent_mode == 'dvr_snat' and self.get_gw_port_host() == self.host) if not is_this_snat_host:", "Exception: with excutils.save_and_reraise_exception(): LOG.exception(_LE(\"DVR: Failed updating arp entry\")) def _set_subnet_arp_info(self,", "LOG.exception(_LE(\"DVR: Failed updating arp entry\")) def _set_subnet_arp_info(self, port): \"\"\"Set ARP", "namespace. \"\"\" self.iptables_manager.ipv4['nat'].empty_chain('POSTROUTING') self.iptables_manager.ipv4['nat'].empty_chain('snat') # Add back the jump to", "generate a crc32 bit hash and xor-fold to 30 bits.", "def add_floating_ip(self, fip, interface_name, device): if not self._add_fip_addr_to_device(fip, device): return", "such. I think I'd like a # semaphore to sync", "'--ctstate DNAT -j ACCEPT' % {'interface_name': interface_name}) self.iptables_manager.ipv4['nat'].add_rule(*rule) self.iptables_manager.apply() def", "License is distributed on an \"AS IS\" BASIS, WITHOUT #", "the given internal interface port.\"\"\" fixed_ip = int_port['fixed_ips'][0] subnet_id =", "router_ports = self.router.get(l3_constants.INTERFACE_KEY, []) for port in router_ports: fips =", "\"License\"); you may # not use this file except in", "arp entry into router namespace for the subnet.\"\"\" port =", "or less but more than the system generated entries i.e.", "could end up conflicting on # creating/destroying interfaces and such.", "net.value return snat_idx def _snat_redirect_add(self, gateway, sn_port, sn_int): \"\"\"Adds rules", "self.router['id']) return host def internal_network_added(self, port): super(DvrRouter, self).internal_network_added(port) ex_gw_port =", "common_utils from neutron.i18n import _LE LOG = logging.getLogger(__name__) # xor-folding", "ns_name = dvr_snat_ns.SnatNamespace.get_snat_ns_name(self.router['id']) self._set_subnet_info(sn_port) interface_name = 
self.get_snat_int_device_name(sn_port['id']) self._internal_network_added( ns_name, sn_port['network_id'],", "= self.router.get(l3_constants.INTERFACE_KEY, []) for port in router_ports: fips = port['fixed_ips']", "xor-fold to 30 bits. Use the freed range to extend", "is to move the creation of the snat namespace here", "import utils as common_utils from neutron.i18n import _LE LOG =", "super(DvrRouter, self).internal_network_removed(port) def get_floating_agent_gw_interface(self, ext_net_id): \"\"\"Filter Floating Agent GW port", "_get_snat_idx(ip_cidr): \"\"\"Generate index for DVR snat rules and route tables.", "as l3_constants from neutron.common import utils as common_utils from neutron.i18n", "is # destroying it. The two could end up conflicting", "netaddr from oslo_log import log as logging from oslo_utils import", "snat namespace here self.snat_namespace = dvr_snat_ns.SnatNamespace(self.router['id'], self.agent_conf, self.driver, self.use_ipv6) self.snat_namespace.create()", "= snat_idx + MASK_30 else: snat_idx = net.value return snat_idx", "import binascii import netaddr from oslo_log import log as logging", "= ip_lib.IPDevice(rtr_2_fip_name, namespace=self.ns_name) ns_ip = ip_lib.IPWrapper(namespace=fip_ns_name) device.route.delete_gateway(str(fip_2_rtr.ip), table=dvr_fip_ns.FIP_RT_TBL) self.fip_ns.local_subnets.release(self.router_id) self.rtr_fip_subnet", "and 3.0 compatibility snat_idx = binascii.crc32(ip_cidr) & 0xffffffff # xor-fold", "_LE LOG = logging.getLogger(__name__) # xor-folding mask used for IPv6", "exception/log? device = ip_lib.IPDevice(fip_2_rtr_name, namespace=fip_ns_name) device.route.delete_route(fip_cidr, str(rtr_2_fip.ip)) # check if", "{} self.snat_iptables_manager = None # Linklocal subnet for router and", "-o %(interface_name)s -m conntrack ! 
' '--ctstate DNAT -j ACCEPT'", "self._dvr_internal_network_removed(port) super(DvrRouter, self).internal_network_removed(port) def get_floating_agent_gw_interface(self, ext_net_id): \"\"\"Filter Floating Agent GW", "rule_pr) self.fip_ns.deallocate_rule_priority(rule_pr) #TODO(rajeev): Handle else case - exception/log? device =", "from neutron.agent.linux import ip_lib from neutron.common import constants as l3_constants", "interface_name: rule = ('POSTROUTING', '! -i %(interface_name)s ' '! -o", "hash to reserve upper range to extend smaller values snat_idx", "self.fip_ns.deallocate_rule_priority(rule_pr) #TODO(rajeev): Handle else case - exception/log? device = ip_lib.IPDevice(fip_2_rtr_name,", "logging from oslo_utils import excutils from neutron.agent.l3 import dvr_fip_ns from", "And add them back if the action is add_rules if", "p in snat_ports if p['fixed_ips'][0]['subnet_id'] == subnet_id] if match_port: return", "self.dist_fip_count == 0: #remove default route entry device = ip_lib.IPDevice(rtr_2_fip_name,", "Plugin for existing ports.\"\"\" if 'id' not in port['subnet']: return", "utils as common_utils from neutron.i18n import _LE LOG = logging.getLogger(__name__)", "= host self.floating_ips_dict = {} self.snat_iptables_manager = None # Linklocal", "last FIP for this router self.dist_fip_count = self.dist_fip_count - 1", "self.floating_ips_dict = {} self.snat_iptables_manager = None # Linklocal subnet for", "use the numeric value of the cidr. For IPv6 generate", "log as logging from oslo_utils import excutils from neutron.agent.l3 import", "as common_utils from neutron.i18n import _LE LOG = logging.getLogger(__name__) #", "self.snat_ports) if not sn_port: return # DVR handling code for", "distributed on an \"AS IS\" BASIS, WITHOUT # WARRANTIES OR", "table=snat_idx) ns_ipr.rule.add(ip_cidr, snat_idx, snat_idx) ns_ipwrapr.netns.execute(['sysctl', '-w', 'net.ipv4.conf.%s.' 
'send_redirects=0' % sn_int])", "return self.router.get(l3_constants.SNAT_ROUTER_INTF_KEY, []) def get_snat_int_device_name(self, port_id): long_name = dvr_snat_ns.SNAT_INT_DEV_PREFIX +", "future, this method should contain the # code in the", "self.router.get('gw_port_host') if not host: LOG.debug(\"gw_port_host missing from router: %s\", self.router['id'])", "l3_constants.FLOATINGIP_STATUS_ERROR # Special Handling for DVR - update FIP namespace", "host, *args, **kwargs): super(DvrRouter, self).__init__(*args, **kwargs) self.agent = agent self.host", "2.6 and 3.0 compatibility snat_idx = binascii.crc32(ip_cidr) & 0xffffffff #", "processing a router. subnet_ports = self.agent.get_ports_by_subnet(subnet_id) for p in subnet_ports:", "link self.rtr_fip_subnet = None self.dist_fip_count = None self.snat_namespace = None", "self.host) if not is_this_snat_host: return snat_interface = ( self.get_snat_int_device_name(sn_port['id'])) ns_name", "self.host = host self.floating_ips_dict = {} self.snat_iptables_manager = None #", "Version 2.0 (the \"License\"); you may # not use this", "= self.fip_ns.unsubscribe(self.router_id) if is_last: # TODO(Carl) I can't help but", "Floating Agent GW port for the external network.\"\"\" fip_ports =", "= self.floating_ips_dict[floating_ip] ip_rule = ip_lib.IPRule(namespace=self.ns_name) ip_rule.rule.delete(floating_ip, dvr_fip_ns.FIP_RT_TBL, rule_pr) self.fip_ns.deallocate_rule_priority(rule_pr) #TODO(rajeev):", "router and floating IP namespace link self.rtr_fip_subnet = None self.dist_fip_count", "= self.fip_ns.get_name() if floating_ip in self.floating_ips_dict: rule_pr = self.floating_ips_dict[floating_ip] ip_rule", "self.floating_ips_dict[floating_ip] ip_rule = ip_lib.IPRule(namespace=self.ns_name) ip_rule.rule.delete(floating_ip, dvr_fip_ns.FIP_RT_TBL, rule_pr) self.fip_ns.deallocate_rule_priority(rule_pr) #TODO(rajeev): Handle", "dvr_snat_ns.SnatNamespace(self.router['id'], self.agent_conf, self.driver, self.use_ipv6) 
self.snat_namespace.create() return self.snat_namespace def delete_snat_namespace(self): #", "routes for SNAT redirection.\"\"\" try: ip_cidr = sn_port['ip_cidr'] snat_idx =", "is safe because if use_namespaces is set as False then", "the near future, this method should contain the # code", "dvr_fip_ns from neutron.agent.l3 import dvr_snat_ns from neutron.agent.l3 import router_info as", "self.use_ipv6) self.snat_namespace.create() return self.snat_namespace def delete_snat_namespace(self): # TODO(mlavalle): in the", "dvr needs two router classes. is_this_snat_host = (self.agent_conf.agent_mode == 'dvr_snat'", "network.\"\"\" fip_ports = self.router.get(l3_constants.FLOATINGIP_AGENT_INTF_KEY, []) return next( (p for p", "fixed_ip['subnet_id'] match_port = [p for p in snat_ports if p['fixed_ips'][0]['subnet_id']", "under the License. import binascii import netaddr from oslo_log import", "ip_rule = ip_lib.IPRule(namespace=self.ns_name) ip_rule.rule.add(fixed_ip, dvr_fip_ns.FIP_RT_TBL, rule_pr) #Add routing rule in", "LOG.error(_LE('DVR: no map match_port found!')) @staticmethod def _get_snat_idx(ip_cidr): \"\"\"Generate index", "in fip namespace fip_ns_name = self.fip_ns.get_name() rtr_2_fip, _ = self.rtr_fip_subnet.get_pair()", "% {'interface_name': interface_name}) self.iptables_manager.ipv4['nat'].add_rule(*rule) self.iptables_manager.apply() def floating_ip_added_dist(self, fip, fip_cidr): \"\"\"Add", "_snat_redirect_add(self, gateway, sn_port, sn_int): \"\"\"Adds rules and routes for SNAT", "from router: %s\", self.router['id']) return host def internal_network_added(self, port): super(DvrRouter,", "p['fixed_ips'][0]['subnet_id'] == subnet_id] if match_port: return match_port[0] else: LOG.error(_LE('DVR: no", "**kwargs) self.agent = agent self.host = host self.floating_ips_dict = {}", "self.dist_fip_count = self.dist_fip_count - 1 if self.dist_fip_count == 0: #remove", "floating_ip_added_dist(self, fip, fip_cidr): \"\"\"Add floating IP to FIP namespace.\"\"\" 
floating_ip", "namespace fip_ns_name = self.fip_ns.get_name() rtr_2_fip, _ = self.rtr_fip_subnet.get_pair() device =", "compliance with the License. You may obtain # a copy", "snat_idx) ns_ipwrapr.netns.execute(['sysctl', '-w', 'net.ipv4.conf.%s.' 'send_redirects=0' % sn_int]) except Exception: LOG.exception(_LE('DVR:", "(snat_idx >> 30) ^ (snat_idx & MASK_30) if snat_idx <", "ip_lib.IPWrapper(namespace=fip_ns_name) device.route.delete_gateway(str(fip_2_rtr.ip), table=dvr_fip_ns.FIP_RT_TBL) self.fip_ns.local_subnets.release(self.router_id) self.rtr_fip_subnet = None ns_ip.del_veth(fip_2_rtr_name) is_last =", "For IPv6 generate a crc32 bit hash and xor-fold to", "import router_info as router from neutron.agent.linux import ip_lib from neutron.common", "gateway for a dvr. The first step # is to", "namespace ip_cidr = common_utils.ip_to_cidr(fip['floating_ip_address']) self.floating_ip_added_dist(fip, ip_cidr) return l3_constants.FLOATINGIP_STATUS_ACTIVE def remove_floating_ip(self,", "agent that creates a gateway for a dvr. The first", "self.get_internal_device_name(port['id']) device = ip_lib.IPDevice(interface_name, namespace=self.ns_name) if operation == 'add': device.neigh.add(ip,", "in subnet_ports: if p['device_owner'] not in l3_constants.ROUTER_INTERFACE_OWNERS: for fixed_ip in", "from oslo_log import log as logging from oslo_utils import excutils", "subnet_id = port['subnet']['id'] # TODO(Carl) Can we eliminate the need", "# # Unless required by applicable law or agreed to", "None self.dist_fip_count = None self.snat_namespace = None def get_floating_ips(self): \"\"\"Filter", "_snat_redirect_remove(self, gateway, sn_port, sn_int): \"\"\"Removes rules and routes for SNAT", "step is to move the deletion of the snat namespace", "will be in their own namespace. 
\"\"\" self.iptables_manager.ipv4['nat'].empty_chain('POSTROUTING') self.iptables_manager.ipv4['nat'].empty_chain('snat') #", "oslo_utils import excutils from neutron.agent.l3 import dvr_fip_ns from neutron.agent.l3 import", "reserve upper range to extend smaller values snat_idx = (snat_idx", "subnet is attached to the router if not port: return", "self.floating_ip_removed_dist(ip_cidr) def create_snat_namespace(self): # TODO(mlavalle): in the near future, this", "ns_name = self.snat_namespace.name prefix = dvr_snat_ns.SNAT_INT_DEV_PREFIX if ip_lib.device_exists(snat_interface, namespace=ns_name): self.driver.unplug(snat_interface,", "self.fip_ns.allocate_rule_priority() self.floating_ips_dict[floating_ip] = rule_pr fip_2_rtr_name = self.fip_ns.get_int_device_name(self.router_id) ip_rule = ip_lib.IPRule(namespace=self.ns_name)", "ns_ipr.rule.add(ip_cidr, snat_idx, snat_idx) ns_ipwrapr.netns.execute(['sysctl', '-w', 'net.ipv4.conf.%s.' 'send_redirects=0' % sn_int]) except", "import constants as l3_constants from neutron.common import utils as common_utils", "add them back if the action is add_rules if action", "FIP for this router self.dist_fip_count = self.dist_fip_count - 1 if", "one router, otherwise each router's NAT rules will be in", "from neutron.i18n import _LE LOG = logging.getLogger(__name__) # xor-folding mask", "from Plugin for existing ports.\"\"\" if 'id' not in port['subnet']:", "sn_int): \"\"\"Removes rules and routes for SNAT redirection.\"\"\" try: ip_cidr", "be in their own namespace. 
\"\"\" self.iptables_manager.ipv4['nat'].empty_chain('POSTROUTING') self.iptables_manager.ipv4['nat'].empty_chain('snat') # Add", "interface_name, floating_ip, self.agent_conf.send_arp_for_ha) # update internal structures self.dist_fip_count = self.dist_fip_count", "BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either", "is None: self.rtr_fip_subnet = self.fip_ns.local_subnets.allocate( self.router_id) rtr_2_fip, fip_2_rtr = self.rtr_fip_subnet.get_pair()", "to move the deletion of the snat namespace here self.snat_namespace.delete()", "dvr_snat_ns from neutron.agent.l3 import router_info as router from neutron.agent.linux import", "#Add routing rule in fip namespace fip_ns_name = self.fip_ns.get_name() rtr_2_fip,", "return # DVR handling code for SNAT interface_name = self.get_internal_device_name(port['id'])", "ns_ipd.route.add_gateway(gateway, table=snat_idx) ns_ipr.rule.add(ip_cidr, snat_idx, snat_idx) ns_ipwrapr.netns.execute(['sysctl', '-w', 'net.ipv4.conf.%s.' 'send_redirects=0' %", "self.driver, self.use_ipv6) self.snat_namespace.create() return self.snat_namespace def delete_snat_namespace(self): # TODO(mlavalle): in", "\"\"\"Add or delete arp entry into router namespace for the", "bulk calls interface_name = self.get_internal_device_name(port['id']) device = ip_lib.IPDevice(interface_name, namespace=self.ns_name) if", "ip_lib.IPRule(namespace=self.ns_name) ns_ipd = ip_lib.IPDevice(sn_int, namespace=self.ns_name) ns_ipd.route.delete_gateway(gateway, table=snat_idx) ns_ipr.rule.delete(ip_cidr, snat_idx, snat_idx)", "[]) for port in router_ports: fips = port['fixed_ips'] for f", "for a dvr. 
The # first step is to move", "self._update_arp_entry(fixed_ip['ip_address'], p['mac_address'], subnet_id, 'add') def _map_internal_interfaces(self, int_port, snat_ports): \"\"\"Return the", "retrieved from Plugin for existing ports.\"\"\" if 'id' not in", "may obtain # a copy of the License at #", "Unless required by applicable law or agreed to in writing,", "self.router.get(l3_constants.FLOATINGIP_AGENT_INTF_KEY, []) return next( (p for p in fip_ports if", "rule_pr fip_2_rtr_name = self.fip_ns.get_int_device_name(self.router_id) ip_rule = ip_lib.IPRule(namespace=self.ns_name) ip_rule.rule.add(fixed_ip, dvr_fip_ns.FIP_RT_TBL, rule_pr)", "self.rtr_fip_subnet.get_pair() device = ip_lib.IPDevice(fip_2_rtr_name, namespace=fip_ns_name) device.route.add_route(fip_cidr, str(rtr_2_fip.ip)) interface_name = (", "interface_name = ( self.fip_ns.get_ext_device_name( self.fip_ns.agent_gateway_port['id'])) ip_lib.send_garp_for_proxyarp(fip_ns_name, interface_name, floating_ip, self.agent_conf.send_arp_for_ha) #", "# Linklocal subnet for router and floating IP namespace link", "from oslo_utils import excutils from neutron.agent.l3 import dvr_fip_ns from neutron.agent.l3", "if not sn_port: return # DVR handling code for SNAT", "a dvr. 
The # first step is to move the", "is add_rules if action == 'add_rules' and interface_name: rule =", "removed snat failed')) def get_gw_port_host(self): host = self.router.get('gw_port_host') if not", "extend smaller values so that they become greater than system", "fip_cidr.split('/')[0] rtr_2_fip_name = self.fip_ns.get_rtr_ext_device_name(self.router_id) fip_2_rtr_name = self.fip_ns.get_int_device_name(self.router_id) if self.rtr_fip_subnet is", "index value has to be 32 bits or less but", "internal_network_added(self, port): super(DvrRouter, self).internal_network_added(port) ex_gw_port = self.get_ex_gw_port() if not ex_gw_port:", "= ( self.fip_ns.get_ext_device_name( self.fip_ns.agent_gateway_port['id'])) ip_lib.send_garp_for_proxyarp(fip_ns_name, interface_name, floating_ip, self.agent_conf.send_arp_for_ha) # update", "self.floating_ips_dict: rule_pr = self.floating_ips_dict[floating_ip] ip_rule = ip_lib.IPRule(namespace=self.ns_name) ip_rule.rule.delete(floating_ip, dvr_fip_ns.FIP_RT_TBL, rule_pr)", "%(interface_name)s ' '! -o %(interface_name)s -m conntrack ! 
' '--ctstate", "if i['host'] == self.host] def get_snat_interfaces(self): return self.router.get(l3_constants.SNAT_ROUTER_INTF_KEY, []) def", "router_ports: fips = port['fixed_ips'] for f in fips: if f['subnet_id']", "^ (snat_idx & MASK_30) if snat_idx < 32768: snat_idx =", "self.host] def get_snat_interfaces(self): return self.router.get(l3_constants.SNAT_ROUTER_INTF_KEY, []) def get_snat_int_device_name(self, port_id): long_name", "if f['subnet_id'] == subnet_id: return port def _update_arp_entry(self, ip, mac,", "self.snat_namespace.delete() self.snat_namespace = None def _get_internal_port(self, subnet_id): \"\"\"Return internal router", "only if the subnet is attached to the router if", "add_floating_ip(self, fip, interface_name, device): if not self._add_fip_addr_to_device(fip, device): return l3_constants.FLOATINGIP_STATUS_ERROR", "is_this_snat_host: return ns_name = dvr_snat_ns.SnatNamespace.get_snat_ns_name(self.router['id']) self._set_subnet_info(sn_port) interface_name = self.get_snat_int_device_name(sn_port['id']) self._internal_network_added(", "self.dist_fip_count = self.dist_fip_count + 1 def floating_ip_removed_dist(self, fip_cidr): \"\"\"Remove floating", "either express or implied. See the # License for the", "IPs to be hosted on this agent.\"\"\" floating_ips = super(DvrRouter,", "may # not use this file except in compliance with", "IP namespace link self.rtr_fip_subnet = None self.dist_fip_count = None self.snat_namespace", "self.dist_fip_count + 1 def floating_ip_removed_dist(self, fip_cidr): \"\"\"Remove floating IP from", "Handle else case - exception/log? device = ip_lib.IPDevice(fip_2_rtr_name, namespace=fip_ns_name) device.route.delete_route(fip_cidr,", "'add_rules' and interface_name: rule = ('POSTROUTING', '! 
-i %(interface_name)s '", "rtr_2_fip, fip_2_rtr = self.rtr_fip_subnet.get_pair() fip_ns_name = self.fip_ns.get_name() if floating_ip in", "the # code in the L3 agent that creates a", "interface_name, device): if not self._add_fip_addr_to_device(fip, device): return l3_constants.FLOATINGIP_STATUS_ERROR # Special", "30) ^ (snat_idx & MASK_30) if snat_idx < 32768: snat_idx", "the jump to float-snat self.iptables_manager.ipv4['nat'].add_rule('snat', '-j $float-snat') # And add", "values snat_idx = (snat_idx >> 30) ^ (snat_idx & MASK_30)", "port_id return long_name[:self.driver.DEV_NAME_LEN] def _handle_fip_nat_rules(self, interface_name, action): \"\"\"Configures NAT rules", "self.rtr_fip_subnet.get_pair() fip_ns_name = self.fip_ns.get_name() if floating_ip in self.floating_ips_dict: rule_pr =", "self._get_snat_idx(ip_cidr) ns_ipr = ip_lib.IPRule(namespace=self.ns_name) ns_ipd = ip_lib.IPDevice(sn_int, namespace=self.ns_name) ns_ipd.route.delete_gateway(gateway, table=snat_idx)", "_update_arp_entry(self, ip, mac, subnet_id, operation): \"\"\"Add or delete arp entry", "is_this_snat_host = (self.agent_conf.agent_mode == 'dvr_snat' and self.ex_gw_port['binding:host_id'] == self.host) if", "the License is distributed on an \"AS IS\" BASIS, WITHOUT", "p['device_owner'] not in l3_constants.ROUTER_INTERFACE_OWNERS: for fixed_ip in p['fixed_ips']: self._update_arp_entry(fixed_ip['ip_address'], p['mac_address'],", "subnet_id): \"\"\"Return internal router port based on subnet_id.\"\"\" router_ports =", "sn_port: return interface_name = self.get_internal_device_name(port['id']) self._snat_redirect_add(sn_port['fixed_ips'][0]['ip_address'], port, interface_name) # TODO(Carl)", "fip_2_rtr_name = self.fip_ns.get_int_device_name(self.router_id) if self.rtr_fip_subnet is None: self.rtr_fip_subnet = self.fip_ns.local_subnets.allocate(", "import _LE LOG = logging.getLogger(__name__) # xor-folding mask used for", "index MASK_30 = 0x3fffffff class DvrRouter(router.RouterInfo): def 
__init__(self, agent, host,", "and limitations # under the License. import binascii import netaddr", "ip_rule.rule.add(fixed_ip, dvr_fip_ns.FIP_RT_TBL, rule_pr) #Add routing rule in fip namespace fip_ns_name", "# TODO(mlavalle): in the near future, this method should contain", "tables. The index value has to be 32 bits or", "case - exception/log? device = ip_lib.IPDevice(fip_2_rtr_name, namespace=fip_ns_name) device.route.delete_route(fip_cidr, str(rtr_2_fip.ip)) #", "calls interface_name = self.get_internal_device_name(port['id']) device = ip_lib.IPDevice(interface_name, namespace=self.ns_name) if operation", "port): super(DvrRouter, self).internal_network_added(port) ex_gw_port = self.get_ex_gw_port() if not ex_gw_port: return", "is_this_snat_host = (self.agent_conf.agent_mode == 'dvr_snat' and self.get_gw_port_host() == self.host) if", "# processing a router. subnet_ports = self.agent.get_ports_by_subnet(subnet_id) for p in", "think that another router could # come in and want", "= dvr_snat_ns.SNAT_INT_DEV_PREFIX if ip_lib.device_exists(snat_interface, namespace=ns_name): self.driver.unplug(snat_interface, namespace=ns_name, prefix=prefix) def internal_network_removed(self,", "Failed updating arp entry\")) def _set_subnet_arp_info(self, port): \"\"\"Set ARP info", "if not is_this_snat_host: return ns_name = dvr_snat_ns.SnatNamespace.get_snat_ns_name(self.router['id']) self._set_subnet_info(sn_port) interface_name =", "= logging.getLogger(__name__) # xor-folding mask used for IPv6 rule index", "ns_ip.del_veth(fip_2_rtr_name) is_last = self.fip_ns.unsubscribe(self.router_id) if is_last: # TODO(Carl) I can't", "None def _get_internal_port(self, subnet_id): \"\"\"Return internal router port based on", "ex_gw_port: return snat_ports = self.get_snat_interfaces() sn_port = self._map_internal_interfaces(port, snat_ports) if", "the external network.\"\"\" fip_ports = self.router.get(l3_constants.FLOATINGIP_AGENT_INTF_KEY, []) return next( (p", "super(DvrRouter, 
self).remove_floating_ip(device, ip_cidr) self.floating_ip_removed_dist(ip_cidr) def create_snat_namespace(self): # TODO(mlavalle): in the", "subnet_ports: if p['device_owner'] not in l3_constants.ROUTER_INTERFACE_OWNERS: for fixed_ip in p['fixed_ips']:", "= None def get_floating_ips(self): \"\"\"Filter Floating IPs to be hosted", "= None def _get_internal_port(self, subnet_id): \"\"\"Return internal router port based", "self.get_ex_gw_port() if not ex_gw_port: return snat_ports = self.get_snat_interfaces() sn_port =", "contain the # code in the L3 agent that creates", "ip_lib.IPRule(namespace=self.ns_name) ns_ipd = ip_lib.IPDevice(sn_int, namespace=self.ns_name) ns_ipwrapr = ip_lib.IPWrapper(namespace=self.ns_name) ns_ipd.route.add_gateway(gateway, table=snat_idx)", "creation of the snat namespace here self.snat_namespace = dvr_snat_ns.SnatNamespace(self.router['id'], self.agent_conf,", "TODO(mrsmith): optimize the calls below for bulk calls interface_name =", "self.fip_ns.unsubscribe(self.router_id) if is_last: # TODO(Carl) I can't help but think", "self.rtr_fip_subnet is None: self.rtr_fip_subnet = self.fip_ns.local_subnets.allocate( self.router_id) rtr_2_fip, fip_2_rtr =", "creates a gateway for a dvr. The first step #", "# xor-fold the hash to reserve upper range to extend", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "external gateway for a dvr. The # first step is", "the snat namespace here self.snat_namespace = dvr_snat_ns.SnatNamespace(self.router['id'], self.agent_conf, self.driver, self.use_ipv6)", "system generated entries i.e. 32768. For IPv4 use the numeric", "a # semaphore to sync creation/deletion of this namespace. 
self.fip_ns.delete()", "ns_name, sn_port['network_id'], sn_port['id'], sn_port['ip_cidr'], sn_port['mac_address'], interface_name, dvr_snat_ns.SNAT_INT_DEV_PREFIX) self._set_subnet_arp_info(port) def _dvr_internal_network_removed(self,", "update FIP namespace ip_cidr = common_utils.ip_to_cidr(fip['floating_ip_address']) self.floating_ip_added_dist(fip, ip_cidr) return l3_constants.FLOATINGIP_STATUS_ACTIVE", "return ns_name = dvr_snat_ns.SnatNamespace.get_snat_ns_name(self.router['id']) self._set_subnet_info(sn_port) interface_name = self.get_snat_int_device_name(sn_port['id']) self._internal_network_added( ns_name,", "TODO(Carl) I can't help but think that another router could", "the snat namespace here self.snat_namespace.delete() self.snat_namespace = None def _get_internal_port(self,", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "needs two router classes. is_this_snat_host = (self.agent_conf.agent_mode == 'dvr_snat' and", "if not self._add_fip_addr_to_device(fip, device): return l3_constants.FLOATINGIP_STATUS_ERROR # Special Handling for", "== self.host) if not is_this_snat_host: return snat_interface = ( self.get_snat_int_device_name(sn_port['id']))", "def _get_snat_idx(ip_cidr): \"\"\"Generate index for DVR snat rules and route", "routing rule in fip namespace fip_ns_name = self.fip_ns.get_name() rtr_2_fip, _", "near future, this method should contain the # code in", "internal interface port.\"\"\" fixed_ip = int_port['fixed_ips'][0] subnet_id = fixed_ip['subnet_id'] match_port", "the crc32 & 0xffffffff is for Python 2.6 and 3.0", "of the snat namespace here self.snat_namespace.delete() self.snat_namespace = None def", "creating/destroying interfaces and such. I think I'd like a #", "! 
' '--ctstate DNAT -j ACCEPT' % {'interface_name': interface_name}) self.iptables_manager.ipv4['nat'].add_rule(*rule)", "TODO(mlavalle): in the near future, this method should contain the", "deletion of the snat namespace here self.snat_namespace.delete() self.snat_namespace = None", "not is_this_snat_host: return ns_name = dvr_snat_ns.SnatNamespace.get_snat_ns_name(self.router['id']) self._set_subnet_info(sn_port) interface_name = self.get_snat_int_device_name(sn_port['id'])", "\"\"\"Return the SNAT port for the given internal interface port.\"\"\"", "DVR snat rules and route tables. The index value has", "the L3 agent that removes an external gateway for a", "self.snat_namespace = None def get_floating_ips(self): \"\"\"Filter Floating IPs to be", "index for DVR snat rules and route tables. The index", "if match_port: return match_port[0] else: LOG.error(_LE('DVR: no map match_port found!'))", "dvr. The first step # is to move the creation", "str(rtr_2_fip.ip)) # check if this is the last FIP for", "upper range to extend smaller values snat_idx = (snat_idx >>", "the specific language governing permissions and limitations # under the", "snat_idx < 32768: snat_idx = snat_idx + MASK_30 else: snat_idx", "TODO(Carl) This is a sign that dvr needs two router", "come in and want to start using this namespace while", "under the Apache License, Version 2.0 (the \"License\"); you may", "neutron.agent.l3 import router_info as router from neutron.agent.linux import ip_lib from", "TODO(Carl) Can we eliminate the need to make this RPC", "operation == 'add': device.neigh.add(ip, mac) elif operation == 'delete': device.neigh.delete(ip,", "that dvr needs two router classes. 
is_this_snat_host = (self.agent_conf.agent_mode ==", "port['fixed_ips'] for f in fips: if f['subnet_id'] == subnet_id: return", "self.router_id) rtr_2_fip, fip_2_rtr = self.rtr_fip_subnet.get_pair() fip_ns_name = self.fip_ns.get_name() if floating_ip", "here self.snat_namespace.delete() self.snat_namespace = None def _get_internal_port(self, subnet_id): \"\"\"Return internal", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "= ip_lib.IPWrapper(namespace=self.ns_name) ns_ipd.route.add_gateway(gateway, table=snat_idx) ns_ipr.rule.add(ip_cidr, snat_idx, snat_idx) ns_ipwrapr.netns.execute(['sysctl', '-w', 'net.ipv4.conf.%s.'", "if not port: return try: # TODO(mrsmith): optimize the calls", "of this namespace. self.fip_ns.delete() self.fip_ns = None def add_floating_ip(self, fip,", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "arp entry only if the subnet is attached to the", "rule index MASK_30 = 0x3fffffff class DvrRouter(router.RouterInfo): def __init__(self, agent,", "ns_ipwrapr.netns.execute(['sysctl', '-w', 'net.ipv4.conf.%s.' 'send_redirects=0' % sn_int]) except Exception: LOG.exception(_LE('DVR: error", "device): return l3_constants.FLOATINGIP_STATUS_ERROR # Special Handling for DVR - update", "_set_subnet_arp_info(self, port): \"\"\"Set ARP info retrieved from Plugin for existing", "ip_rule.rule.delete(floating_ip, dvr_fip_ns.FIP_RT_TBL, rule_pr) self.fip_ns.deallocate_rule_priority(rule_pr) #TODO(rajeev): Handle else case - exception/log?", "required by applicable law or agreed to in writing, software", "= self.fip_ns.get_rtr_ext_device_name(self.router_id) fip_2_rtr_name = self.fip_ns.get_int_device_name(self.router_id) if self.rtr_fip_subnet is None: self.rtr_fip_subnet", "Floating IPs for DVR. Remove all the rules. This is", "snat_idx = snat_idx + MASK_30 else: snat_idx = net.value return", "this namespace. 
self.fip_ns.delete() self.fip_ns = None def add_floating_ip(self, fip, interface_name,", "match_port[0] else: LOG.error(_LE('DVR: no map match_port found!')) @staticmethod def _get_snat_idx(ip_cidr):", "namespace here self.snat_namespace = dvr_snat_ns.SnatNamespace(self.router['id'], self.agent_conf, self.driver, self.use_ipv6) self.snat_namespace.create() return", "of the cidr. For IPv6 generate a crc32 bit hash", "agreed to in writing, software # distributed under the License", "agent, host, *args, **kwargs): super(DvrRouter, self).__init__(*args, **kwargs) self.agent = agent", "distributed under the License is distributed on an \"AS IS\"", "= dvr_snat_ns.SNAT_INT_DEV_PREFIX + port_id return long_name[:self.driver.DEV_NAME_LEN] def _handle_fip_nat_rules(self, interface_name, action):", "LOG = logging.getLogger(__name__) # xor-folding mask used for IPv6 rule", "CONDITIONS OF ANY KIND, either express or implied. See the", "get_snat_interfaces(self): return self.router.get(l3_constants.SNAT_ROUTER_INTF_KEY, []) def get_snat_int_device_name(self, port_id): long_name = dvr_snat_ns.SNAT_INT_DEV_PREFIX", "to 30 bits. Use the freed range to extend smaller", "from neutron.agent.l3 import dvr_snat_ns from neutron.agent.l3 import router_info as router", "creation/deletion of this namespace. self.fip_ns.delete() self.fip_ns = None def add_floating_ip(self,", "for fixed_ip in p['fixed_ips']: self._update_arp_entry(fixed_ip['ip_address'], p['mac_address'], subnet_id, 'add') def _map_internal_interfaces(self,", "for f in fips: if f['subnet_id'] == subnet_id: return port", "from neutron.common import utils as common_utils from neutron.i18n import _LE", "\"\"\"Removes rules and routes for SNAT redirection.\"\"\" try: ip_cidr =", "long_name = dvr_snat_ns.SNAT_INT_DEV_PREFIX + port_id return long_name[:self.driver.DEV_NAME_LEN] def _handle_fip_nat_rules(self, interface_name,", ">> 30) ^ (snat_idx & MASK_30) if snat_idx < 32768:", "an external gateway for a dvr. 
The # first step", "-m conntrack ! ' '--ctstate DNAT -j ACCEPT' % {'interface_name':", "that removes an external gateway for a dvr. The #", "self.ex_gw_port['binding:host_id'] == self.host) if not is_this_snat_host: return snat_interface = (", "if ip_lib.device_exists(snat_interface, namespace=ns_name): self.driver.unplug(snat_interface, namespace=ns_name, prefix=prefix) def internal_network_removed(self, port): self._dvr_internal_network_removed(port)", "0xffffffff is for Python 2.6 and 3.0 compatibility snat_idx =", "port, interface_name) is_this_snat_host = (self.agent_conf.agent_mode == 'dvr_snat' and self.ex_gw_port['binding:host_id'] ==", "think I'd like a # semaphore to sync creation/deletion of", "sn_port, sn_int): \"\"\"Removes rules and routes for SNAT redirection.\"\"\" try:", "_handle_fip_nat_rules(self, interface_name, action): \"\"\"Configures NAT rules for Floating IPs for", "can only configure one router, otherwise each router's NAT rules", "\"\"\" self.iptables_manager.ipv4['nat'].empty_chain('POSTROUTING') self.iptables_manager.ipv4['nat'].empty_chain('snat') # Add back the jump to float-snat", "in p['fixed_ips']: self._update_arp_entry(fixed_ip['ip_address'], p['mac_address'], subnet_id, 'add') def _map_internal_interfaces(self, int_port, snat_ports):", "License. import binascii import netaddr from oslo_log import log as", "= self._get_internal_port(subnet_id) # update arp entry only if the subnet", "snat_ports): \"\"\"Return the SNAT port for the given internal interface", "their own namespace. 
\"\"\" self.iptables_manager.ipv4['nat'].empty_chain('POSTROUTING') self.iptables_manager.ipv4['nat'].empty_chain('snat') # Add back the", "long_name[:self.driver.DEV_NAME_LEN] def _handle_fip_nat_rules(self, interface_name, action): \"\"\"Configures NAT rules for Floating", "def _snat_redirect_remove(self, gateway, sn_port, sn_int): \"\"\"Removes rules and routes for", "IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND,", "arp entry\")) def _set_subnet_arp_info(self, port): \"\"\"Set ARP info retrieved from", "_get_internal_port(self, subnet_id): \"\"\"Return internal router port based on subnet_id.\"\"\" router_ports", "_dvr_internal_network_removed(self, port): if not self.ex_gw_port: return sn_port = self._map_internal_interfaces(port, self.snat_ports)", "= self.get_internal_device_name(port['id']) self._snat_redirect_remove(sn_port['fixed_ips'][0]['ip_address'], port, interface_name) is_this_snat_host = (self.agent_conf.agent_mode == 'dvr_snat'", "numeric value of the cidr. For IPv6 generate a crc32", "next( (p for p in fip_ports if p['network_id'] == ext_net_id),", "# TODO(Carl) This is a sign that dvr needs two", "updating arp entry\")) def _set_subnet_arp_info(self, port): \"\"\"Set ARP info retrieved", "rules and routes for SNAT redirection.\"\"\" try: ip_cidr = sn_port['ip_cidr']", "entries. \"\"\" net = netaddr.IPNetwork(ip_cidr) if net.version == 6: #", "dvr_snat_ns.SNAT_INT_DEV_PREFIX + port_id return long_name[:self.driver.DEV_NAME_LEN] def _handle_fip_nat_rules(self, interface_name, action): \"\"\"Configures", "gateway, sn_port, sn_int): \"\"\"Adds rules and routes for SNAT redirection.\"\"\"", "namespace. 
self.fip_ns.delete() self.fip_ns = None def add_floating_ip(self, fip, interface_name, device):", "= self.get_snat_interfaces() sn_port = self._map_internal_interfaces(port, snat_ports) if not sn_port: return", "See the # License for the specific language governing permissions", "in the near future, this method should contain the #", "Can we eliminate the need to make this RPC while", "this RPC while # processing a router. subnet_ports = self.agent.get_ports_by_subnet(subnet_id)", "= None ns_ip.del_veth(fip_2_rtr_name) is_last = self.fip_ns.unsubscribe(self.router_id) if is_last: # TODO(Carl)", "law or agreed to in writing, software # distributed under", "not host: LOG.debug(\"gw_port_host missing from router: %s\", self.router['id']) return host", "False then the agent can only configure one router, otherwise", "== subnet_id] if match_port: return match_port[0] else: LOG.error(_LE('DVR: no map", "elif operation == 'delete': device.neigh.delete(ip, mac) except Exception: with excutils.save_and_reraise_exception():", "be 32 bits or less but more than the system", "fip_ports = self.router.get(l3_constants.FLOATINGIP_AGENT_INTF_KEY, []) return next( (p for p in", "first step # is to move the creation of the", "port def _update_arp_entry(self, ip, mac, subnet_id, operation): \"\"\"Add or delete", "ns_ipr = ip_lib.IPRule(namespace=self.ns_name) ns_ipd = ip_lib.IPDevice(sn_int, namespace=self.ns_name) ns_ipd.route.delete_gateway(gateway, table=snat_idx) ns_ipr.rule.delete(ip_cidr,", "self._set_subnet_arp_info(port) def _dvr_internal_network_removed(self, port): if not self.ex_gw_port: return sn_port =", "ip_lib.IPDevice(interface_name, namespace=self.ns_name) if operation == 'add': device.neigh.add(ip, mac) elif operation", "subnet_id = fixed_ip['subnet_id'] match_port = [p for p in snat_ports", "dvr_snat_ns.SNAT_INT_DEV_PREFIX if ip_lib.device_exists(snat_interface, namespace=ns_name): self.driver.unplug(snat_interface, namespace=ns_name, prefix=prefix) def 
internal_network_removed(self, port):", "for bulk calls interface_name = self.get_internal_device_name(port['id']) device = ip_lib.IPDevice(interface_name, namespace=self.ns_name)", "excutils from neutron.agent.l3 import dvr_fip_ns from neutron.agent.l3 import dvr_snat_ns from", "# semaphore to sync creation/deletion of this namespace. self.fip_ns.delete() self.fip_ns", "= self._map_internal_interfaces(port, snat_ports) if not sn_port: return interface_name = self.get_internal_device_name(port['id'])", "agent self.host = host self.floating_ips_dict = {} self.snat_iptables_manager = None", "None def add_floating_ip(self, fip, interface_name, device): if not self._add_fip_addr_to_device(fip, device):", "def internal_network_added(self, port): super(DvrRouter, self).internal_network_added(port) ex_gw_port = self.get_ex_gw_port() if not", "for a dvr. The first step # is to move", "while # processing a router. subnet_ports = self.agent.get_ports_by_subnet(subnet_id) for p", "# # Licensed under the Apache License, Version 2.0 (the", "but more than the system generated entries i.e. 32768. 
For", "self.fip_ns.agent_gateway_port['id'])) ip_lib.send_garp_for_proxyarp(fip_ns_name, interface_name, floating_ip, self.agent_conf.send_arp_for_ha) # update internal structures self.dist_fip_count", "# TODO(mrsmith): optimize the calls below for bulk calls interface_name", "if is_last: # TODO(Carl) I can't help but think that", "step # is to move the creation of the snat", "dvr_snat_ns.SNAT_INT_DEV_PREFIX) self._set_subnet_arp_info(port) def _dvr_internal_network_removed(self, port): if not self.ex_gw_port: return sn_port", "ns_ip = ip_lib.IPWrapper(namespace=fip_ns_name) device.route.delete_gateway(str(fip_2_rtr.ip), table=dvr_fip_ns.FIP_RT_TBL) self.fip_ns.local_subnets.release(self.router_id) self.rtr_fip_subnet = None ns_ip.del_veth(fip_2_rtr_name)", "ip_cidr) return l3_constants.FLOATINGIP_STATUS_ACTIVE def remove_floating_ip(self, device, ip_cidr): super(DvrRouter, self).remove_floating_ip(device, ip_cidr)", "and xor-fold to 30 bits. Use the freed range to", "is to move the deletion of the snat namespace here", "from neutron.common import constants as l3_constants from neutron.common import utils", "ip_cidr = sn_port['ip_cidr'] snat_idx = self._get_snat_idx(ip_cidr) ns_ipr = ip_lib.IPRule(namespace=self.ns_name) ns_ipd", "a gateway for a dvr. The first step # is", "should contain the # code in the L3 agent that", "in floating_ips if i['host'] == self.host] def get_snat_interfaces(self): return self.router.get(l3_constants.SNAT_ROUTER_INTF_KEY,", "on an \"AS IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS", "if not host: LOG.debug(\"gw_port_host missing from router: %s\", self.router['id']) return", "is a sign that dvr needs two router classes. 
is_this_snat_host", "code for SNAT interface_name = self.get_internal_device_name(port['id']) self._snat_redirect_remove(sn_port['fixed_ips'][0]['ip_address'], port, interface_name) is_this_snat_host", "' '--ctstate DNAT -j ACCEPT' % {'interface_name': interface_name}) self.iptables_manager.ipv4['nat'].add_rule(*rule) self.iptables_manager.apply()", "subnet for router and floating IP namespace link self.rtr_fip_subnet =", "on # creating/destroying interfaces and such. I think I'd like", "use_namespaces is set as False then the agent can only", "= sn_port['ip_cidr'] snat_idx = self._get_snat_idx(ip_cidr) ns_ipr = ip_lib.IPRule(namespace=self.ns_name) ns_ipd =", "== 0: #remove default route entry device = ip_lib.IPDevice(rtr_2_fip_name, namespace=self.ns_name)", "Add back the jump to float-snat self.iptables_manager.ipv4['nat'].add_rule('snat', '-j $float-snat') #", "l3_constants from neutron.common import utils as common_utils from neutron.i18n import", "hosted on this agent.\"\"\" floating_ips = super(DvrRouter, self).get_floating_ips() return [i", "Licensed under the Apache License, Version 2.0 (the \"License\"); you", "self.agent.get_ports_by_subnet(subnet_id) for p in subnet_ports: if p['device_owner'] not in l3_constants.ROUTER_INTERFACE_OWNERS:", "The first step # is to move the creation of", "sn_port['network_id'], sn_port['id'], sn_port['ip_cidr'], sn_port['mac_address'], interface_name, dvr_snat_ns.SNAT_INT_DEV_PREFIX) self._set_subnet_arp_info(port) def _dvr_internal_network_removed(self, port):", "else case - exception/log? 
device = ip_lib.IPDevice(fip_2_rtr_name, namespace=fip_ns_name) device.route.delete_route(fip_cidr, str(rtr_2_fip.ip))", "'send_redirects=0' % sn_int]) except Exception: LOG.exception(_LE('DVR: error adding redirection logic'))", "if not is_this_snat_host: return snat_interface = ( self.get_snat_int_device_name(sn_port['id'])) ns_name =", "calls below for bulk calls interface_name = self.get_internal_device_name(port['id']) device =", "port in router_ports: fips = port['fixed_ips'] for f in fips:", "in the L3 agent that creates a gateway for a", "import log as logging from oslo_utils import excutils from neutron.agent.l3", "= self.router.get(l3_constants.FLOATINGIP_AGENT_INTF_KEY, []) return next( (p for p in fip_ports", "device = ip_lib.IPDevice(fip_2_rtr_name, namespace=fip_ns_name) device.route.delete_route(fip_cidr, str(rtr_2_fip.ip)) # check if this", "def __init__(self, agent, host, *args, **kwargs): super(DvrRouter, self).__init__(*args, **kwargs) self.agent", "floating_ips = super(DvrRouter, self).get_floating_ips() return [i for i in floating_ips", "= 0x3fffffff class DvrRouter(router.RouterInfo): def __init__(self, agent, host, *args, **kwargs):", "= fip['fixed_ip_address'] rule_pr = self.fip_ns.allocate_rule_priority() self.floating_ips_dict[floating_ip] = rule_pr fip_2_rtr_name =", "Linklocal subnet for router and floating IP namespace link self.rtr_fip_subnet", "subnet_ports = self.agent.get_ports_by_subnet(subnet_id) for p in subnet_ports: if p['device_owner'] not", "rules for Floating IPs for DVR. 
Remove all the rules.", "= self._get_snat_idx(ip_cidr) ns_ipr = ip_lib.IPRule(namespace=self.ns_name) ns_ipd = ip_lib.IPDevice(sn_int, namespace=self.ns_name) ns_ipd.route.delete_gateway(gateway,", "self.snat_namespace.create() return self.snat_namespace def delete_snat_namespace(self): # TODO(mlavalle): in the near", "interface_name) # TODO(Carl) This is a sign that dvr needs", "common_utils.ip_to_cidr(fip['floating_ip_address']) self.floating_ip_added_dist(fip, ip_cidr) return l3_constants.FLOATINGIP_STATUS_ACTIVE def remove_floating_ip(self, device, ip_cidr): super(DvrRouter,", "is the last FIP for this router self.dist_fip_count = self.dist_fip_count", "self).internal_network_removed(port) def get_floating_agent_gw_interface(self, ext_net_id): \"\"\"Filter Floating Agent GW port for", "ip_lib.IPDevice(sn_int, namespace=self.ns_name) ns_ipwrapr = ip_lib.IPWrapper(namespace=self.ns_name) ns_ipd.route.add_gateway(gateway, table=snat_idx) ns_ipr.rule.add(ip_cidr, snat_idx, snat_idx)", "NAT rules for Floating IPs for DVR. Remove all the", "# code in the L3 agent that creates a gateway", "from FIP namespace.\"\"\" floating_ip = fip_cidr.split('/')[0] rtr_2_fip_name = self.fip_ns.get_rtr_ext_device_name(self.router_id) fip_2_rtr_name", "import dvr_snat_ns from neutron.agent.l3 import router_info as router from neutron.agent.linux", "= [p for p in snat_ports if p['fixed_ips'][0]['subnet_id'] == subnet_id]", "return match_port[0] else: LOG.error(_LE('DVR: no map match_port found!')) @staticmethod def", "is_this_snat_host: return snat_interface = ( self.get_snat_int_device_name(sn_port['id'])) ns_name = self.snat_namespace.name prefix", "two router classes. 
is_this_snat_host = (self.agent_conf.agent_mode == 'dvr_snat' and self.get_gw_port_host()", "== subnet_id: return port def _update_arp_entry(self, ip, mac, subnet_id, operation):", "the SNAT port for the given internal interface port.\"\"\" fixed_ip", "port for the external network.\"\"\" fip_ports = self.router.get(l3_constants.FLOATINGIP_AGENT_INTF_KEY, []) return", "fip_ns_name = self.fip_ns.get_name() rtr_2_fip, _ = self.rtr_fip_subnet.get_pair() device = ip_lib.IPDevice(fip_2_rtr_name,", "port['subnet']['id'] # TODO(Carl) Can we eliminate the need to make", "if not sn_port: return interface_name = self.get_internal_device_name(port['id']) self._snat_redirect_add(sn_port['fixed_ips'][0]['ip_address'], port, interface_name)", "neutron.agent.l3 import dvr_fip_ns from neutron.agent.l3 import dvr_snat_ns from neutron.agent.l3 import", "a dvr. The first step # is to move the", "move the creation of the snat namespace here self.snat_namespace =", "# the crc32 & 0xffffffff is for Python 2.6 and", "smaller values snat_idx = (snat_idx >> 30) ^ (snat_idx &", "self._add_fip_addr_to_device(fip, device): return l3_constants.FLOATINGIP_STATUS_ERROR # Special Handling for DVR -", "table=snat_idx) ns_ipr.rule.delete(ip_cidr, snat_idx, snat_idx) except Exception: LOG.exception(_LE('DVR: removed snat failed'))", "= ip_lib.IPRule(namespace=self.ns_name) ns_ipd = ip_lib.IPDevice(sn_int, namespace=self.ns_name) ns_ipwrapr = ip_lib.IPWrapper(namespace=self.ns_name) ns_ipd.route.add_gateway(gateway,", "of the snat namespace here self.snat_namespace = dvr_snat_ns.SnatNamespace(self.router['id'], self.agent_conf, self.driver,", "not sn_port: return interface_name = self.get_internal_device_name(port['id']) self._snat_redirect_add(sn_port['fixed_ips'][0]['ip_address'], port, interface_name) #", "= self.get_snat_int_device_name(sn_port['id']) self._internal_network_added( ns_name, sn_port['network_id'], sn_port['id'], sn_port['ip_cidr'], sn_port['mac_address'], interface_name, 
dvr_snat_ns.SNAT_INT_DEV_PREFIX)", "router could # come in and want to start using", "if p['fixed_ips'][0]['subnet_id'] == subnet_id] if match_port: return match_port[0] else: LOG.error(_LE('DVR:", "# Special Handling for DVR - update FIP namespace ip_cidr", "= super(DvrRouter, self).get_floating_ips() return [i for i in floating_ips if", "floating IP namespace link self.rtr_fip_subnet = None self.dist_fip_count = None", "match_port = [p for p in snat_ports if p['fixed_ips'][0]['subnet_id'] ==", "for router and floating IP namespace link self.rtr_fip_subnet = None", "OF ANY KIND, either express or implied. See the #", "self.iptables_manager.apply() def floating_ip_added_dist(self, fip, fip_cidr): \"\"\"Add floating IP to FIP", "the subnet.\"\"\" port = self._get_internal_port(subnet_id) # update arp entry only", "ip_lib from neutron.common import constants as l3_constants from neutron.common import", "in writing, software # distributed under the License is distributed", "default route entry device = ip_lib.IPDevice(rtr_2_fip_name, namespace=self.ns_name) ns_ip = ip_lib.IPWrapper(namespace=fip_ns_name)", "structures self.dist_fip_count = self.dist_fip_count + 1 def floating_ip_removed_dist(self, fip_cidr): \"\"\"Remove", "neutron.common import constants as l3_constants from neutron.common import utils as", "2015 Openstack Foundation # # Licensed under the Apache License,", "Floating IPs to be hosted on this agent.\"\"\" floating_ips =", "self).remove_floating_ip(device, ip_cidr) self.floating_ip_removed_dist(ip_cidr) def create_snat_namespace(self): # TODO(mlavalle): in the near", "entries i.e. 32768. For IPv4 use the numeric value of", "Copyright (c) 2015 Openstack Foundation # # Licensed under the", "semaphore to sync creation/deletion of this namespace. 
self.fip_ns.delete() self.fip_ns =", "in snat_ports if p['fixed_ips'][0]['subnet_id'] == subnet_id] if match_port: return match_port[0]", "32768: snat_idx = snat_idx + MASK_30 else: snat_idx = net.value", "L3 agent that creates a gateway for a dvr. The", "if net.version == 6: # the crc32 & 0xffffffff is", "destroying it. The two could end up conflicting on #", "system generated entries. \"\"\" net = netaddr.IPNetwork(ip_cidr) if net.version ==", "device, ip_cidr): super(DvrRouter, self).remove_floating_ip(device, ip_cidr) self.floating_ip_removed_dist(ip_cidr) def create_snat_namespace(self): # TODO(mlavalle):", "constants as l3_constants from neutron.common import utils as common_utils from", "DVR - update FIP namespace ip_cidr = common_utils.ip_to_cidr(fip['floating_ip_address']) self.floating_ip_added_dist(fip, ip_cidr)", "in l3_constants.ROUTER_INTERFACE_OWNERS: for fixed_ip in p['fixed_ips']: self._update_arp_entry(fixed_ip['ip_address'], p['mac_address'], subnet_id, 'add')", "sn_int]) except Exception: LOG.exception(_LE('DVR: error adding redirection logic')) def _snat_redirect_remove(self,", "port_id): long_name = dvr_snat_ns.SNAT_INT_DEV_PREFIX + port_id return long_name[:self.driver.DEV_NAME_LEN] def _handle_fip_nat_rules(self,", "logging.getLogger(__name__) # xor-folding mask used for IPv6 rule index MASK_30", "fip['fixed_ip_address'] rule_pr = self.fip_ns.allocate_rule_priority() self.floating_ips_dict[floating_ip] = rule_pr fip_2_rtr_name = self.fip_ns.get_int_device_name(self.router_id)", "with the License. 
You may obtain # a copy of", "host self.floating_ips_dict = {} self.snat_iptables_manager = None # Linklocal subnet", "as router from neutron.agent.linux import ip_lib from neutron.common import constants", "ip_cidr) self.floating_ip_removed_dist(ip_cidr) def create_snat_namespace(self): # TODO(mlavalle): in the near future,", "floating_ip in self.floating_ips_dict: rule_pr = self.floating_ips_dict[floating_ip] ip_rule = ip_lib.IPRule(namespace=self.ns_name) ip_rule.rule.delete(floating_ip,", "no map match_port found!')) @staticmethod def _get_snat_idx(ip_cidr): \"\"\"Generate index for", "on this agent.\"\"\" floating_ips = super(DvrRouter, self).get_floating_ips() return [i for", "the creation of the snat namespace here self.snat_namespace = dvr_snat_ns.SnatNamespace(self.router['id'],", "= self.get_internal_device_name(port['id']) device = ip_lib.IPDevice(interface_name, namespace=self.ns_name) if operation == 'add':", "for SNAT redirection.\"\"\" try: ip_cidr = sn_port['ip_cidr'] snat_idx = self._get_snat_idx(ip_cidr)", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "and such. 
I think I'd like a # semaphore to", "#remove default route entry device = ip_lib.IPDevice(rtr_2_fip_name, namespace=self.ns_name) ns_ip =", "1 def floating_ip_removed_dist(self, fip_cidr): \"\"\"Remove floating IP from FIP namespace.\"\"\"", "namespace=fip_ns_name) device.route.add_route(fip_cidr, str(rtr_2_fip.ip)) interface_name = ( self.fip_ns.get_ext_device_name( self.fip_ns.agent_gateway_port['id'])) ip_lib.send_garp_for_proxyarp(fip_ns_name, interface_name,", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "= self.rtr_fip_subnet.get_pair() fip_ns_name = self.fip_ns.get_name() if floating_ip in self.floating_ips_dict: rule_pr", "from neutron.agent.l3 import router_info as router from neutron.agent.linux import ip_lib", "snat_idx = self._get_snat_idx(ip_cidr) ns_ipr = ip_lib.IPRule(namespace=self.ns_name) ns_ipd = ip_lib.IPDevice(sn_int, namespace=self.ns_name)", "ip_lib.IPDevice(fip_2_rtr_name, namespace=fip_ns_name) device.route.delete_route(fip_cidr, str(rtr_2_fip.ip)) # check if this is the", "oslo_log import log as logging from oslo_utils import excutils from", "if use_namespaces is set as False then the agent can", "handling code for SNAT interface_name = self.get_internal_device_name(port['id']) self._snat_redirect_remove(sn_port['fixed_ips'][0]['ip_address'], port, interface_name)", "except in compliance with the License. You may obtain #", "int_port['fixed_ips'][0] subnet_id = fixed_ip['subnet_id'] match_port = [p for p in", "None def get_floating_ips(self): \"\"\"Filter Floating IPs to be hosted on", "sign that dvr needs two router classes. 
is_this_snat_host = (self.agent_conf.agent_mode", "return interface_name = self.get_internal_device_name(port['id']) self._snat_redirect_add(sn_port['fixed_ips'][0]['ip_address'], port, interface_name) # TODO(Carl) This", "# distributed under the License is distributed on an \"AS", "# Unless required by applicable law or agreed to in", "namespace.\"\"\" floating_ip = fip['floating_ip_address'] fixed_ip = fip['fixed_ip_address'] rule_pr = self.fip_ns.allocate_rule_priority()", "the L3 agent that creates a gateway for a dvr.", "first step is to move the deletion of the snat", "get_gw_port_host(self): host = self.router.get('gw_port_host') if not host: LOG.debug(\"gw_port_host missing from", "action == 'add_rules' and interface_name: rule = ('POSTROUTING', '! -i", "is distributed on an \"AS IS\" BASIS, WITHOUT # WARRANTIES", "= ip_lib.IPDevice(fip_2_rtr_name, namespace=fip_ns_name) device.route.delete_route(fip_cidr, str(rtr_2_fip.ip)) # check if this is", "ip_rule = ip_lib.IPRule(namespace=self.ns_name) ip_rule.rule.delete(floating_ip, dvr_fip_ns.FIP_RT_TBL, rule_pr) self.fip_ns.deallocate_rule_priority(rule_pr) #TODO(rajeev): Handle else", "excutils.save_and_reraise_exception(): LOG.exception(_LE(\"DVR: Failed updating arp entry\")) def _set_subnet_arp_info(self, port): \"\"\"Set", "namespace link self.rtr_fip_subnet = None self.dist_fip_count = None self.snat_namespace =", "DVR. Remove all the rules. This is safe because if", "= binascii.crc32(ip_cidr) & 0xffffffff # xor-fold the hash to reserve", "classes. 
is_this_snat_host = (self.agent_conf.agent_mode == 'dvr_snat' and self.get_gw_port_host() == self.host)", "Openstack Foundation # # Licensed under the Apache License, Version", "subnet.\"\"\" port = self._get_internal_port(subnet_id) # update arp entry only if", "def _update_arp_entry(self, ip, mac, subnet_id, operation): \"\"\"Add or delete arp", "% sn_int]) except Exception: LOG.exception(_LE('DVR: error adding redirection logic')) def", "code in the L3 agent that creates a gateway for", "\"\"\"Filter Floating IPs to be hosted on this agent.\"\"\" floating_ips", "%(interface_name)s -m conntrack ! ' '--ctstate DNAT -j ACCEPT' %", "but think that another router could # come in and", "self.fip_ns.get_name() rtr_2_fip, _ = self.rtr_fip_subnet.get_pair() device = ip_lib.IPDevice(fip_2_rtr_name, namespace=fip_ns_name) device.route.add_route(fip_cidr,", "# update arp entry only if the subnet is attached", "== 'dvr_snat' and self.get_gw_port_host() == self.host) if not is_this_snat_host: return", "-i %(interface_name)s ' '! -o %(interface_name)s -m conntrack ! '", "FIP namespace.\"\"\" floating_ip = fip_cidr.split('/')[0] rtr_2_fip_name = self.fip_ns.get_rtr_ext_device_name(self.router_id) fip_2_rtr_name =", "specific language governing permissions and limitations # under the License.", "# not use this file except in compliance with the", "= fip['floating_ip_address'] fixed_ip = fip['fixed_ip_address'] rule_pr = self.fip_ns.allocate_rule_priority() self.floating_ips_dict[floating_ip] =", "the numeric value of the cidr. For IPv6 generate a", "return snat_interface = ( self.get_snat_int_device_name(sn_port['id'])) ns_name = self.snat_namespace.name prefix =", "all the rules. 
This is safe because if use_namespaces is", "if not ex_gw_port: return snat_ports = self.get_snat_interfaces() sn_port = self._map_internal_interfaces(port,", "device.route.delete_route(fip_cidr, str(rtr_2_fip.ip)) # check if this is the last FIP", "up conflicting on # creating/destroying interfaces and such. I think", "SNAT port for the given internal interface port.\"\"\" fixed_ip =", "def _handle_fip_nat_rules(self, interface_name, action): \"\"\"Configures NAT rules for Floating IPs", "def floating_ip_added_dist(self, fip, fip_cidr): \"\"\"Add floating IP to FIP namespace.\"\"\"", "= self.fip_ns.get_int_device_name(self.router_id) ip_rule = ip_lib.IPRule(namespace=self.ns_name) ip_rule.rule.add(fixed_ip, dvr_fip_ns.FIP_RT_TBL, rule_pr) #Add routing", "== 'dvr_snat' and self.ex_gw_port['binding:host_id'] == self.host) if not is_this_snat_host: return", "= int_port['fixed_ips'][0] subnet_id = fixed_ip['subnet_id'] match_port = [p for p", "+ port_id return long_name[:self.driver.DEV_NAME_LEN] def _handle_fip_nat_rules(self, interface_name, action): \"\"\"Configures NAT", "under the License is distributed on an \"AS IS\" BASIS,", "make this RPC while # processing a router. 
subnet_ports =", "mac, subnet_id, operation): \"\"\"Add or delete arp entry into router", "eliminate the need to make this RPC while # processing", "for DVR - update FIP namespace ip_cidr = common_utils.ip_to_cidr(fip['floating_ip_address']) self.floating_ip_added_dist(fip,", "except Exception: with excutils.save_and_reraise_exception(): LOG.exception(_LE(\"DVR: Failed updating arp entry\")) def", "ext_net_id): \"\"\"Filter Floating Agent GW port for the external network.\"\"\"", "The # first step is to move the deletion of", "the hash to reserve upper range to extend smaller values", "self.get_internal_device_name(port['id']) self._snat_redirect_remove(sn_port['fixed_ips'][0]['ip_address'], port, interface_name) is_this_snat_host = (self.agent_conf.agent_mode == 'dvr_snat' and", "this file except in compliance with the License. You may", "This is safe because if use_namespaces is set as False", "I'd like a # semaphore to sync creation/deletion of this", "the system generated entries i.e. 32768. For IPv4 use the", "the calls below for bulk calls interface_name = self.get_internal_device_name(port['id']) device", "end up conflicting on # creating/destroying interfaces and such. I", "self.snat_namespace.name prefix = dvr_snat_ns.SNAT_INT_DEV_PREFIX if ip_lib.device_exists(snat_interface, namespace=ns_name): self.driver.unplug(snat_interface, namespace=ns_name, prefix=prefix)", "class DvrRouter(router.RouterInfo): def __init__(self, agent, host, *args, **kwargs): super(DvrRouter, self).__init__(*args,", "a router. 
subnet_ports = self.agent.get_ports_by_subnet(subnet_id) for p in subnet_ports: if", "map match_port found!')) @staticmethod def _get_snat_idx(ip_cidr): \"\"\"Generate index for DVR", "snat_ports if p['fixed_ips'][0]['subnet_id'] == subnet_id] if match_port: return match_port[0] else:", "def remove_floating_ip(self, device, ip_cidr): super(DvrRouter, self).remove_floating_ip(device, ip_cidr) self.floating_ip_removed_dist(ip_cidr) def create_snat_namespace(self):", "device.route.delete_gateway(str(fip_2_rtr.ip), table=dvr_fip_ns.FIP_RT_TBL) self.fip_ns.local_subnets.release(self.router_id) self.rtr_fip_subnet = None ns_ip.del_veth(fip_2_rtr_name) is_last = self.fip_ns.unsubscribe(self.router_id)", "except Exception: LOG.exception(_LE('DVR: removed snat failed')) def get_gw_port_host(self): host =", "file except in compliance with the License. You may obtain", "\"\"\"Add floating IP to FIP namespace.\"\"\" floating_ip = fip['floating_ip_address'] fixed_ip", "= {} self.snat_iptables_manager = None # Linklocal subnet for router", "entry only if the subnet is attached to the router", "entry\")) def _set_subnet_arp_info(self, port): \"\"\"Set ARP info retrieved from Plugin", "30 bits. Use the freed range to extend smaller values", "snat rules and route tables. The index value has to", "OR CONDITIONS OF ANY KIND, either express or implied. See", "not self.ex_gw_port: return sn_port = self._map_internal_interfaces(port, self.snat_ports) if not sn_port:", "fip, interface_name, device): if not self._add_fip_addr_to_device(fip, device): return l3_constants.FLOATINGIP_STATUS_ERROR #", "def create_snat_namespace(self): # TODO(mlavalle): in the near future, this method", "the License. 
import binascii import netaddr from oslo_log import log", "match_port found!')) @staticmethod def _get_snat_idx(ip_cidr): \"\"\"Generate index for DVR snat", "the last FIP for this router self.dist_fip_count = self.dist_fip_count -", "self.floating_ips_dict[floating_ip] = rule_pr fip_2_rtr_name = self.fip_ns.get_int_device_name(self.router_id) ip_rule = ip_lib.IPRule(namespace=self.ns_name) ip_rule.rule.add(fixed_ip,", "return try: # TODO(mrsmith): optimize the calls below for bulk", "adding redirection logic')) def _snat_redirect_remove(self, gateway, sn_port, sn_int): \"\"\"Removes rules", "action is add_rules if action == 'add_rules' and interface_name: rule", "match_port: return match_port[0] else: LOG.error(_LE('DVR: no map match_port found!')) @staticmethod", "self.fip_ns.get_int_device_name(self.router_id) ip_rule = ip_lib.IPRule(namespace=self.ns_name) ip_rule.rule.add(fixed_ip, dvr_fip_ns.FIP_RT_TBL, rule_pr) #Add routing rule", "MASK_30 else: snat_idx = net.value return snat_idx def _snat_redirect_add(self, gateway,", "self.snat_namespace = None def _get_internal_port(self, subnet_id): \"\"\"Return internal router port", "p['mac_address'], subnet_id, 'add') def _map_internal_interfaces(self, int_port, snat_ports): \"\"\"Return the SNAT", "agent that removes an external gateway for a dvr. 
The", "self._set_subnet_info(sn_port) interface_name = self.get_snat_int_device_name(sn_port['id']) self._internal_network_added( ns_name, sn_port['network_id'], sn_port['id'], sn_port['ip_cidr'], sn_port['mac_address'],", "failed')) def get_gw_port_host(self): host = self.router.get('gw_port_host') if not host: LOG.debug(\"gw_port_host", "ip_lib.IPDevice(rtr_2_fip_name, namespace=self.ns_name) ns_ip = ip_lib.IPWrapper(namespace=fip_ns_name) device.route.delete_gateway(str(fip_2_rtr.ip), table=dvr_fip_ns.FIP_RT_TBL) self.fip_ns.local_subnets.release(self.router_id) self.rtr_fip_subnet =", "l3_constants.FLOATINGIP_STATUS_ACTIVE def remove_floating_ip(self, device, ip_cidr): super(DvrRouter, self).remove_floating_ip(device, ip_cidr) self.floating_ip_removed_dist(ip_cidr) def", "router port based on subnet_id.\"\"\" router_ports = self.router.get(l3_constants.INTERFACE_KEY, []) for", "# xor-folding mask used for IPv6 rule index MASK_30 =", "port): \"\"\"Set ARP info retrieved from Plugin for existing ports.\"\"\"", "port: return try: # TODO(mrsmith): optimize the calls below for", "ns_ipd.route.delete_gateway(gateway, table=snat_idx) ns_ipr.rule.delete(ip_cidr, snat_idx, snat_idx) except Exception: LOG.exception(_LE('DVR: removed snat", "xor-fold the hash to reserve upper range to extend smaller", "namespace=ns_name, prefix=prefix) def internal_network_removed(self, port): self._dvr_internal_network_removed(port) super(DvrRouter, self).internal_network_removed(port) def get_floating_agent_gw_interface(self,", "given internal interface port.\"\"\" fixed_ip = int_port['fixed_ips'][0] subnet_id = fixed_ip['subnet_id']", "fip_2_rtr_name = self.fip_ns.get_int_device_name(self.router_id) ip_rule = ip_lib.IPRule(namespace=self.ns_name) ip_rule.rule.add(fixed_ip, dvr_fip_ns.FIP_RT_TBL, rule_pr) #Add", "= agent self.host = host self.floating_ips_dict = {} self.snat_iptables_manager =", "writing, software # distributed under the License is distributed on", "IPv6 rule index 
MASK_30 = 0x3fffffff class DvrRouter(router.RouterInfo): def __init__(self,", "the License. You may obtain # a copy of the", "the deletion of the snat namespace here self.snat_namespace.delete() self.snat_namespace =", "use this file except in compliance with the License. You", "= fixed_ip['subnet_id'] match_port = [p for p in snat_ports if", "internal router port based on subnet_id.\"\"\" router_ports = self.router.get(l3_constants.INTERFACE_KEY, [])", "to move the creation of the snat namespace here self.snat_namespace", "'dvr_snat' and self.get_gw_port_host() == self.host) if not is_this_snat_host: return ns_name", "DNAT -j ACCEPT' % {'interface_name': interface_name}) self.iptables_manager.ipv4['nat'].add_rule(*rule) self.iptables_manager.apply() def floating_ip_added_dist(self,", "= self.dist_fip_count - 1 if self.dist_fip_count == 0: #remove default", "external network.\"\"\" fip_ports = self.router.get(l3_constants.FLOATINGIP_AGENT_INTF_KEY, []) return next( (p for", "range to extend smaller values so that they become greater", "floating IP from FIP namespace.\"\"\" floating_ip = fip_cidr.split('/')[0] rtr_2_fip_name =", "router from neutron.agent.linux import ip_lib from neutron.common import constants as", "dvr. The # first step is to move the deletion", "port, interface_name) # TODO(Carl) This is a sign that dvr", "else: LOG.error(_LE('DVR: no map match_port found!')) @staticmethod def _get_snat_idx(ip_cidr): \"\"\"Generate", "crc32 bit hash and xor-fold to 30 bits. 
Use the", "self.get_snat_interfaces() sn_port = self._map_internal_interfaces(port, snat_ports) if not sn_port: return interface_name", "subnet_id.\"\"\" router_ports = self.router.get(l3_constants.INTERFACE_KEY, []) for port in router_ports: fips", "try: # TODO(mrsmith): optimize the calls below for bulk calls", "self.get_internal_device_name(port['id']) self._snat_redirect_add(sn_port['fixed_ips'][0]['ip_address'], port, interface_name) # TODO(Carl) This is a sign", "= self.get_internal_device_name(port['id']) self._snat_redirect_add(sn_port['fixed_ips'][0]['ip_address'], port, interface_name) # TODO(Carl) This is a", "express or implied. See the # License for the specific", "for existing ports.\"\"\" if 'id' not in port['subnet']: return subnet_id", "the Apache License, Version 2.0 (the \"License\"); you may #", "# DVR handling code for SNAT interface_name = self.get_internal_device_name(port['id']) self._snat_redirect_remove(sn_port['fixed_ips'][0]['ip_address'],", "L3 agent that removes an external gateway for a dvr.", "port = self._get_internal_port(subnet_id) # update arp entry only if the", "mask used for IPv6 rule index MASK_30 = 0x3fffffff class", "str(rtr_2_fip.ip)) interface_name = ( self.fip_ns.get_ext_device_name( self.fip_ns.agent_gateway_port['id'])) ip_lib.send_garp_for_proxyarp(fip_ns_name, interface_name, floating_ip, self.agent_conf.send_arp_for_ha)", "rules and route tables. The index value has to be", "self.host) if not is_this_snat_host: return ns_name = dvr_snat_ns.SnatNamespace.get_snat_ns_name(self.router['id']) self._set_subnet_info(sn_port) interface_name", "compatibility snat_idx = binascii.crc32(ip_cidr) & 0xffffffff # xor-fold the hash", "& MASK_30) if snat_idx < 32768: snat_idx = snat_idx +", "'net.ipv4.conf.%s.' 
'send_redirects=0' % sn_int]) except Exception: LOG.exception(_LE('DVR: error adding redirection", "get_snat_int_device_name(self, port_id): long_name = dvr_snat_ns.SNAT_INT_DEV_PREFIX + port_id return long_name[:self.driver.DEV_NAME_LEN] def", "[p for p in snat_ports if p['fixed_ips'][0]['subnet_id'] == subnet_id] if", "= self.agent.get_ports_by_subnet(subnet_id) for p in subnet_ports: if p['device_owner'] not in", "agent can only configure one router, otherwise each router's NAT", "like a # semaphore to sync creation/deletion of this namespace.", "self.fip_ns.local_subnets.allocate( self.router_id) rtr_2_fip, fip_2_rtr = self.rtr_fip_subnet.get_pair() fip_ns_name = self.fip_ns.get_name() if", "redirection.\"\"\" try: ip_cidr = sn_port['ip_cidr'] snat_idx = self._get_snat_idx(ip_cidr) ns_ipr =", "dvr_fip_ns.FIP_RT_TBL, rule_pr) self.fip_ns.deallocate_rule_priority(rule_pr) #TODO(rajeev): Handle else case - exception/log? device", "p in subnet_ports: if p['device_owner'] not in l3_constants.ROUTER_INTERFACE_OWNERS: for fixed_ip", "be hosted on this agent.\"\"\" floating_ips = super(DvrRouter, self).get_floating_ips() return", "conflicting on # creating/destroying interfaces and such. I think I'd", "not port: return try: # TODO(mrsmith): optimize the calls below", "this is # destroying it. The two could end up", "ns_ipr = ip_lib.IPRule(namespace=self.ns_name) ns_ipd = ip_lib.IPDevice(sn_int, namespace=self.ns_name) ns_ipwrapr = ip_lib.IPWrapper(namespace=self.ns_name)", "if not self.ex_gw_port: return sn_port = self._map_internal_interfaces(port, self.snat_ports) if not", "back the jump to float-snat self.iptables_manager.ipv4['nat'].add_rule('snat', '-j $float-snat') # And", "to extend smaller values so that they become greater than", "snat_ports) if not sn_port: return interface_name = self.get_internal_device_name(port['id']) self._snat_redirect_add(sn_port['fixed_ips'][0]['ip_address'], port,", "limitations # under the License. 
import binascii import netaddr from", "= rule_pr fip_2_rtr_name = self.fip_ns.get_int_device_name(self.router_id) ip_rule = ip_lib.IPRule(namespace=self.ns_name) ip_rule.rule.add(fixed_ip, dvr_fip_ns.FIP_RT_TBL,", "SNAT redirection.\"\"\" try: ip_cidr = sn_port['ip_cidr'] snat_idx = self._get_snat_idx(ip_cidr) ns_ipr", "= None # Linklocal subnet for router and floating IP", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "bit hash and xor-fold to 30 bits. Use the freed", "fip namespace fip_ns_name = self.fip_ns.get_name() rtr_2_fip, _ = self.rtr_fip_subnet.get_pair() device", "import ip_lib from neutron.common import constants as l3_constants from neutron.common", "for the given internal interface port.\"\"\" fixed_ip = int_port['fixed_ips'][0] subnet_id", "None self.snat_namespace = None def get_floating_ips(self): \"\"\"Filter Floating IPs to", "device.route.add_route(fip_cidr, str(rtr_2_fip.ip)) interface_name = ( self.fip_ns.get_ext_device_name( self.fip_ns.agent_gateway_port['id'])) ip_lib.send_garp_for_proxyarp(fip_ns_name, interface_name, floating_ip,", "return long_name[:self.driver.DEV_NAME_LEN] def _handle_fip_nat_rules(self, interface_name, action): \"\"\"Configures NAT rules for", "NAT rules will be in their own namespace. \"\"\" self.iptables_manager.ipv4['nat'].empty_chain('POSTROUTING')", "otherwise each router's NAT rules will be in their own", "__init__(self, agent, host, *args, **kwargs): super(DvrRouter, self).__init__(*args, **kwargs) self.agent =", "namespace for the subnet.\"\"\" port = self._get_internal_port(subnet_id) # update arp", "'-j $float-snat') # And add them back if the action", "own namespace. \"\"\" self.iptables_manager.ipv4['nat'].empty_chain('POSTROUTING') self.iptables_manager.ipv4['nat'].empty_chain('snat') # Add back the jump", "For IPv4 use the numeric value of the cidr. 
For", "try: ip_cidr = sn_port['ip_cidr'] snat_idx = self._get_snat_idx(ip_cidr) ns_ipr = ip_lib.IPRule(namespace=self.ns_name)", "License for the specific language governing permissions and limitations #", "= ip_lib.IPDevice(fip_2_rtr_name, namespace=fip_ns_name) device.route.add_route(fip_cidr, str(rtr_2_fip.ip)) interface_name = ( self.fip_ns.get_ext_device_name( self.fip_ns.agent_gateway_port['id']))", "= ( self.get_snat_int_device_name(sn_port['id'])) ns_name = self.snat_namespace.name prefix = dvr_snat_ns.SNAT_INT_DEV_PREFIX if", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "interface_name, dvr_snat_ns.SNAT_INT_DEV_PREFIX) self._set_subnet_arp_info(port) def _dvr_internal_network_removed(self, port): if not self.ex_gw_port: return", "int_port, snat_ports): \"\"\"Return the SNAT port for the given internal", "sn_port, sn_int): \"\"\"Adds rules and routes for SNAT redirection.\"\"\" try:", "('POSTROUTING', '! -i %(interface_name)s ' '! 
-o %(interface_name)s -m conntrack", "create_snat_namespace(self): # TODO(mlavalle): in the near future, this method should", "& 0xffffffff is for Python 2.6 and 3.0 compatibility snat_idx", "return subnet_id = port['subnet']['id'] # TODO(Carl) Can we eliminate the", "is set as False then the agent can only configure", "return next( (p for p in fip_ports if p['network_id'] ==", "for p in subnet_ports: if p['device_owner'] not in l3_constants.ROUTER_INTERFACE_OWNERS: for", "def _get_internal_port(self, subnet_id): \"\"\"Return internal router port based on subnet_id.\"\"\"", "SNAT interface_name = self.get_internal_device_name(port['id']) self._snat_redirect_remove(sn_port['fixed_ips'][0]['ip_address'], port, interface_name) is_this_snat_host = (self.agent_conf.agent_mode", "- 1 if self.dist_fip_count == 0: #remove default route entry", "\"\"\"Set ARP info retrieved from Plugin for existing ports.\"\"\" if", "snat_idx = binascii.crc32(ip_cidr) & 0xffffffff # xor-fold the hash to", "if floating_ip in self.floating_ips_dict: rule_pr = self.floating_ips_dict[floating_ip] ip_rule = ip_lib.IPRule(namespace=self.ns_name)", "and routes for SNAT redirection.\"\"\" try: ip_cidr = sn_port['ip_cidr'] snat_idx", "else: snat_idx = net.value return snat_idx def _snat_redirect_add(self, gateway, sn_port,", "is_last = self.fip_ns.unsubscribe(self.router_id) if is_last: # TODO(Carl) I can't help", "to float-snat self.iptables_manager.ipv4['nat'].add_rule('snat', '-j $float-snat') # And add them back", "each router's NAT rules will be in their own namespace.", "self.fip_ns.get_rtr_ext_device_name(self.router_id) fip_2_rtr_name = self.fip_ns.get_int_device_name(self.router_id) if self.rtr_fip_subnet is None: self.rtr_fip_subnet =", "to extend smaller values snat_idx = (snat_idx >> 30) ^", "the cidr. 
For IPv6 generate a crc32 bit hash and", "to be hosted on this agent.\"\"\" floating_ips = super(DvrRouter, self).get_floating_ips()", "ACCEPT' % {'interface_name': interface_name}) self.iptables_manager.ipv4['nat'].add_rule(*rule) self.iptables_manager.apply() def floating_ip_added_dist(self, fip, fip_cidr):", "= (self.agent_conf.agent_mode == 'dvr_snat' and self.ex_gw_port['binding:host_id'] == self.host) if not", "for the external network.\"\"\" fip_ports = self.router.get(l3_constants.FLOATINGIP_AGENT_INTF_KEY, []) return next(", "need to make this RPC while # processing a router.", "the # License for the specific language governing permissions and", "= ip_lib.IPRule(namespace=self.ns_name) ip_rule.rule.delete(floating_ip, dvr_fip_ns.FIP_RT_TBL, rule_pr) self.fip_ns.deallocate_rule_priority(rule_pr) #TODO(rajeev): Handle else case", "[]) return next( (p for p in fip_ports if p['network_id']", "not ex_gw_port: return snat_ports = self.get_snat_interfaces() sn_port = self._map_internal_interfaces(port, snat_ports)", "netaddr.IPNetwork(ip_cidr) if net.version == 6: # the crc32 & 0xffffffff", "less but more than the system generated entries i.e. 
32768.", "self.snat_iptables_manager = None # Linklocal subnet for router and floating", "to FIP namespace.\"\"\" floating_ip = fip['floating_ip_address'] fixed_ip = fip['fixed_ip_address'] rule_pr", "return host def internal_network_added(self, port): super(DvrRouter, self).internal_network_added(port) ex_gw_port = self.get_ex_gw_port()", "internal_network_removed(self, port): self._dvr_internal_network_removed(port) super(DvrRouter, self).internal_network_removed(port) def get_floating_agent_gw_interface(self, ext_net_id): \"\"\"Filter Floating", "existing ports.\"\"\" if 'id' not in port['subnet']: return subnet_id =", "not in l3_constants.ROUTER_INTERFACE_OWNERS: for fixed_ip in p['fixed_ips']: self._update_arp_entry(fixed_ip['ip_address'], p['mac_address'], subnet_id,", "if p['device_owner'] not in l3_constants.ROUTER_INTERFACE_OWNERS: for fixed_ip in p['fixed_ips']: self._update_arp_entry(fixed_ip['ip_address'],", "[i for i in floating_ips if i['host'] == self.host] def", "float-snat self.iptables_manager.ipv4['nat'].add_rule('snat', '-j $float-snat') # And add them back if", "except Exception: LOG.exception(_LE('DVR: error adding redirection logic')) def _snat_redirect_remove(self, gateway,", "p['fixed_ips']: self._update_arp_entry(fixed_ip['ip_address'], p['mac_address'], subnet_id, 'add') def _map_internal_interfaces(self, int_port, snat_ports): \"\"\"Return", "def get_gw_port_host(self): host = self.router.get('gw_port_host') if not host: LOG.debug(\"gw_port_host missing", "them back if the action is add_rules if action ==", "+ 1 def floating_ip_removed_dist(self, fip_cidr): \"\"\"Remove floating IP from FIP", "return port def _update_arp_entry(self, ip, mac, subnet_id, operation): \"\"\"Add or", "== self.host) if not is_this_snat_host: return ns_name = dvr_snat_ns.SnatNamespace.get_snat_ns_name(self.router['id']) self._set_subnet_info(sn_port)", "snat_idx = net.value return snat_idx def _snat_redirect_add(self, gateway, sn_port, sn_int):", "\"\"\"Return 
internal router port based on subnet_id.\"\"\" router_ports = self.router.get(l3_constants.INTERFACE_KEY,", "interface_name = self.get_internal_device_name(port['id']) self._snat_redirect_remove(sn_port['fixed_ips'][0]['ip_address'], port, interface_name) is_this_snat_host = (self.agent_conf.agent_mode ==", "because if use_namespaces is set as False then the agent", "self._snat_redirect_add(sn_port['fixed_ips'][0]['ip_address'], port, interface_name) # TODO(Carl) This is a sign that", "return l3_constants.FLOATINGIP_STATUS_ERROR # Special Handling for DVR - update FIP", "self).get_floating_ips() return [i for i in floating_ips if i['host'] ==", "rtr_2_fip, _ = self.rtr_fip_subnet.get_pair() device = ip_lib.IPDevice(fip_2_rtr_name, namespace=fip_ns_name) device.route.add_route(fip_cidr, str(rtr_2_fip.ip))", "than the system generated entries i.e. 32768. For IPv4 use", "def internal_network_removed(self, port): self._dvr_internal_network_removed(port) super(DvrRouter, self).internal_network_removed(port) def get_floating_agent_gw_interface(self, ext_net_id): \"\"\"Filter", "= self.fip_ns.allocate_rule_priority() self.floating_ips_dict[floating_ip] = rule_pr fip_2_rtr_name = self.fip_ns.get_int_device_name(self.router_id) ip_rule =", "The two could end up conflicting on # creating/destroying interfaces", "rule_pr) #Add routing rule in fip namespace fip_ns_name = self.fip_ns.get_name()", "Remove all the rules. 
This is safe because if use_namespaces", "this agent.\"\"\" floating_ips = super(DvrRouter, self).get_floating_ips() return [i for i", "namespace here self.snat_namespace.delete() self.snat_namespace = None def _get_internal_port(self, subnet_id): \"\"\"Return", "as False then the agent can only configure one router,", "could # come in and want to start using this", "with excutils.save_and_reraise_exception(): LOG.exception(_LE(\"DVR: Failed updating arp entry\")) def _set_subnet_arp_info(self, port):", "to the router if not port: return try: # TODO(mrsmith):", "return snat_idx def _snat_redirect_add(self, gateway, sn_port, sn_int): \"\"\"Adds rules and", "route entry device = ip_lib.IPDevice(rtr_2_fip_name, namespace=self.ns_name) ns_ip = ip_lib.IPWrapper(namespace=fip_ns_name) device.route.delete_gateway(str(fip_2_rtr.ip),", "ports.\"\"\" if 'id' not in port['subnet']: return subnet_id = port['subnet']['id']", "router classes. is_this_snat_host = (self.agent_conf.agent_mode == 'dvr_snat' and self.get_gw_port_host() ==", "gateway, sn_port, sn_int): \"\"\"Removes rules and routes for SNAT redirection.\"\"\"", "self.fip_ns.get_ext_device_name( self.fip_ns.agent_gateway_port['id'])) ip_lib.send_garp_for_proxyarp(fip_ns_name, interface_name, floating_ip, self.agent_conf.send_arp_for_ha) # update internal structures", "DVR handling code for SNAT interface_name = self.get_internal_device_name(port['id']) self._snat_redirect_remove(sn_port['fixed_ips'][0]['ip_address'], port,", "Agent GW port for the external network.\"\"\" fip_ports = self.router.get(l3_constants.FLOATINGIP_AGENT_INTF_KEY,", "value has to be 32 bits or less but more", "internal structures self.dist_fip_count = self.dist_fip_count + 1 def floating_ip_removed_dist(self, fip_cidr):", "conntrack ! 
' '--ctstate DNAT -j ACCEPT' % {'interface_name': interface_name})", "extend smaller values snat_idx = (snat_idx >> 30) ^ (snat_idx", "0xffffffff # xor-fold the hash to reserve upper range to", "*args, **kwargs): super(DvrRouter, self).__init__(*args, **kwargs) self.agent = agent self.host =", "action): \"\"\"Configures NAT rules for Floating IPs for DVR. Remove", "then the agent can only configure one router, otherwise each", "'! -o %(interface_name)s -m conntrack ! ' '--ctstate DNAT -j", "net = netaddr.IPNetwork(ip_cidr) if net.version == 6: # the crc32", "self.fip_ns.local_subnets.release(self.router_id) self.rtr_fip_subnet = None ns_ip.del_veth(fip_2_rtr_name) is_last = self.fip_ns.unsubscribe(self.router_id) if is_last:", "( self.fip_ns.get_ext_device_name( self.fip_ns.agent_gateway_port['id'])) ip_lib.send_garp_for_proxyarp(fip_ns_name, interface_name, floating_ip, self.agent_conf.send_arp_for_ha) # update internal", "start using this namespace while this is # destroying it.", "more than the system generated entries i.e. 32768. For IPv4", "self._map_internal_interfaces(port, self.snat_ports) if not sn_port: return # DVR handling code", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "_ = self.rtr_fip_subnet.get_pair() device = ip_lib.IPDevice(fip_2_rtr_name, namespace=fip_ns_name) device.route.add_route(fip_cidr, str(rtr_2_fip.ip)) interface_name", "None # Linklocal subnet for router and floating IP namespace", "delete arp entry into router namespace for the subnet.\"\"\" port", "move the deletion of the snat namespace here self.snat_namespace.delete() self.snat_namespace", "namespace while this is # destroying it. 
The two could", "set as False then the agent can only configure one", "you may # not use this file except in compliance", "# is to move the creation of the snat namespace", "= ip_lib.IPDevice(sn_int, namespace=self.ns_name) ns_ipd.route.delete_gateway(gateway, table=snat_idx) ns_ipr.rule.delete(ip_cidr, snat_idx, snat_idx) except Exception:", "the action is add_rules if action == 'add_rules' and interface_name:", "sn_port: return # DVR handling code for SNAT interface_name =", "value of the cidr. For IPv6 generate a crc32 bit", "attached to the router if not port: return try: #", "(p for p in fip_ports if p['network_id'] == ext_net_id), None)", "self).__init__(*args, **kwargs) self.agent = agent self.host = host self.floating_ips_dict =", "into router namespace for the subnet.\"\"\" port = self._get_internal_port(subnet_id) #", "= self.fip_ns.get_name() rtr_2_fip, _ = self.rtr_fip_subnet.get_pair() device = ip_lib.IPDevice(fip_2_rtr_name, namespace=fip_ns_name)", "generated entries i.e. 32768. For IPv4 use the numeric value", "# WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "\"\"\"Generate index for DVR snat rules and route tables. The", "fip['floating_ip_address'] fixed_ip = fip['fixed_ip_address'] rule_pr = self.fip_ns.allocate_rule_priority() self.floating_ips_dict[floating_ip] = rule_pr", "i in floating_ips if i['host'] == self.host] def get_snat_interfaces(self): return", "operation): \"\"\"Add or delete arp entry into router namespace for", "def get_snat_interfaces(self): return self.router.get(l3_constants.SNAT_ROUTER_INTF_KEY, []) def get_snat_int_device_name(self, port_id): long_name =", "# TODO(Carl) Can we eliminate the need to make this", "for this router self.dist_fip_count = self.dist_fip_count - 1 if self.dist_fip_count", "the need to make this RPC while # processing a", "snat namespace here self.snat_namespace.delete() self.snat_namespace = None def _get_internal_port(self, subnet_id):", "the rules. 
This is safe because if use_namespaces is set", "for the specific language governing permissions and limitations # under", "sync creation/deletion of this namespace. self.fip_ns.delete() self.fip_ns = None def", "ex_gw_port = self.get_ex_gw_port() if not ex_gw_port: return snat_ports = self.get_snat_interfaces()", "(snat_idx & MASK_30) if snat_idx < 32768: snat_idx = snat_idx", "route tables. The index value has to be 32 bits", "self.agent = agent self.host = host self.floating_ips_dict = {} self.snat_iptables_manager", "in their own namespace. \"\"\" self.iptables_manager.ipv4['nat'].empty_chain('POSTROUTING') self.iptables_manager.ipv4['nat'].empty_chain('snat') # Add back", "self._internal_network_added( ns_name, sn_port['network_id'], sn_port['id'], sn_port['ip_cidr'], sn_port['mac_address'], interface_name, dvr_snat_ns.SNAT_INT_DEV_PREFIX) self._set_subnet_arp_info(port) def", "jump to float-snat self.iptables_manager.ipv4['nat'].add_rule('snat', '-j $float-snat') # And add them", "for SNAT interface_name = self.get_internal_device_name(port['id']) self._snat_redirect_remove(sn_port['fixed_ips'][0]['ip_address'], port, interface_name) is_this_snat_host =", "self.ex_gw_port: return sn_port = self._map_internal_interfaces(port, self.snat_ports) if not sn_port: return", "$float-snat') # And add them back if the action is", "contain the # code in the L3 agent that removes", "redirection logic')) def _snat_redirect_remove(self, gateway, sn_port, sn_int): \"\"\"Removes rules and", "port): self._dvr_internal_network_removed(port) super(DvrRouter, self).internal_network_removed(port) def get_floating_agent_gw_interface(self, ext_net_id): \"\"\"Filter Floating Agent", "Handling for DVR - update FIP namespace ip_cidr = common_utils.ip_to_cidr(fip['floating_ip_address'])", "== self.host] def get_snat_interfaces(self): return self.router.get(l3_constants.SNAT_ROUTER_INTF_KEY, []) def get_snat_int_device_name(self, port_id):", "namespace=fip_ns_name) 
device.route.delete_route(fip_cidr, str(rtr_2_fip.ip)) # check if this is the last", "ip_cidr = common_utils.ip_to_cidr(fip['floating_ip_address']) self.floating_ip_added_dist(fip, ip_cidr) return l3_constants.FLOATINGIP_STATUS_ACTIVE def remove_floating_ip(self, device,", "port based on subnet_id.\"\"\" router_ports = self.router.get(l3_constants.INTERFACE_KEY, []) for port", "= self.fip_ns.get_int_device_name(self.router_id) if self.rtr_fip_subnet is None: self.rtr_fip_subnet = self.fip_ns.local_subnets.allocate( self.router_id)", "self.router.get(l3_constants.INTERFACE_KEY, []) for port in router_ports: fips = port['fixed_ips'] for", "sn_port['ip_cidr'] snat_idx = self._get_snat_idx(ip_cidr) ns_ipr = ip_lib.IPRule(namespace=self.ns_name) ns_ipd = ip_lib.IPDevice(sn_int,", "= ip_lib.IPWrapper(namespace=fip_ns_name) device.route.delete_gateway(str(fip_2_rtr.ip), table=dvr_fip_ns.FIP_RT_TBL) self.fip_ns.local_subnets.release(self.router_id) self.rtr_fip_subnet = None ns_ip.del_veth(fip_2_rtr_name) is_last", "IPv6 generate a crc32 bit hash and xor-fold to 30", "I can't help but think that another router could #", "i.e. 32768. For IPv4 use the numeric value of the", "\"\"\" net = netaddr.IPNetwork(ip_cidr) if net.version == 6: # the", "#TODO(rajeev): Handle else case - exception/log? device = ip_lib.IPDevice(fip_2_rtr_name, namespace=fip_ns_name)", "governing permissions and limitations # under the License. 
import binascii", "floating_ips if i['host'] == self.host] def get_snat_interfaces(self): return self.router.get(l3_constants.SNAT_ROUTER_INTF_KEY, [])", "interface_name) is_this_snat_host = (self.agent_conf.agent_mode == 'dvr_snat' and self.ex_gw_port['binding:host_id'] == self.host)", "self.iptables_manager.ipv4['nat'].add_rule('snat', '-j $float-snat') # And add them back if the", "IP from FIP namespace.\"\"\" floating_ip = fip_cidr.split('/')[0] rtr_2_fip_name = self.fip_ns.get_rtr_ext_device_name(self.router_id)", "= ip_lib.IPRule(namespace=self.ns_name) ip_rule.rule.add(fixed_ip, dvr_fip_ns.FIP_RT_TBL, rule_pr) #Add routing rule in fip", "= port['fixed_ips'] for f in fips: if f['subnet_id'] == subnet_id:", "self.rtr_fip_subnet = None self.dist_fip_count = None self.snat_namespace = None def", "configure one router, otherwise each router's NAT rules will be", "subnet_id: return port def _update_arp_entry(self, ip, mac, subnet_id, operation): \"\"\"Add", "+ MASK_30 else: snat_idx = net.value return snat_idx def _snat_redirect_add(self,", "not self._add_fip_addr_to_device(fip, device): return l3_constants.FLOATINGIP_STATUS_ERROR # Special Handling for DVR", "to make this RPC while # processing a router. 
subnet_ports", "has to be 32 bits or less but more than", "operation == 'delete': device.neigh.delete(ip, mac) except Exception: with excutils.save_and_reraise_exception(): LOG.exception(_LE(\"DVR:", "fixed_ip = fip['fixed_ip_address'] rule_pr = self.fip_ns.allocate_rule_priority() self.floating_ips_dict[floating_ip] = rule_pr fip_2_rtr_name", "self.get_snat_int_device_name(sn_port['id'])) ns_name = self.snat_namespace.name prefix = dvr_snat_ns.SNAT_INT_DEV_PREFIX if ip_lib.device_exists(snat_interface, namespace=ns_name):", "namespace.\"\"\" floating_ip = fip_cidr.split('/')[0] rtr_2_fip_name = self.fip_ns.get_rtr_ext_device_name(self.router_id) fip_2_rtr_name = self.fip_ns.get_int_device_name(self.router_id)", "ip_lib.IPRule(namespace=self.ns_name) ip_rule.rule.add(fixed_ip, dvr_fip_ns.FIP_RT_TBL, rule_pr) #Add routing rule in fip namespace", "to reserve upper range to extend smaller values snat_idx =", "in and want to start using this namespace while this", "= dvr_snat_ns.SnatNamespace.get_snat_ns_name(self.router['id']) self._set_subnet_info(sn_port) interface_name = self.get_snat_int_device_name(sn_port['id']) self._internal_network_added( ns_name, sn_port['network_id'], sn_port['id'],", "for the subnet.\"\"\" port = self._get_internal_port(subnet_id) # update arp entry", "the freed range to extend smaller values so that they", "You may obtain # a copy of the License at", "= self._map_internal_interfaces(port, self.snat_ports) if not sn_port: return # DVR handling", "device.neigh.add(ip, mac) elif operation == 'delete': device.neigh.delete(ip, mac) except Exception:", "@staticmethod def _get_snat_idx(ip_cidr): \"\"\"Generate index for DVR snat rules and", "we eliminate the need to make this RPC while #", "smaller values so that they become greater than system generated", "in self.floating_ips_dict: rule_pr = self.floating_ips_dict[floating_ip] ip_rule = ip_lib.IPRule(namespace=self.ns_name) ip_rule.rule.delete(floating_ip, dvr_fip_ns.FIP_RT_TBL,", "is_last: # 
TODO(Carl) I can't help but think that another", "= ip_lib.IPDevice(interface_name, namespace=self.ns_name) if operation == 'add': device.neigh.add(ip, mac) elif", "greater than system generated entries. \"\"\" net = netaddr.IPNetwork(ip_cidr) if", "for Floating IPs for DVR. Remove all the rules. This", "'dvr_snat' and self.ex_gw_port['binding:host_id'] == self.host) if not is_this_snat_host: return snat_interface", "== 6: # the crc32 & 0xffffffff is for Python", "in router_ports: fips = port['fixed_ips'] for f in fips: if", "subnet_id] if match_port: return match_port[0] else: LOG.error(_LE('DVR: no map match_port", "' '! -o %(interface_name)s -m conntrack ! ' '--ctstate DNAT", "= netaddr.IPNetwork(ip_cidr) if net.version == 6: # the crc32 &", "get_floating_agent_gw_interface(self, ext_net_id): \"\"\"Filter Floating Agent GW port for the external", "the agent can only configure one router, otherwise each router's", "self.iptables_manager.ipv4['nat'].add_rule(*rule) self.iptables_manager.apply() def floating_ip_added_dist(self, fip, fip_cidr): \"\"\"Add floating IP to", "software # distributed under the License is distributed on an", "(the \"License\"); you may # not use this file except", "snat_idx, snat_idx) except Exception: LOG.exception(_LE('DVR: removed snat failed')) def get_gw_port_host(self):", "device = ip_lib.IPDevice(fip_2_rtr_name, namespace=fip_ns_name) device.route.add_route(fip_cidr, str(rtr_2_fip.ip)) interface_name = ( self.fip_ns.get_ext_device_name(", "self.agent_conf, self.driver, self.use_ipv6) self.snat_namespace.create() return self.snat_namespace def delete_snat_namespace(self): # TODO(mlavalle):", "found!')) @staticmethod def _get_snat_idx(ip_cidr): \"\"\"Generate index for DVR snat rules", "interfaces and such. 
I think I'd like a # semaphore", "namespace=self.ns_name) ns_ipwrapr = ip_lib.IPWrapper(namespace=self.ns_name) ns_ipd.route.add_gateway(gateway, table=snat_idx) ns_ipr.rule.add(ip_cidr, snat_idx, snat_idx) ns_ipwrapr.netns.execute(['sysctl',", "- exception/log? device = ip_lib.IPDevice(fip_2_rtr_name, namespace=fip_ns_name) device.route.delete_route(fip_cidr, str(rtr_2_fip.ip)) # check", "dvr_fip_ns.FIP_RT_TBL, rule_pr) #Add routing rule in fip namespace fip_ns_name =", "ip_lib.IPRule(namespace=self.ns_name) ip_rule.rule.delete(floating_ip, dvr_fip_ns.FIP_RT_TBL, rule_pr) self.fip_ns.deallocate_rule_priority(rule_pr) #TODO(rajeev): Handle else case -", "become greater than system generated entries. \"\"\" net = netaddr.IPNetwork(ip_cidr)", "Use the freed range to extend smaller values so that", "prefix=prefix) def internal_network_removed(self, port): self._dvr_internal_network_removed(port) super(DvrRouter, self).internal_network_removed(port) def get_floating_agent_gw_interface(self, ext_net_id):", "\"AS IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY", "rule = ('POSTROUTING', '! -i %(interface_name)s ' '! -o %(interface_name)s", "if the action is add_rules if action == 'add_rules' and", "port): if not self.ex_gw_port: return sn_port = self._map_internal_interfaces(port, self.snat_ports) if", "super(DvrRouter, self).get_floating_ips() return [i for i in floating_ips if i['host']", "host = self.router.get('gw_port_host') if not host: LOG.debug(\"gw_port_host missing from router:", "'-w', 'net.ipv4.conf.%s.' 'send_redirects=0' % sn_int]) except Exception: LOG.exception(_LE('DVR: error adding", "for IPv6 rule index MASK_30 = 0x3fffffff class DvrRouter(router.RouterInfo): def", "only configure one router, otherwise each router's NAT rules will", "xor-folding mask used for IPv6 rule index MASK_30 = 0x3fffffff", "can't help but think that another router could # come", "for DVR snat rules and route tables. 
The index value", "fip_cidr): \"\"\"Add floating IP to FIP namespace.\"\"\" floating_ip = fip['floating_ip_address']", "super(DvrRouter, self).internal_network_added(port) ex_gw_port = self.get_ex_gw_port() if not ex_gw_port: return snat_ports", "def _snat_redirect_add(self, gateway, sn_port, sn_int): \"\"\"Adds rules and routes for", "snat_ports = self.get_snat_interfaces() sn_port = self._map_internal_interfaces(port, snat_ports) if not sn_port:", "in fips: if f['subnet_id'] == subnet_id: return port def _update_arp_entry(self,", "RPC while # processing a router. subnet_ports = self.agent.get_ports_by_subnet(subnet_id) for", "and route tables. The index value has to be 32", "= ip_lib.IPDevice(sn_int, namespace=self.ns_name) ns_ipwrapr = ip_lib.IPWrapper(namespace=self.ns_name) ns_ipd.route.add_gateway(gateway, table=snat_idx) ns_ipr.rule.add(ip_cidr, snat_idx,", "ARP info retrieved from Plugin for existing ports.\"\"\" if 'id'", "for port in router_ports: fips = port['fixed_ips'] for f in", "LOG.exception(_LE('DVR: removed snat failed')) def get_gw_port_host(self): host = self.router.get('gw_port_host') if", "self._get_snat_idx(ip_cidr) ns_ipr = ip_lib.IPRule(namespace=self.ns_name) ns_ipd = ip_lib.IPDevice(sn_int, namespace=self.ns_name) ns_ipwrapr =", "import netaddr from oslo_log import log as logging from oslo_utils", "to start using this namespace while this is # destroying", "sn_port = self._map_internal_interfaces(port, snat_ports) if not sn_port: return interface_name =", "= self.rtr_fip_subnet.get_pair() device = ip_lib.IPDevice(fip_2_rtr_name, namespace=fip_ns_name) device.route.add_route(fip_cidr, str(rtr_2_fip.ip)) interface_name =", "return l3_constants.FLOATINGIP_STATUS_ACTIVE def remove_floating_ip(self, device, ip_cidr): super(DvrRouter, self).remove_floating_ip(device, ip_cidr) self.floating_ip_removed_dist(ip_cidr)", "== 'add_rules' and interface_name: rule = ('POSTROUTING', '! -i %(interface_name)s", "# creating/destroying interfaces and such. 
I think I'd like a", "to be 32 bits or less but more than the", "= self.dist_fip_count + 1 def floating_ip_removed_dist(self, fip_cidr): \"\"\"Remove floating IP", "= net.value return snat_idx def _snat_redirect_add(self, gateway, sn_port, sn_int): \"\"\"Adds", "snat_idx def _snat_redirect_add(self, gateway, sn_port, sn_int): \"\"\"Adds rules and routes", "return self.snat_namespace def delete_snat_namespace(self): # TODO(mlavalle): in the near future,", "host def internal_network_added(self, port): super(DvrRouter, self).internal_network_added(port) ex_gw_port = self.get_ex_gw_port() if", "0x3fffffff class DvrRouter(router.RouterInfo): def __init__(self, agent, host, *args, **kwargs): super(DvrRouter,", "missing from router: %s\", self.router['id']) return host def internal_network_added(self, port):", "# Licensed under the Apache License, Version 2.0 (the \"License\");", "MASK_30 = 0x3fffffff class DvrRouter(router.RouterInfo): def __init__(self, agent, host, *args,", "snat failed')) def get_gw_port_host(self): host = self.router.get('gw_port_host') if not host:", "not sn_port: return # DVR handling code for SNAT interface_name", "an \"AS IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF", "self.iptables_manager.ipv4['nat'].empty_chain('snat') # Add back the jump to float-snat self.iptables_manager.ipv4['nat'].add_rule('snat', '-j", "the subnet is attached to the router if not port:", "device): if not self._add_fip_addr_to_device(fip, device): return l3_constants.FLOATINGIP_STATUS_ERROR # Special Handling", "( self.get_snat_int_device_name(sn_port['id'])) ns_name = self.snat_namespace.name prefix = dvr_snat_ns.SNAT_INT_DEV_PREFIX if ip_lib.device_exists(snat_interface,", "while this is # destroying it. The two could end", "interface_name = self.get_internal_device_name(port['id']) self._snat_redirect_add(sn_port['fixed_ips'][0]['ip_address'], port, interface_name) # TODO(Carl) This is", "than system generated entries. 
\"\"\" net = netaddr.IPNetwork(ip_cidr) if net.version", "def get_floating_agent_gw_interface(self, ext_net_id): \"\"\"Filter Floating Agent GW port for the", "LOG.debug(\"gw_port_host missing from router: %s\", self.router['id']) return host def internal_network_added(self,", "# update internal structures self.dist_fip_count = self.dist_fip_count + 1 def", "self.fip_ns = None def add_floating_ip(self, fip, interface_name, device): if not", "Special Handling for DVR - update FIP namespace ip_cidr =", "floating_ip_removed_dist(self, fip_cidr): \"\"\"Remove floating IP from FIP namespace.\"\"\" floating_ip =", "= self.snat_namespace.name prefix = dvr_snat_ns.SNAT_INT_DEV_PREFIX if ip_lib.device_exists(snat_interface, namespace=ns_name): self.driver.unplug(snat_interface, namespace=ns_name,", "The index value has to be 32 bits or less", "KIND, either express or implied. See the # License for", "namespace=self.ns_name) ns_ipd.route.delete_gateway(gateway, table=snat_idx) ns_ipr.rule.delete(ip_cidr, snat_idx, snat_idx) except Exception: LOG.exception(_LE('DVR: removed", "generated entries. \"\"\" net = netaddr.IPNetwork(ip_cidr) if net.version == 6:", "= (self.agent_conf.agent_mode == 'dvr_snat' and self.get_gw_port_host() == self.host) if not", "32768. For IPv4 use the numeric value of the cidr.", "ip_lib.IPWrapper(namespace=self.ns_name) ns_ipd.route.add_gateway(gateway, table=snat_idx) ns_ipr.rule.add(ip_cidr, snat_idx, snat_idx) ns_ipwrapr.netns.execute(['sysctl', '-w', 'net.ipv4.conf.%s.' 
'send_redirects=0'", "'id' not in port['subnet']: return subnet_id = port['subnet']['id'] # TODO(Carl)", "and self.get_gw_port_host() == self.host) if not is_this_snat_host: return ns_name =", "freed range to extend smaller values so that they become", "dvr_snat_ns.SnatNamespace.get_snat_ns_name(self.router['id']) self._set_subnet_info(sn_port) interface_name = self.get_snat_int_device_name(sn_port['id']) self._internal_network_added( ns_name, sn_port['network_id'], sn_port['id'], sn_port['ip_cidr'],", "used for IPv6 rule index MASK_30 = 0x3fffffff class DvrRouter(router.RouterInfo):", "below for bulk calls interface_name = self.get_internal_device_name(port['id']) device = ip_lib.IPDevice(interface_name,", "if the subnet is attached to the router if not", "gateway for a dvr. The # first step is to", "self.get_snat_int_device_name(sn_port['id']) self._internal_network_added( ns_name, sn_port['network_id'], sn_port['id'], sn_port['ip_cidr'], sn_port['mac_address'], interface_name, dvr_snat_ns.SNAT_INT_DEV_PREFIX) self._set_subnet_arp_info(port)", "= fip_cidr.split('/')[0] rtr_2_fip_name = self.fip_ns.get_rtr_ext_device_name(self.router_id) fip_2_rtr_name = self.fip_ns.get_int_device_name(self.router_id) if self.rtr_fip_subnet", "sn_port['id'], sn_port['ip_cidr'], sn_port['mac_address'], interface_name, dvr_snat_ns.SNAT_INT_DEV_PREFIX) self._set_subnet_arp_info(port) def _dvr_internal_network_removed(self, port): if", "that they become greater than system generated entries. 
\"\"\" net", "FIP namespace ip_cidr = common_utils.ip_to_cidr(fip['floating_ip_address']) self.floating_ip_added_dist(fip, ip_cidr) return l3_constants.FLOATINGIP_STATUS_ACTIVE def", "# a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "as logging from oslo_utils import excutils from neutron.agent.l3 import dvr_fip_ns", "self.router.get(l3_constants.SNAT_ROUTER_INTF_KEY, []) def get_snat_int_device_name(self, port_id): long_name = dvr_snat_ns.SNAT_INT_DEV_PREFIX + port_id", "bits. Use the freed range to extend smaller values so", "self.dist_fip_count - 1 if self.dist_fip_count == 0: #remove default route", "f in fips: if f['subnet_id'] == subnet_id: return port def", "method should contain the # code in the L3 agent", "= self._get_snat_idx(ip_cidr) ns_ipr = ip_lib.IPRule(namespace=self.ns_name) ns_ipd = ip_lib.IPDevice(sn_int, namespace=self.ns_name) ns_ipwrapr", "fip, fip_cidr): \"\"\"Add floating IP to FIP namespace.\"\"\" floating_ip =", "implied. See the # License for the specific language governing", "fips: if f['subnet_id'] == subnet_id: return port def _update_arp_entry(self, ip,", "\"\"\"Configures NAT rules for Floating IPs for DVR. Remove all", "if this is the last FIP for this router self.dist_fip_count", "removes an external gateway for a dvr. The # first", "return [i for i in floating_ips if i['host'] == self.host]", "a crc32 bit hash and xor-fold to 30 bits. Use", "LOG.exception(_LE('DVR: error adding redirection logic')) def _snat_redirect_remove(self, gateway, sn_port, sn_int):", "fixed_ip in p['fixed_ips']: self._update_arp_entry(fixed_ip['ip_address'], p['mac_address'], subnet_id, 'add') def _map_internal_interfaces(self, int_port,", "from neutron.agent.l3 import dvr_fip_ns from neutron.agent.l3 import dvr_snat_ns from neutron.agent.l3", "snat_idx, snat_idx) ns_ipwrapr.netns.execute(['sysctl', '-w', 'net.ipv4.conf.%s.' 
'send_redirects=0' % sn_int]) except Exception:", "rule_pr = self.floating_ips_dict[floating_ip] ip_rule = ip_lib.IPRule(namespace=self.ns_name) ip_rule.rule.delete(floating_ip, dvr_fip_ns.FIP_RT_TBL, rule_pr) self.fip_ns.deallocate_rule_priority(rule_pr)", "bits or less but more than the system generated entries", "based on subnet_id.\"\"\" router_ports = self.router.get(l3_constants.INTERFACE_KEY, []) for port in", "error adding redirection logic')) def _snat_redirect_remove(self, gateway, sn_port, sn_int): \"\"\"Removes", "self.driver.unplug(snat_interface, namespace=ns_name, prefix=prefix) def internal_network_removed(self, port): self._dvr_internal_network_removed(port) super(DvrRouter, self).internal_network_removed(port) def", "This is a sign that dvr needs two router classes.", "'add': device.neigh.add(ip, mac) elif operation == 'delete': device.neigh.delete(ip, mac) except", "self.dist_fip_count = None self.snat_namespace = None def get_floating_ips(self): \"\"\"Filter Floating", "delete_snat_namespace(self): # TODO(mlavalle): in the near future, this method should", "return sn_port = self._map_internal_interfaces(port, self.snat_ports) if not sn_port: return #", "and floating IP namespace link self.rtr_fip_subnet = None self.dist_fip_count =", "# And add them back if the action is add_rules", "add_rules if action == 'add_rules' and interface_name: rule = ('POSTROUTING',", "back if the action is add_rules if action == 'add_rules'", "code in the L3 agent that removes an external gateway", "snat_idx + MASK_30 else: snat_idx = net.value return snat_idx def", "None ns_ip.del_veth(fip_2_rtr_name) is_last = self.fip_ns.unsubscribe(self.router_id) if is_last: # TODO(Carl) I", "if snat_idx < 32768: snat_idx = snat_idx + MASK_30 else:", "interface port.\"\"\" fixed_ip = int_port['fixed_ips'][0] subnet_id = fixed_ip['subnet_id'] match_port =", "not is_this_snat_host: return snat_interface = ( self.get_snat_int_device_name(sn_port['id'])) ns_name = 
self.snat_namespace.name", "router. subnet_ports = self.agent.get_ports_by_subnet(subnet_id) for p in subnet_ports: if p['device_owner']", "IPv4 use the numeric value of the cidr. For IPv6", "obtain # a copy of the License at # #", "logic')) def _snat_redirect_remove(self, gateway, sn_port, sn_int): \"\"\"Removes rules and routes", "prefix = dvr_snat_ns.SNAT_INT_DEV_PREFIX if ip_lib.device_exists(snat_interface, namespace=ns_name): self.driver.unplug(snat_interface, namespace=ns_name, prefix=prefix) def", "ip_cidr): super(DvrRouter, self).remove_floating_ip(device, ip_cidr) self.floating_ip_removed_dist(ip_cidr) def create_snat_namespace(self): # TODO(mlavalle): in", "sn_port = self._map_internal_interfaces(port, self.snat_ports) if not sn_port: return # DVR", "device = ip_lib.IPDevice(interface_name, namespace=self.ns_name) if operation == 'add': device.neigh.add(ip, mac)", "import excutils from neutron.agent.l3 import dvr_fip_ns from neutron.agent.l3 import dvr_snat_ns", "neutron.i18n import _LE LOG = logging.getLogger(__name__) # xor-folding mask used", "rules. This is safe because if use_namespaces is set as", "net.version == 6: # the crc32 & 0xffffffff is for", "here self.snat_namespace = dvr_snat_ns.SnatNamespace(self.router['id'], self.agent_conf, self.driver, self.use_ipv6) self.snat_namespace.create() return self.snat_namespace", "help but think that another router could # come in", "ip_lib.IPDevice(fip_2_rtr_name, namespace=fip_ns_name) device.route.add_route(fip_cidr, str(rtr_2_fip.ip)) interface_name = ( self.fip_ns.get_ext_device_name( self.fip_ns.agent_gateway_port['id'])) ip_lib.send_garp_for_proxyarp(fip_ns_name,", "two could end up conflicting on # creating/destroying interfaces and", "range to extend smaller values snat_idx = (snat_idx >> 30)", "# destroying it. 
The two could end up conflicting on", "if operation == 'add': device.neigh.add(ip, mac) elif operation == 'delete':", "safe because if use_namespaces is set as False then the", "interface_name = self.get_snat_int_device_name(sn_port['id']) self._internal_network_added( ns_name, sn_port['network_id'], sn_port['id'], sn_port['ip_cidr'], sn_port['mac_address'], interface_name,", "table=dvr_fip_ns.FIP_RT_TBL) self.fip_ns.local_subnets.release(self.router_id) self.rtr_fip_subnet = None ns_ip.del_veth(fip_2_rtr_name) is_last = self.fip_ns.unsubscribe(self.router_id) if", "port for the given internal interface port.\"\"\" fixed_ip = int_port['fixed_ips'][0]", "mac) except Exception: with excutils.save_and_reraise_exception(): LOG.exception(_LE(\"DVR: Failed updating arp entry\"))", "neutron.agent.l3 import dvr_snat_ns from neutron.agent.l3 import router_info as router from", "subnet_id, operation): \"\"\"Add or delete arp entry into router namespace", "router_info as router from neutron.agent.linux import ip_lib from neutron.common import", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "neutron.common import utils as common_utils from neutron.i18n import _LE LOG", "in the L3 agent that removes an external gateway for", "if 'id' not in port['subnet']: return subnet_id = port['subnet']['id'] #", "= None self.snat_namespace = None def get_floating_ips(self): \"\"\"Filter Floating IPs", "self._get_internal_port(subnet_id) # update arp entry only if the subnet is", "floating_ip = fip['floating_ip_address'] fixed_ip = fip['fixed_ip_address'] rule_pr = self.fip_ns.allocate_rule_priority() self.floating_ips_dict[floating_ip]", "'! -i %(interface_name)s ' '! 
-o %(interface_name)s -m conntrack !", "= dvr_snat_ns.SnatNamespace(self.router['id'], self.agent_conf, self.driver, self.use_ipv6) self.snat_namespace.create() return self.snat_namespace def delete_snat_namespace(self):", "Exception: LOG.exception(_LE('DVR: removed snat failed')) def get_gw_port_host(self): host = self.router.get('gw_port_host')", "def floating_ip_removed_dist(self, fip_cidr): \"\"\"Remove floating IP from FIP namespace.\"\"\" floating_ip", "< 32768: snat_idx = snat_idx + MASK_30 else: snat_idx =", "# Copyright (c) 2015 Openstack Foundation # # Licensed under", "self.fip_ns.get_name() if floating_ip in self.floating_ips_dict: rule_pr = self.floating_ips_dict[floating_ip] ip_rule =", "= None def add_floating_ip(self, fip, interface_name, device): if not self._add_fip_addr_to_device(fip,", "_map_internal_interfaces(self, int_port, snat_ports): \"\"\"Return the SNAT port for the given", "router if not port: return try: # TODO(mrsmith): optimize the", "device.neigh.delete(ip, mac) except Exception: with excutils.save_and_reraise_exception(): LOG.exception(_LE(\"DVR: Failed updating arp", "check if this is the last FIP for this router", "the # code in the L3 agent that removes an", "= self.router.get('gw_port_host') if not host: LOG.debug(\"gw_port_host missing from router: %s\",", "DvrRouter(router.RouterInfo): def __init__(self, agent, host, *args, **kwargs): super(DvrRouter, self).__init__(*args, **kwargs)", "(self.agent_conf.agent_mode == 'dvr_snat' and self.ex_gw_port['binding:host_id'] == self.host) if not is_this_snat_host:", "# TODO(Carl) I can't help but think that another router", "ns_ipd = ip_lib.IPDevice(sn_int, namespace=self.ns_name) ns_ipwrapr = ip_lib.IPWrapper(namespace=self.ns_name) ns_ipd.route.add_gateway(gateway, table=snat_idx) ns_ipr.rule.add(ip_cidr,", "Python 2.6 and 3.0 compatibility snat_idx = binascii.crc32(ip_cidr) & 0xffffffff", "not in port['subnet']: return subnet_id = port['subnet']['id'] # TODO(Carl) Can", "2.0 (the 
\"License\"); you may # not use this file", "by applicable law or agreed to in writing, software #", "if action == 'add_rules' and interface_name: rule = ('POSTROUTING', '!", "self._map_internal_interfaces(port, snat_ports) if not sn_port: return interface_name = self.get_internal_device_name(port['id']) self._snat_redirect_add(sn_port['fixed_ips'][0]['ip_address'],", "import dvr_fip_ns from neutron.agent.l3 import dvr_snat_ns from neutron.agent.l3 import router_info", "self.floating_ip_added_dist(fip, ip_cidr) return l3_constants.FLOATINGIP_STATUS_ACTIVE def remove_floating_ip(self, device, ip_cidr): super(DvrRouter, self).remove_floating_ip(device,", "floating IP to FIP namespace.\"\"\" floating_ip = fip['floating_ip_address'] fixed_ip =", "Exception: LOG.exception(_LE('DVR: error adding redirection logic')) def _snat_redirect_remove(self, gateway, sn_port,", "self._snat_redirect_remove(sn_port['fixed_ips'][0]['ip_address'], port, interface_name) is_this_snat_host = (self.agent_conf.agent_mode == 'dvr_snat' and self.ex_gw_port['binding:host_id']", "0: #remove default route entry device = ip_lib.IPDevice(rtr_2_fip_name, namespace=self.ns_name) ns_ip", "entry device = ip_lib.IPDevice(rtr_2_fip_name, namespace=self.ns_name) ns_ip = ip_lib.IPWrapper(namespace=fip_ns_name) device.route.delete_gateway(str(fip_2_rtr.ip), table=dvr_fip_ns.FIP_RT_TBL)", "namespace=self.ns_name) if operation == 'add': device.neigh.add(ip, mac) elif operation ==", "'add') def _map_internal_interfaces(self, int_port, snat_ports): \"\"\"Return the SNAT port for", "== 'add': device.neigh.add(ip, mac) elif operation == 'delete': device.neigh.delete(ip, mac)", "# Add back the jump to float-snat self.iptables_manager.ipv4['nat'].add_rule('snat', '-j $float-snat')", "namespace=self.ns_name) ns_ip = ip_lib.IPWrapper(namespace=fip_ns_name) device.route.delete_gateway(str(fip_2_rtr.ip), table=dvr_fip_ns.FIP_RT_TBL) self.fip_ns.local_subnets.release(self.router_id) self.rtr_fip_subnet = None", 
"Foundation # # Licensed under the Apache License, Version 2.0", "router self.dist_fip_count = self.dist_fip_count - 1 if self.dist_fip_count == 0:", "self.agent_conf.send_arp_for_ha) # update internal structures self.dist_fip_count = self.dist_fip_count + 1", "it. The two could end up conflicting on # creating/destroying", "# under the License. import binascii import netaddr from oslo_log", "permissions and limitations # under the License. import binascii import", "mac) elif operation == 'delete': device.neigh.delete(ip, mac) except Exception: with", "= self.get_ex_gw_port() if not ex_gw_port: return snat_ports = self.get_snat_interfaces() sn_port", "ns_ipwrapr = ip_lib.IPWrapper(namespace=self.ns_name) ns_ipd.route.add_gateway(gateway, table=snat_idx) ns_ipr.rule.add(ip_cidr, snat_idx, snat_idx) ns_ipwrapr.netns.execute(['sysctl', '-w',", "self.fip_ns.delete() self.fip_ns = None def add_floating_ip(self, fip, interface_name, device): if", "f['subnet_id'] == subnet_id: return port def _update_arp_entry(self, ip, mac, subnet_id,", "agent.\"\"\" floating_ips = super(DvrRouter, self).get_floating_ips() return [i for i in", "and self.ex_gw_port['binding:host_id'] == self.host) if not is_this_snat_host: return snat_interface =", "applicable law or agreed to in writing, software # distributed", "snat_idx = (snat_idx >> 30) ^ (snat_idx & MASK_30) if", "return snat_ports = self.get_snat_interfaces() sn_port = self._map_internal_interfaces(port, snat_ports) if not", "**kwargs): super(DvrRouter, self).__init__(*args, **kwargs) self.agent = agent self.host = host", "device = ip_lib.IPDevice(rtr_2_fip_name, namespace=self.ns_name) ns_ip = ip_lib.IPWrapper(namespace=fip_ns_name) device.route.delete_gateway(str(fip_2_rtr.ip), table=dvr_fip_ns.FIP_RT_TBL) self.fip_ns.local_subnets.release(self.router_id)", "this method should contain the # code in the L3", "on subnet_id.\"\"\" router_ports = self.router.get(l3_constants.INTERFACE_KEY, []) for port in router_ports:", "WITHOUT # 
WARRANTIES OR CONDITIONS OF ANY KIND, either express", "ip_lib.device_exists(snat_interface, namespace=ns_name): self.driver.unplug(snat_interface, namespace=ns_name, prefix=prefix) def internal_network_removed(self, port): self._dvr_internal_network_removed(port) super(DvrRouter,", "self.fip_ns.get_int_device_name(self.router_id) if self.rtr_fip_subnet is None: self.rtr_fip_subnet = self.fip_ns.local_subnets.allocate( self.router_id) rtr_2_fip,", "snat_interface = ( self.get_snat_int_device_name(sn_port['id'])) ns_name = self.snat_namespace.name prefix = dvr_snat_ns.SNAT_INT_DEV_PREFIX", "self.rtr_fip_subnet = None ns_ip.del_veth(fip_2_rtr_name) is_last = self.fip_ns.unsubscribe(self.router_id) if is_last: #", "rule_pr = self.fip_ns.allocate_rule_priority() self.floating_ips_dict[floating_ip] = rule_pr fip_2_rtr_name = self.fip_ns.get_int_device_name(self.router_id) ip_rule", "or delete arp entry into router namespace for the subnet.\"\"\"", "FIP namespace.\"\"\" floating_ip = fip['floating_ip_address'] fixed_ip = fip['fixed_ip_address'] rule_pr =", "= port['subnet']['id'] # TODO(Carl) Can we eliminate the need to", "def _dvr_internal_network_removed(self, port): if not self.ex_gw_port: return sn_port = self._map_internal_interfaces(port,", "1 if self.dist_fip_count == 0: #remove default route entry device", "port.\"\"\" fixed_ip = int_port['fixed_ips'][0] subnet_id = fixed_ip['subnet_id'] match_port = [p", "this namespace while this is # destroying it. The two", "def _set_subnet_arp_info(self, port): \"\"\"Set ARP info retrieved from Plugin for", "they become greater than system generated entries. 
\"\"\" net =", "# License for the specific language governing permissions and limitations", "fips = port['fixed_ips'] for f in fips: if f['subnet_id'] ==", "namespace=ns_name): self.driver.unplug(snat_interface, namespace=ns_name, prefix=prefix) def internal_network_removed(self, port): self._dvr_internal_network_removed(port) super(DvrRouter, self).internal_network_removed(port)", "ns_ipr.rule.delete(ip_cidr, snat_idx, snat_idx) except Exception: LOG.exception(_LE('DVR: removed snat failed')) def", "using this namespace while this is # destroying it. The", "floating_ip = fip_cidr.split('/')[0] rtr_2_fip_name = self.fip_ns.get_rtr_ext_device_name(self.router_id) fip_2_rtr_name = self.fip_ns.get_int_device_name(self.router_id) if", "def get_snat_int_device_name(self, port_id): long_name = dvr_snat_ns.SNAT_INT_DEV_PREFIX + port_id return long_name[:self.driver.DEV_NAME_LEN]", "ns_ipd = ip_lib.IPDevice(sn_int, namespace=self.ns_name) ns_ipd.route.delete_gateway(gateway, table=snat_idx) ns_ipr.rule.delete(ip_cidr, snat_idx, snat_idx) except", "if self.rtr_fip_subnet is None: self.rtr_fip_subnet = self.fip_ns.local_subnets.allocate( self.router_id) rtr_2_fip, fip_2_rtr", "router, otherwise each router's NAT rules will be in their", "def delete_snat_namespace(self): # TODO(mlavalle): in the near future, this method", "hash and xor-fold to 30 bits. Use the freed range", "{'interface_name': interface_name}) self.iptables_manager.ipv4['nat'].add_rule(*rule) self.iptables_manager.apply() def floating_ip_added_dist(self, fip, fip_cidr): \"\"\"Add floating", "-j ACCEPT' % {'interface_name': interface_name}) self.iptables_manager.ipv4['nat'].add_rule(*rule) self.iptables_manager.apply() def floating_ip_added_dist(self, fip,", "and want to start using this namespace while this is", "host: LOG.debug(\"gw_port_host missing from router: %s\", self.router['id']) return host def", "language governing permissions and limitations # under the License. 
import", "router: %s\", self.router['id']) return host def internal_network_added(self, port): super(DvrRouter, self).internal_network_added(port)", "License. You may obtain # a copy of the License", "self.snat_namespace = dvr_snat_ns.SnatNamespace(self.router['id'], self.agent_conf, self.driver, self.use_ipv6) self.snat_namespace.create() return self.snat_namespace def", "ANY KIND, either express or implied. See the # License", "this is the last FIP for this router self.dist_fip_count =", "l3_constants.ROUTER_INTERFACE_OWNERS: for fixed_ip in p['fixed_ips']: self._update_arp_entry(fixed_ip['ip_address'], p['mac_address'], subnet_id, 'add') def", "== 'delete': device.neigh.delete(ip, mac) except Exception: with excutils.save_and_reraise_exception(): LOG.exception(_LE(\"DVR: Failed", "so that they become greater than system generated entries. \"\"\"", "IPs for DVR. Remove all the rules. This is safe", "binascii.crc32(ip_cidr) & 0xffffffff # xor-fold the hash to reserve upper", "for Python 2.6 and 3.0 compatibility snat_idx = binascii.crc32(ip_cidr) &", "crc32 & 0xffffffff is for Python 2.6 and 3.0 compatibility", "self.iptables_manager.ipv4['nat'].empty_chain('POSTROUTING') self.iptables_manager.ipv4['nat'].empty_chain('snat') # Add back the jump to float-snat self.iptables_manager.ipv4['nat'].add_rule('snat',", "self).internal_network_added(port) ex_gw_port = self.get_ex_gw_port() if not ex_gw_port: return snat_ports =", "or implied. See the # License for the specific language", "values so that they become greater than system generated entries." ]
[ "module defines several custom warning category classes. \"\"\" class SiderWarning(Warning):", "class SiderWarning(Warning): \"\"\"All warning classes used by Sider extend this", "for warnings about performance worries. Operations that warn this category", "\"\"\"The category for warnings about performance worries. Operations that warn", "categories ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This module defines several custom warning category classes.", "by Sider extend this base class.\"\"\" class PerformanceWarning(SiderWarning, RuntimeWarning): \"\"\"The", "RuntimeWarning): \"\"\"The category for warnings about performance worries. Operations that", "that warn this category would work but be inefficient. \"\"\"", "but be inefficient. \"\"\" class TransactionWarning(SiderWarning, RuntimeWarning): \"\"\"The category for", "used by Sider extend this base class.\"\"\" class PerformanceWarning(SiderWarning, RuntimeWarning):", "category would work but be inefficient. \"\"\" class TransactionWarning(SiderWarning, RuntimeWarning):", "SiderWarning(Warning): \"\"\"All warning classes used by Sider extend this base", "--- Warning categories ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This module defines several custom warning", "category classes. \"\"\" class SiderWarning(Warning): \"\"\"All warning classes used by", "custom warning category classes. \"\"\" class SiderWarning(Warning): \"\"\"All warning classes", "\"\"\" class SiderWarning(Warning): \"\"\"All warning classes used by Sider extend", "warnings about performance worries. Operations that warn this category would", "warning category classes. \"\"\" class SiderWarning(Warning): \"\"\"All warning classes used", "performance worries. 
Operations that warn this category would work but", "Warning categories ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This module defines several custom warning category", "class.\"\"\" class PerformanceWarning(SiderWarning, RuntimeWarning): \"\"\"The category for warnings about performance", "this base class.\"\"\" class PerformanceWarning(SiderWarning, RuntimeWarning): \"\"\"The category for warnings", "Operations that warn this category would work but be inefficient.", "inefficient. \"\"\" class TransactionWarning(SiderWarning, RuntimeWarning): \"\"\"The category for warnings about", "category for warnings about performance worries. Operations that warn this", "class PerformanceWarning(SiderWarning, RuntimeWarning): \"\"\"The category for warnings about performance worries.", "defines several custom warning category classes. \"\"\" class SiderWarning(Warning): \"\"\"All", "base class.\"\"\" class PerformanceWarning(SiderWarning, RuntimeWarning): \"\"\"The category for warnings about", "PerformanceWarning(SiderWarning, RuntimeWarning): \"\"\"The category for warnings about performance worries. Operations", "this category would work but be inefficient. \"\"\" class TransactionWarning(SiderWarning,", "\"\"\" class TransactionWarning(SiderWarning, RuntimeWarning): \"\"\"The category for warnings about transactions.\"\"\"", "work but be inefficient. \"\"\" class TransactionWarning(SiderWarning, RuntimeWarning): \"\"\"The category", "be inefficient. \"\"\" class TransactionWarning(SiderWarning, RuntimeWarning): \"\"\"The category for warnings", "warning classes used by Sider extend this base class.\"\"\" class", "extend this base class.\"\"\" class PerformanceWarning(SiderWarning, RuntimeWarning): \"\"\"The category for", "about performance worries. Operations that warn this category would work", "several custom warning category classes. \"\"\" class SiderWarning(Warning): \"\"\"All warning", "classes. 
\"\"\" class SiderWarning(Warning): \"\"\"All warning classes used by Sider", "\"\"\":mod:`sider.warnings` --- Warning categories ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This module defines several custom", "warn this category would work but be inefficient. \"\"\" class", "\"\"\"All warning classes used by Sider extend this base class.\"\"\"", "<reponame>PCManticore/sider \"\"\":mod:`sider.warnings` --- Warning categories ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This module defines several", "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This module defines several custom warning category classes. \"\"\"", "This module defines several custom warning category classes. \"\"\" class", "Sider extend this base class.\"\"\" class PerformanceWarning(SiderWarning, RuntimeWarning): \"\"\"The category", "worries. Operations that warn this category would work but be", "classes used by Sider extend this base class.\"\"\" class PerformanceWarning(SiderWarning,", "would work but be inefficient. \"\"\" class TransactionWarning(SiderWarning, RuntimeWarning): \"\"\"The" ]
[ "siteUrl isAdult nextAiringEpisode { timeUntilAiring episode } } } \"\"\"", "native } synonyms status description startDate { year month day", "} bannerImage genres averageScore siteUrl isAdult popularity } } }", "User(search: $search) { id name html_about: about(asHtml: true) about avatar", "$sort: [MediaSort] = [SEARCH_MATCH], $exclude: MediaFormat, $isAdult: Boolean ) {", "{ id name html_about: about(asHtml: true) about avatar { large", "} endDate { year month day } episodes chapters volumes", "\"\"\" MEDIA_BY_ID = \"\"\" query ($id: Int, $type: MediaType) {", "MediaType) { Media(id: $id, type: $type) { id type format", "search: $search, type: $type, sort: $sort, format_not: $exclude, isAdult: $isAdult)", "watchedTime chaptersRead } } } \"\"\" USER_BY_ID = \"\"\" query", "$search: String, $type: MediaType, $sort: [MediaSort] = [SEARCH_MATCH], $exclude: MediaFormat,", "\"\"\" query ( $id: Int, $page: Int, $perPage: Int, $search:", "} \"\"\" USER_SEARCH = \"\"\" query ($search: String) { User(search:", "USER_SEARCH = \"\"\" query ($search: String) { User(search: $search) {", "$perPage: Int, $search: String, $type: MediaType, $sort: [MediaSort] = [SEARCH_MATCH],", "averageScore siteUrl isAdult popularity } } } \"\"\" USER_SEARCH =", "html_about: about(asHtml: true) about avatar { large } bannerImage siteUrl", "[MediaSort] = [SEARCH_MATCH], $exclude: MediaFormat, $isAdult: Boolean ) { Page(page:", "Int, $perPage: Int, $search: String, $type: MediaType, $sort: [MediaSort] =", "about avatar { large } bannerImage siteUrl stats { watchedTime", "{ large color } bannerImage genres averageScore siteUrl isAdult popularity", "large color } bannerImage genres averageScore siteUrl isAdult popularity }", "isAdult popularity } } } \"\"\" USER_SEARCH = \"\"\" query", "} \"\"\" MEDIA_PAGED = \"\"\" query ( $id: Int, $page:", "avatar { large } bannerImage siteUrl stats { watchedTime chaptersRead", "{ year month day } episodes chapters volumes coverImage {", "$type) { id type 
format title { english romaji native", "($id: Int) { User(id: $id) { id name html_about: about(asHtml:", "\"\"\" query ($search: String, $type: MediaType, $exclude: MediaFormat, $isAdult: Boolean)", "bannerImage genres averageScore siteUrl isAdult popularity } } } \"\"\"", "\"\"\" USER_BY_ID = \"\"\" query ($id: Int) { User(id: $id)", "User(id: $id) { id name html_about: about(asHtml: true) about avatar", "} \"\"\" USER_BY_ID = \"\"\" query ($id: Int) { User(id:", "siteUrl stats { watchedTime chaptersRead } } } \"\"\" USER_BY_ID", "{ watchedTime chaptersRead } } } \"\"\" USER_BY_ID = \"\"\"", "($search: String) { User(search: $search) { id name html_about: about(asHtml:", "MEDIA_BY_ID = \"\"\" query ($id: Int, $type: MediaType) { Media(id:", "Int, $page: Int, $perPage: Int, $search: String, $type: MediaType, $sort:", "status description startDate { year month day } endDate {", "($id: Int, $type: MediaType) { Media(id: $id, type: $type) {", "query ($search: String) { User(search: $search) { id name html_about:", "$search, type: $type, sort: $sort, format_not: $exclude, isAdult: $isAdult) {", "query ($id: Int) { User(id: $id) { id name html_about:", "stats { watchedTime chaptersRead } } } \"\"\" USER_BY_ID =", "id type format title { english romaji native } synonyms", "Media(search: $search, type: $type, format_not: $exclude, isAdult: $isAdult) { id", "$type: MediaType, $sort: [MediaSort] = [SEARCH_MATCH], $exclude: MediaFormat, $isAdult: Boolean", "episode } } } \"\"\" MEDIA_PAGED = \"\"\" query (", "about(asHtml: true) about avatar { large } bannerImage siteUrl stats", "MEDIA_SEARCH = \"\"\" query ($search: String, $type: MediaType, $exclude: MediaFormat,", "nextAiringEpisode { timeUntilAiring episode } } } \"\"\" MEDIA_BY_ID =", "Boolean) { Media(search: $search, type: $type, format_not: $exclude, isAdult: $isAdult)", "= \"\"\" query ($id: Int, $type: MediaType) { Media(id: $id,", "isAdult nextAiringEpisode { timeUntilAiring episode } } } \"\"\" MEDIA_PAGED", 
"String, $type: MediaType, $exclude: MediaFormat, $isAdult: Boolean) { Media(search: $search,", "day } episodes chapters volumes coverImage { large color }", "type: $type) { id type format title { english romaji", "popularity } } } \"\"\" USER_SEARCH = \"\"\" query ($search:", "format title { english romaji native } synonyms status description", "year month day } episodes chapters volumes coverImage { large", "$page: Int, $perPage: Int, $search: String, $type: MediaType, $sort: [MediaSort]", "{ media(id: $id, search: $search, type: $type, sort: $sort, format_not:", "chaptersRead } } } \"\"\" USER_BY_ID = \"\"\" query ($id:", "{ Media(id: $id, type: $type) { id type format title", "type: $type, format_not: $exclude, isAdult: $isAdult) { id type format", ") { Page(page: $page, perPage: $perPage) { media(id: $id, search:", "{ english romaji native } synonyms status description startDate {", "chapters coverImage { large color } bannerImage genres averageScore siteUrl", "\"\"\" MEDIA_PAGED = \"\"\" query ( $id: Int, $page: Int,", "MediaFormat, $isAdult: Boolean ) { Page(page: $page, perPage: $perPage) {", "MediaFormat, $isAdult: Boolean) { Media(search: $search, type: $type, format_not: $exclude,", "{ id type format title { english romaji native }", "} } } \"\"\" USER_BY_ID = \"\"\" query ($id: Int)", "format_not: $exclude, isAdult: $isAdult) { id type format title {", "MEDIA_PAGED = \"\"\" query ( $id: Int, $page: Int, $perPage:", "{ User(id: $id) { id name html_about: about(asHtml: true) about", "$type: MediaType) { Media(id: $id, type: $type) { id type", "$page, perPage: $perPage) { media(id: $id, search: $search, type: $type,", "} endDate { year month day } episodes chapters coverImage", "MediaType, $sort: [MediaSort] = [SEARCH_MATCH], $exclude: MediaFormat, $isAdult: Boolean )", "true) about avatar { large } bannerImage siteUrl stats {", "startDate { year month day } endDate { year month", "endDate { year month day } episodes chapters coverImage {", "$id) { id name 
html_about: about(asHtml: true) about avatar {", "$exclude, isAdult: $isAdult) { id type format title { english", "id name html_about: about(asHtml: true) about avatar { large }", "Page(page: $page, perPage: $perPage) { media(id: $id, search: $search, type:", "day } episodes chapters coverImage { large color } bannerImage", "$exclude: MediaFormat, $isAdult: Boolean) { Media(search: $search, type: $type, format_not:", "nextAiringEpisode { timeUntilAiring episode } } } \"\"\" MEDIA_PAGED =", "} bannerImage genres averageScore siteUrl isAdult nextAiringEpisode { timeUntilAiring episode", "query ($id: Int, $type: MediaType) { Media(id: $id, type: $type)", "episodes chapters volumes coverImage { large color } bannerImage genres", "month day } endDate { year month day } episodes", "endDate { year month day } episodes chapters volumes coverImage", "= \"\"\" query ($search: String) { User(search: $search) { id", "= \"\"\" query ($id: Int) { User(id: $id) { id", "( $id: Int, $page: Int, $perPage: Int, $search: String, $type:", "year month day } endDate { year month day }", "bannerImage siteUrl stats { watchedTime chaptersRead } } } \"\"\"", "chapters volumes coverImage { large color } bannerImage genres averageScore", "} \"\"\" MEDIA_BY_ID = \"\"\" query ($id: Int, $type: MediaType)", "Int, $search: String, $type: MediaType, $sort: [MediaSort] = [SEARCH_MATCH], $exclude:", "large } bannerImage siteUrl stats { watchedTime chaptersRead } }", "$type, format_not: $exclude, isAdult: $isAdult) { id type format title", "\"\"\" query ($id: Int, $type: MediaType) { Media(id: $id, type:", "= [SEARCH_MATCH], $exclude: MediaFormat, $isAdult: Boolean ) { Page(page: $page,", "romaji native } synonyms status description startDate { year month", "} } } \"\"\" MEDIA_BY_ID = \"\"\" query ($id: Int,", "query ( $id: Int, $page: Int, $perPage: Int, $search: String,", "color } bannerImage genres averageScore siteUrl isAdult popularity } }", "} bannerImage siteUrl stats { watchedTime chaptersRead 
} } }", "MediaType, $exclude: MediaFormat, $isAdult: Boolean) { Media(search: $search, type: $type,", "day } endDate { year month day } episodes chapters", "{ year month day } endDate { year month day", "isAdult nextAiringEpisode { timeUntilAiring episode } } } \"\"\" MEDIA_BY_ID", "month day } episodes chapters coverImage { large color }", "$search) { id name html_about: about(asHtml: true) about avatar {", "sort: $sort, format_not: $exclude, isAdult: $isAdult) { id type format", "$sort, format_not: $exclude, isAdult: $isAdult) { id type format title", "title { english romaji native } synonyms status description startDate", "$type, sort: $sort, format_not: $exclude, isAdult: $isAdult) { id type", "} } } \"\"\" MEDIA_PAGED = \"\"\" query ( $id:", "String, $type: MediaType, $sort: [MediaSort] = [SEARCH_MATCH], $exclude: MediaFormat, $isAdult:", "color } bannerImage genres averageScore siteUrl isAdult nextAiringEpisode { timeUntilAiring", "Media(id: $id, type: $type) { id type format title {", "$isAdult: Boolean) { Media(search: $search, type: $type, format_not: $exclude, isAdult:", "{ Media(search: $search, type: $type, format_not: $exclude, isAdult: $isAdult) {", "} episodes chapters volumes coverImage { large color } bannerImage", "perPage: $perPage) { media(id: $id, search: $search, type: $type, sort:", "query ($search: String, $type: MediaType, $exclude: MediaFormat, $isAdult: Boolean) {", "timeUntilAiring episode } } } \"\"\" MEDIA_BY_ID = \"\"\" query", "genres averageScore siteUrl isAdult nextAiringEpisode { timeUntilAiring episode } }", "bannerImage genres averageScore siteUrl isAdult nextAiringEpisode { timeUntilAiring episode }", "large color } bannerImage genres averageScore siteUrl isAdult nextAiringEpisode {", "$search, type: $type, format_not: $exclude, isAdult: $isAdult) { id type", "type: $type, sort: $sort, format_not: $exclude, isAdult: $isAdult) { id", "$id, search: $search, type: $type, sort: $sort, format_not: $exclude, isAdult:", "description 
startDate { year month day } endDate { year", "coverImage { large color } bannerImage genres averageScore siteUrl isAdult", "$exclude: MediaFormat, $isAdult: Boolean ) { Page(page: $page, perPage: $perPage)", "{ Page(page: $page, perPage: $perPage) { media(id: $id, search: $search,", "} } \"\"\" MEDIA_BY_ID = \"\"\" query ($id: Int, $type:", "english romaji native } synonyms status description startDate { year", "Int) { User(id: $id) { id name html_about: about(asHtml: true)", "($search: String, $type: MediaType, $exclude: MediaFormat, $isAdult: Boolean) { Media(search:", "} } \"\"\" USER_BY_ID = \"\"\" query ($id: Int) {", "siteUrl isAdult popularity } } } \"\"\" USER_SEARCH = \"\"\"", "= \"\"\" query ($search: String, $type: MediaType, $exclude: MediaFormat, $isAdult:", "} } \"\"\" USER_SEARCH = \"\"\" query ($search: String) {", "{ large } bannerImage siteUrl stats { watchedTime chaptersRead }", "genres averageScore siteUrl isAdult popularity } } } \"\"\" USER_SEARCH", "averageScore siteUrl isAdult nextAiringEpisode { timeUntilAiring episode } } }", "episodes chapters coverImage { large color } bannerImage genres averageScore", "{ User(search: $search) { id name html_about: about(asHtml: true) about", "$id, type: $type) { id type format title { english", "volumes coverImage { large color } bannerImage genres averageScore siteUrl", "$isAdult: Boolean ) { Page(page: $page, perPage: $perPage) { media(id:", "$perPage) { media(id: $id, search: $search, type: $type, sort: $sort,", "String) { User(search: $search) { id name html_about: about(asHtml: true)", "$id: Int, $page: Int, $perPage: Int, $search: String, $type: MediaType,", "Int, $type: MediaType) { Media(id: $id, type: $type) { id", "month day } episodes chapters volumes coverImage { large color", "$isAdult) { id type format title { english romaji native", "media(id: $id, search: $search, type: $type, sort: $sort, format_not: $exclude,", "\"\"\" query ($search: String) { User(search: $search) { id name", "{ 
year month day } episodes chapters coverImage { large", "} } } \"\"\" USER_SEARCH = \"\"\" query ($search: String)", "[SEARCH_MATCH], $exclude: MediaFormat, $isAdult: Boolean ) { Page(page: $page, perPage:", "episode } } } \"\"\" MEDIA_BY_ID = \"\"\" query ($id:", "} synonyms status description startDate { year month day }", "\"\"\" USER_SEARCH = \"\"\" query ($search: String) { User(search: $search)", "{ timeUntilAiring episode } } } \"\"\" MEDIA_PAGED = \"\"\"", "{ timeUntilAiring episode } } } \"\"\" MEDIA_BY_ID = \"\"\"", "name html_about: about(asHtml: true) about avatar { large } bannerImage", "= \"\"\" query ( $id: Int, $page: Int, $perPage: Int,", "Boolean ) { Page(page: $page, perPage: $perPage) { media(id: $id,", "USER_BY_ID = \"\"\" query ($id: Int) { User(id: $id) {", "synonyms status description startDate { year month day } endDate", "\"\"\" query ($id: Int) { User(id: $id) { id name", "$type: MediaType, $exclude: MediaFormat, $isAdult: Boolean) { Media(search: $search, type:", "year month day } episodes chapters coverImage { large color", "timeUntilAiring episode } } } \"\"\" MEDIA_PAGED = \"\"\" query", "} } \"\"\" MEDIA_PAGED = \"\"\" query ( $id: Int,", "} episodes chapters coverImage { large color } bannerImage genres", "type format title { english romaji native } synonyms status", "isAdult: $isAdult) { id type format title { english romaji", "{ large color } bannerImage genres averageScore siteUrl isAdult nextAiringEpisode" ]
[ "suite = unittest.TestSuite() all_names = globals() for name in all_names:", "chainop class BasicChainTest (unittest.TestCase): def testBasicChain(self): double = lambda x:", "def testMultiChain(self): double = lambda x: x * 2 self.assertEqual([62,", "lambda x: x * 2 self.assertEqual(62, chainop.basic_chain((operator.add, double), 2, 31))", "x * 2 self.assertEqual(62, chainop.basic_chain((operator.add, double), 2, 31)) square =", "import operator import sandbox.chainop as chainop class BasicChainTest (unittest.TestCase): def", "if name.endswith(suffix): suite.addTest(unittest.makeSuite(all_names[name], \"test\")) return suite if __name__ == '__main__':", "= lambda x: x ** 2 self.assertEqual(2**31, chainop.basic_chain((operator.mul, square), 2,", "class MultiChainTest (unittest.TestCase): def testMultiChain(self): double = lambda x: x", "31)) square = lambda x: x ** 2 self.assertEqual([2**31, 3**31],", "31)) def suite(suffix=\"Test\"): suite = unittest.TestSuite() all_names = globals() for", "return suite if __name__ == '__main__': runner = unittest.TextTestRunner() runner.run(suite())", "operator import sandbox.chainop as chainop class BasicChainTest (unittest.TestCase): def testBasicChain(self):", "chainop.multi_chains((operator.add, double), (2, 3), 31)) square = lambda x: x", "self.assertEqual([2**31, 3**31], chainop.multi_chains((operator.mul, square), [2, 3], 31)) def suite(suffix=\"Test\"): suite", "2 self.assertEqual(2**31, chainop.basic_chain((operator.mul, square), 2, 31)) class MultiChainTest (unittest.TestCase): def", "2 self.assertEqual([62, 93], chainop.multi_chains((operator.add, double), (2, 3), 31)) square =", "testBasicChain(self): double = lambda x: x * 2 self.assertEqual(62, chainop.basic_chain((operator.add,", "3], 31)) def suite(suffix=\"Test\"): suite = unittest.TestSuite() all_names = globals()", "lambda x: x ** 2 self.assertEqual(2**31, chainop.basic_chain((operator.mul, square), 2, 31))", "all_names = globals() for name in all_names: if 
name.endswith(suffix): suite.addTest(unittest.makeSuite(all_names[name],", "chainop.multi_chains((operator.mul, square), [2, 3], 31)) def suite(suffix=\"Test\"): suite = unittest.TestSuite()", "unittest.TestSuite() all_names = globals() for name in all_names: if name.endswith(suffix):", "* 2 self.assertEqual(62, chainop.basic_chain((operator.add, double), 2, 31)) square = lambda", "93], chainop.multi_chains((operator.add, double), (2, 3), 31)) square = lambda x:", "x: x ** 2 self.assertEqual(2**31, chainop.basic_chain((operator.mul, square), 2, 31)) class", "31)) class MultiChainTest (unittest.TestCase): def testMultiChain(self): double = lambda x:", "= unittest.TestSuite() all_names = globals() for name in all_names: if", "def testBasicChain(self): double = lambda x: x * 2 self.assertEqual(62,", "2 self.assertEqual([2**31, 3**31], chainop.multi_chains((operator.mul, square), [2, 3], 31)) def suite(suffix=\"Test\"):", "** 2 self.assertEqual(2**31, chainop.basic_chain((operator.mul, square), 2, 31)) class MultiChainTest (unittest.TestCase):", "suite(suffix=\"Test\"): suite = unittest.TestSuite() all_names = globals() for name in", "suite.addTest(unittest.makeSuite(all_names[name], \"test\")) return suite if __name__ == '__main__': runner =", "square = lambda x: x ** 2 self.assertEqual(2**31, chainop.basic_chain((operator.mul, square),", "= lambda x: x ** 2 self.assertEqual([2**31, 3**31], chainop.multi_chains((operator.mul, square),", "import unittest import operator import sandbox.chainop as chainop class BasicChainTest", "(2, 3), 31)) square = lambda x: x ** 2", "lambda x: x * 2 self.assertEqual([62, 93], chainop.multi_chains((operator.add, double), (2,", "square), [2, 3], 31)) def suite(suffix=\"Test\"): suite = unittest.TestSuite() all_names", "sandbox.chainop as chainop class BasicChainTest (unittest.TestCase): def testBasicChain(self): double =", "= lambda x: x * 2 self.assertEqual([62, 93], chainop.multi_chains((operator.add, double),", "x ** 2 
self.assertEqual(2**31, chainop.basic_chain((operator.mul, square), 2, 31)) class MultiChainTest", "self.assertEqual([62, 93], chainop.multi_chains((operator.add, double), (2, 3), 31)) square = lambda", "x ** 2 self.assertEqual([2**31, 3**31], chainop.multi_chains((operator.mul, square), [2, 3], 31))", "3**31], chainop.multi_chains((operator.mul, square), [2, 3], 31)) def suite(suffix=\"Test\"): suite =", "** 2 self.assertEqual([2**31, 3**31], chainop.multi_chains((operator.mul, square), [2, 3], 31)) def", "2, 31)) square = lambda x: x ** 2 self.assertEqual(2**31,", "import sandbox.chainop as chainop class BasicChainTest (unittest.TestCase): def testBasicChain(self): double", "self.assertEqual(62, chainop.basic_chain((operator.add, double), 2, 31)) square = lambda x: x", "chainop.basic_chain((operator.add, double), 2, 31)) square = lambda x: x **", "chainop.basic_chain((operator.mul, square), 2, 31)) class MultiChainTest (unittest.TestCase): def testMultiChain(self): double", "globals() for name in all_names: if name.endswith(suffix): suite.addTest(unittest.makeSuite(all_names[name], \"test\")) return", "= globals() for name in all_names: if name.endswith(suffix): suite.addTest(unittest.makeSuite(all_names[name], \"test\"))", "self.assertEqual(2**31, chainop.basic_chain((operator.mul, square), 2, 31)) class MultiChainTest (unittest.TestCase): def testMultiChain(self):", "\"test\")) return suite if __name__ == '__main__': runner = unittest.TextTestRunner()", "<reponame>turkeydonkey/nzmath3<filename>sandbox/test/testChainop.py import unittest import operator import sandbox.chainop as chainop class", "(unittest.TestCase): def testBasicChain(self): double = lambda x: x * 2", "x: x ** 2 self.assertEqual([2**31, 3**31], chainop.multi_chains((operator.mul, square), [2, 3],", "x * 2 self.assertEqual([62, 93], chainop.multi_chains((operator.add, double), (2, 3), 31))", "= lambda x: x * 2 self.assertEqual(62, chainop.basic_chain((operator.add, double), 2,", "2 
self.assertEqual(62, chainop.basic_chain((operator.add, double), 2, 31)) square = lambda x:", "double = lambda x: x * 2 self.assertEqual(62, chainop.basic_chain((operator.add, double),", "2, 31)) class MultiChainTest (unittest.TestCase): def testMultiChain(self): double = lambda", "x: x * 2 self.assertEqual([62, 93], chainop.multi_chains((operator.add, double), (2, 3),", "def suite(suffix=\"Test\"): suite = unittest.TestSuite() all_names = globals() for name", "(unittest.TestCase): def testMultiChain(self): double = lambda x: x * 2", "31)) square = lambda x: x ** 2 self.assertEqual(2**31, chainop.basic_chain((operator.mul,", "BasicChainTest (unittest.TestCase): def testBasicChain(self): double = lambda x: x *", "name in all_names: if name.endswith(suffix): suite.addTest(unittest.makeSuite(all_names[name], \"test\")) return suite if", "in all_names: if name.endswith(suffix): suite.addTest(unittest.makeSuite(all_names[name], \"test\")) return suite if __name__", "lambda x: x ** 2 self.assertEqual([2**31, 3**31], chainop.multi_chains((operator.mul, square), [2,", "unittest import operator import sandbox.chainop as chainop class BasicChainTest (unittest.TestCase):", "MultiChainTest (unittest.TestCase): def testMultiChain(self): double = lambda x: x *", "double), (2, 3), 31)) square = lambda x: x **", "3), 31)) square = lambda x: x ** 2 self.assertEqual([2**31,", "square = lambda x: x ** 2 self.assertEqual([2**31, 3**31], chainop.multi_chains((operator.mul,", "double), 2, 31)) square = lambda x: x ** 2", "for name in all_names: if name.endswith(suffix): suite.addTest(unittest.makeSuite(all_names[name], \"test\")) return suite", "* 2 self.assertEqual([62, 93], chainop.multi_chains((operator.add, double), (2, 3), 31)) square", "x: x * 2 self.assertEqual(62, chainop.basic_chain((operator.add, double), 2, 31)) square", "all_names: if name.endswith(suffix): suite.addTest(unittest.makeSuite(all_names[name], \"test\")) return suite if __name__ ==", "as chainop class 
BasicChainTest (unittest.TestCase): def testBasicChain(self): double = lambda", "class BasicChainTest (unittest.TestCase): def testBasicChain(self): double = lambda x: x", "double = lambda x: x * 2 self.assertEqual([62, 93], chainop.multi_chains((operator.add,", "name.endswith(suffix): suite.addTest(unittest.makeSuite(all_names[name], \"test\")) return suite if __name__ == '__main__': runner", "square), 2, 31)) class MultiChainTest (unittest.TestCase): def testMultiChain(self): double =", "testMultiChain(self): double = lambda x: x * 2 self.assertEqual([62, 93],", "[2, 3], 31)) def suite(suffix=\"Test\"): suite = unittest.TestSuite() all_names =" ]
[ "from algs import trpo from env_makers import EnvMaker from models", "os.system(\"rm -rf {}\".format(log_dir)) with logger.session(log_dir): env_maker = EnvMaker('Pendulum-v0') env =", "logger.session(log_dir): env_maker = EnvMaker('Pendulum-v0') env = env_maker.make() policy = GaussianMLPPolicy(", "import numpy as np import os import logger log_dir =", "action_space=env.action_space, env_spec=env.spec, hidden_sizes=(64, 64), hidden_nonlinearity=chainer.functions.tanh, ) trpo( env=env, env_maker=env_maker, n_envs=16,", "logs os.system(\"rm -rf {}\".format(log_dir)) with logger.session(log_dir): env_maker = EnvMaker('Pendulum-v0') env", "import GaussianMLPPolicy, MLPBaseline from utils import SnapshotSaver import numpy as", "from utils import SnapshotSaver import numpy as np import os", "import trpo from env_makers import EnvMaker from models import GaussianMLPPolicy,", "action_space=env.action_space, env_spec=env.spec, hidden_sizes=(64, 64), hidden_nonlinearity=chainer.functions.tanh, ) baseline = MLPBaseline( observation_space=env.observation_space,", "import logger log_dir = \"data/local/trpo-pendulum\" np.random.seed(42) # Clean up existing", "as np import os import logger log_dir = \"data/local/trpo-pendulum\" np.random.seed(42)", "EnvMaker from models import GaussianMLPPolicy, MLPBaseline from utils import SnapshotSaver", "baseline = MLPBaseline( observation_space=env.observation_space, action_space=env.action_space, env_spec=env.spec, hidden_sizes=(64, 64), hidden_nonlinearity=chainer.functions.tanh, )", "GaussianMLPPolicy( observation_space=env.observation_space, action_space=env.action_space, env_spec=env.spec, hidden_sizes=(64, 64), hidden_nonlinearity=chainer.functions.tanh, ) baseline =", "observation_space=env.observation_space, action_space=env.action_space, env_spec=env.spec, hidden_sizes=(64, 64), hidden_nonlinearity=chainer.functions.tanh, ) trpo( env=env, env_maker=env_maker,", "hidden_nonlinearity=chainer.functions.tanh, ) trpo( env=env, 
env_maker=env_maker, n_envs=16, policy=policy, baseline=baseline, batch_size=10000, n_iters=100,", "env_spec=env.spec, hidden_sizes=(64, 64), hidden_nonlinearity=chainer.functions.tanh, ) baseline = MLPBaseline( observation_space=env.observation_space, action_space=env.action_space,", "policy = GaussianMLPPolicy( observation_space=env.observation_space, action_space=env.action_space, env_spec=env.spec, hidden_sizes=(64, 64), hidden_nonlinearity=chainer.functions.tanh, )", "-rf {}\".format(log_dir)) with logger.session(log_dir): env_maker = EnvMaker('Pendulum-v0') env = env_maker.make()", "existing logs os.system(\"rm -rf {}\".format(log_dir)) with logger.session(log_dir): env_maker = EnvMaker('Pendulum-v0')", "numpy as np import os import logger log_dir = \"data/local/trpo-pendulum\"", "np import os import logger log_dir = \"data/local/trpo-pendulum\" np.random.seed(42) #", "MLPBaseline from utils import SnapshotSaver import numpy as np import", "= GaussianMLPPolicy( observation_space=env.observation_space, action_space=env.action_space, env_spec=env.spec, hidden_sizes=(64, 64), hidden_nonlinearity=chainer.functions.tanh, ) baseline", "import chainer from algs import trpo from env_makers import EnvMaker", "env = env_maker.make() policy = GaussianMLPPolicy( observation_space=env.observation_space, action_space=env.action_space, env_spec=env.spec, hidden_sizes=(64,", "= env_maker.make() policy = GaussianMLPPolicy( observation_space=env.observation_space, action_space=env.action_space, env_spec=env.spec, hidden_sizes=(64, 64),", "env_maker.make() policy = GaussianMLPPolicy( observation_space=env.observation_space, action_space=env.action_space, env_spec=env.spec, hidden_sizes=(64, 64), hidden_nonlinearity=chainer.functions.tanh,", "trpo from env_makers import EnvMaker from models import GaussianMLPPolicy, MLPBaseline", "algs import trpo from env_makers import EnvMaker from models import", "env_maker = EnvMaker('Pendulum-v0') env = env_maker.make() policy = 
GaussianMLPPolicy( observation_space=env.observation_space,", "observation_space=env.observation_space, action_space=env.action_space, env_spec=env.spec, hidden_sizes=(64, 64), hidden_nonlinearity=chainer.functions.tanh, ) baseline = MLPBaseline(", "import SnapshotSaver import numpy as np import os import logger", "MLPBaseline( observation_space=env.observation_space, action_space=env.action_space, env_spec=env.spec, hidden_sizes=(64, 64), hidden_nonlinearity=chainer.functions.tanh, ) trpo( env=env,", "hidden_sizes=(64, 64), hidden_nonlinearity=chainer.functions.tanh, ) trpo( env=env, env_maker=env_maker, n_envs=16, policy=policy, baseline=baseline,", "logger log_dir = \"data/local/trpo-pendulum\" np.random.seed(42) # Clean up existing logs", ") baseline = MLPBaseline( observation_space=env.observation_space, action_space=env.action_space, env_spec=env.spec, hidden_sizes=(64, 64), hidden_nonlinearity=chainer.functions.tanh,", ") trpo( env=env, env_maker=env_maker, n_envs=16, policy=policy, baseline=baseline, batch_size=10000, n_iters=100, snapshot_saver=SnapshotSaver(log_dir),", "env_spec=env.spec, hidden_sizes=(64, 64), hidden_nonlinearity=chainer.functions.tanh, ) trpo( env=env, env_maker=env_maker, n_envs=16, policy=policy,", "\"data/local/trpo-pendulum\" np.random.seed(42) # Clean up existing logs os.system(\"rm -rf {}\".format(log_dir))", "chainer from algs import trpo from env_makers import EnvMaker from", "up existing logs os.system(\"rm -rf {}\".format(log_dir)) with logger.session(log_dir): env_maker =", "python import chainer from algs import trpo from env_makers import", "os import logger log_dir = \"data/local/trpo-pendulum\" np.random.seed(42) # Clean up", "64), hidden_nonlinearity=chainer.functions.tanh, ) trpo( env=env, env_maker=env_maker, n_envs=16, policy=policy, baseline=baseline, batch_size=10000,", "Clean up existing logs os.system(\"rm -rf {}\".format(log_dir)) with logger.session(log_dir): env_maker", "64), 
hidden_nonlinearity=chainer.functions.tanh, ) baseline = MLPBaseline( observation_space=env.observation_space, action_space=env.action_space, env_spec=env.spec, hidden_sizes=(64,", "trpo( env=env, env_maker=env_maker, n_envs=16, policy=policy, baseline=baseline, batch_size=10000, n_iters=100, snapshot_saver=SnapshotSaver(log_dir), )", "env_makers import EnvMaker from models import GaussianMLPPolicy, MLPBaseline from utils", "= EnvMaker('Pendulum-v0') env = env_maker.make() policy = GaussianMLPPolicy( observation_space=env.observation_space, action_space=env.action_space,", "from models import GaussianMLPPolicy, MLPBaseline from utils import SnapshotSaver import", "with logger.session(log_dir): env_maker = EnvMaker('Pendulum-v0') env = env_maker.make() policy =", "hidden_sizes=(64, 64), hidden_nonlinearity=chainer.functions.tanh, ) baseline = MLPBaseline( observation_space=env.observation_space, action_space=env.action_space, env_spec=env.spec,", "SnapshotSaver import numpy as np import os import logger log_dir", "np.random.seed(42) # Clean up existing logs os.system(\"rm -rf {}\".format(log_dir)) with", "= \"data/local/trpo-pendulum\" np.random.seed(42) # Clean up existing logs os.system(\"rm -rf", "= MLPBaseline( observation_space=env.observation_space, action_space=env.action_space, env_spec=env.spec, hidden_sizes=(64, 64), hidden_nonlinearity=chainer.functions.tanh, ) trpo(", "GaussianMLPPolicy, MLPBaseline from utils import SnapshotSaver import numpy as np", "models import GaussianMLPPolicy, MLPBaseline from utils import SnapshotSaver import numpy", "# Clean up existing logs os.system(\"rm -rf {}\".format(log_dir)) with logger.session(log_dir):", "{}\".format(log_dir)) with logger.session(log_dir): env_maker = EnvMaker('Pendulum-v0') env = env_maker.make() policy", "log_dir = \"data/local/trpo-pendulum\" np.random.seed(42) # Clean up existing logs os.system(\"rm", "#!/usr/bin/env python import chainer from algs import trpo from env_makers", "import os import 
logger log_dir = \"data/local/trpo-pendulum\" np.random.seed(42) # Clean", "utils import SnapshotSaver import numpy as np import os import", "import EnvMaker from models import GaussianMLPPolicy, MLPBaseline from utils import", "hidden_nonlinearity=chainer.functions.tanh, ) baseline = MLPBaseline( observation_space=env.observation_space, action_space=env.action_space, env_spec=env.spec, hidden_sizes=(64, 64),", "EnvMaker('Pendulum-v0') env = env_maker.make() policy = GaussianMLPPolicy( observation_space=env.observation_space, action_space=env.action_space, env_spec=env.spec,", "from env_makers import EnvMaker from models import GaussianMLPPolicy, MLPBaseline from" ]
[ "10:17 # @Author: <EMAIL> \"\"\" 正则解析器 \"\"\" try: import xml.etree.cElementTree", "et import re class RegexEngine: def __init__(self, xml, str_): \"\"\"加载正则表。正则表为xml", "'False').lower() == 'true': self._part_tag(root) return list(filter(lambda x: x[1], self.data)) else:", "import xml.etree.ElementTree as et import re class RegexEngine: def __init__(self,", "= self._no_part(root) self.re = ''.join(self.data) + sf return re.findall(self.re, self._string)", "'' self.re = '(?:' + self._no_part(tag) + ')' else: self.re", "@Time : 2019/12/2 10:17 # @Author: <EMAIL> \"\"\" 正则解析器 \"\"\"", "''.join(self.data) + sf return re.findall(self.re, self._string) def _no_part(self, tags): \"\"\"tag标签不分开抽取\"\"\"", "class RegexEngine: def __init__(self, xml, str_): \"\"\"加载正则表。正则表为xml :param xml: 正则表的位置", "+ self._no_part(tag) + ')' else: self.re = self._no_part(tag) else: attrib", "= '' self.data = [] def select(self, tag): \"\"\"根据xml的tag来实现不同的正则提取 :param", "x: x[1], self.data)) else: sf = self._no_part(root) self.re = ''.join(self.data)", "self.data)) else: sf = self._no_part(root) self.re = ''.join(self.data) + sf", "\"\"\" 正则解析器 \"\"\" try: import xml.etree.cElementTree as et except ModuleNotFoundError:", "def __init__(self, xml, str_): \"\"\"加载正则表。正则表为xml :param xml: 正则表的位置 :param str_:", "text + ')' else: self.re += '(?:' + text +", "-*- coding: utf-8 -*- # @Time : 2019/12/2 10:17 #", "self._no_part(tag) else: attrib = tag.attrib text = tag.text.strip() if attrib.get('must',", "self._root.find(tag) attrib = root.attrib if attrib.get('part', 'False').lower() == 'true': self._part_tag(root)", "def string(self): return self._string @string.setter def string(self, str_): self._string =", "list(filter(lambda x: x[1], self.data)) else: sf = self._no_part(root) self.re =", "+ ')' else: self.re += '(?:' + text + ')?'", "+ text + ')?' 
return self.re def _part_tag(self, tags): \"\"\"tag标签分开提取\"\"\"", "if attrib.get('must', 'true').lower() == 'true': self.re = '(?:' + text", "# @Time : 2019/12/2 10:17 # @Author: <EMAIL> \"\"\" 正则解析器", "re.findall(self.re, self._string) def _no_part(self, tags): \"\"\"tag标签不分开抽取\"\"\" for tag in tags:", "== 'true': self.data.append(self.re) self.re = '' self.re = '(?:' +", "return self.re def _part_tag(self, tags): \"\"\"tag标签分开提取\"\"\" for tag in tags:", "= tag.attrib text = tag.text.strip() if attrib.get('must', 'true').lower() == 'true':", "try: import xml.etree.cElementTree as et except ModuleNotFoundError: import xml.etree.ElementTree as", "self.data = [] def select(self, tag): \"\"\"根据xml的tag来实现不同的正则提取 :param tag: xml的tag标签", "= '(?:' + text + ')' else: self.re += '(?:'", "要匹配的字符串 \"\"\" self._string = str_ self._root = et.parse(xml).getroot() self.re =", ": 2019/12/2 10:17 # @Author: <EMAIL> \"\"\" 正则解析器 \"\"\" try:", "return list(filter(lambda x: x[1], self.data)) else: sf = self._no_part(root) self.re", "= str_ self._root = et.parse(xml).getroot() self.re = '' self.data =", "== 'true': self.re = '(?:' + text + ')' else:", "attrib.get('part', 'False').lower() == 'true': self._part_tag(root) return list(filter(lambda x: x[1], self.data))", "= ''.join(self.data) + sf return re.findall(self.re, self._string) def _no_part(self, tags):", "'true').lower() == 'true': self.data.append(self.re) self.re = '' self.re = '(?:'", "as et import re class RegexEngine: def __init__(self, xml, str_):", "tags): \"\"\"tag标签不分开抽取\"\"\" for tag in tags: if tag: if tag.attrib.get('must',", "-*- # @Time : 2019/12/2 10:17 # @Author: <EMAIL> \"\"\"", "\"\"\"根据xml的tag来实现不同的正则提取 :param tag: xml的tag标签 :return: 正则提取的数据 \"\"\" root = self._root.find(tag)", "str_ self._root = et.parse(xml).getroot() self.re = '' self.data = []", "string(self, str_): self._string = str_ self.re, self.data = '', []", "self.re += '(?:' + text + ')?' 
return self.re def", "tags: if tag: if tag.attrib.get('must', 'true').lower() == 'true': self.data.append(self.re) self.re", "= tag.text.strip() if attrib.get('must', 'true').lower() == 'true': self.re = '(?:'", "')?' return self.re def _part_tag(self, tags): \"\"\"tag标签分开提取\"\"\" for tag in", "self.re = '' self.data = [] def select(self, tag): \"\"\"根据xml的tag来实现不同的正则提取", "self._no_part(root) self.re = ''.join(self.data) + sf return re.findall(self.re, self._string) def", "= self._root.find(tag) attrib = root.attrib if attrib.get('part', 'False').lower() == 'true':", "sf return re.findall(self.re, self._string) def _no_part(self, tags): \"\"\"tag标签不分开抽取\"\"\" for tag", "else: self.data.append((tag.tag, re.findall(tag.text.strip(), self._string))) @property def string(self): return self._string @string.setter", "self._no_part(tag) + ')' else: self.re = self._no_part(tag) else: attrib =", "2019/12/2 10:17 # @Author: <EMAIL> \"\"\" 正则解析器 \"\"\" try: import", "'true': self.data.append(self.re) self.re = '' self.re = '(?:' + self._no_part(tag)", "= root.attrib if attrib.get('part', 'False').lower() == 'true': self._part_tag(root) return list(filter(lambda", "+ text + ')' else: self.re += '(?:' + text", "self.re def _part_tag(self, tags): \"\"\"tag标签分开提取\"\"\" for tag in tags: if", "+ ')' else: self.re = self._no_part(tag) else: attrib = tag.attrib", "string(self): return self._string @string.setter def string(self, str_): self._string = str_", "tag.attrib.get('must', 'true').lower() == 'true': self.data.append(self.re) self.re = '' self.re =", "+ sf return re.findall(self.re, self._string) def _no_part(self, tags): \"\"\"tag标签不分开抽取\"\"\" for", "#!/usr/bin/python3.7 # -*- coding: utf-8 -*- # @Time : 2019/12/2", "self.re = '' self.re = '(?:' + self._no_part(tag) + ')'", "root.attrib if attrib.get('part', 'False').lower() == 'true': self._part_tag(root) return list(filter(lambda x:", "self.re = '(?:' + self._no_part(tag) + ')' else: self.re =", "\"\"\"tag标签分开提取\"\"\" for 
tag in tags: if tag: self._part_tag(tag) else: self.data.append((tag.tag,", "else: attrib = tag.attrib text = tag.text.strip() if attrib.get('must', 'true').lower()", "if tag.attrib.get('must', 'true').lower() == 'true': self.data.append(self.re) self.re = '' self.re", "= self._no_part(tag) else: attrib = tag.attrib text = tag.text.strip() if", "xml.etree.ElementTree as et import re class RegexEngine: def __init__(self, xml,", "')' else: self.re += '(?:' + text + ')?' return", "\"\"\" try: import xml.etree.cElementTree as et except ModuleNotFoundError: import xml.etree.ElementTree", "\"\"\" self._string = str_ self._root = et.parse(xml).getroot() self.re = ''", "self._string))) @property def string(self): return self._string @string.setter def string(self, str_):", "tags: if tag: self._part_tag(tag) else: self.data.append((tag.tag, re.findall(tag.text.strip(), self._string))) @property def", "tags): \"\"\"tag标签分开提取\"\"\" for tag in tags: if tag: self._part_tag(tag) else:", "tag in tags: if tag: self._part_tag(tag) else: self.data.append((tag.tag, re.findall(tag.text.strip(), self._string)))", "tag): \"\"\"根据xml的tag来实现不同的正则提取 :param tag: xml的tag标签 :return: 正则提取的数据 \"\"\" root =", "if attrib.get('part', 'False').lower() == 'true': self._part_tag(root) return list(filter(lambda x: x[1],", ":return: 正则提取的数据 \"\"\" root = self._root.find(tag) attrib = root.attrib if", ":param xml: 正则表的位置 :param str_: 要匹配的字符串 \"\"\" self._string = str_", "<EMAIL> \"\"\" 正则解析器 \"\"\" try: import xml.etree.cElementTree as et except", "RegexEngine: def __init__(self, xml, str_): \"\"\"加载正则表。正则表为xml :param xml: 正则表的位置 :param", "[] def select(self, tag): \"\"\"根据xml的tag来实现不同的正则提取 :param tag: xml的tag标签 :return: 正则提取的数据", "def _part_tag(self, tags): \"\"\"tag标签分开提取\"\"\" for tag in tags: if tag:", "')' else: self.re = self._no_part(tag) else: attrib = tag.attrib text", "= [] def select(self, tag): \"\"\"根据xml的tag来实现不同的正则提取 :param tag: xml的tag标签 :return:", "+ ')?' 
return self.re def _part_tag(self, tags): \"\"\"tag标签分开提取\"\"\" for tag", "as et except ModuleNotFoundError: import xml.etree.ElementTree as et import re", "text + ')?' return self.re def _part_tag(self, tags): \"\"\"tag标签分开提取\"\"\" for", "'' self.data = [] def select(self, tag): \"\"\"根据xml的tag来实现不同的正则提取 :param tag:", "x[1], self.data)) else: sf = self._no_part(root) self.re = ''.join(self.data) +", "str_: 要匹配的字符串 \"\"\" self._string = str_ self._root = et.parse(xml).getroot() self.re", "self.data.append(self.re) self.re = '' self.re = '(?:' + self._no_part(tag) +", "tag.text.strip() if attrib.get('must', 'true').lower() == 'true': self.re = '(?:' +", "正则提取的数据 \"\"\" root = self._root.find(tag) attrib = root.attrib if attrib.get('part',", "root = self._root.find(tag) attrib = root.attrib if attrib.get('part', 'False').lower() ==", "'true').lower() == 'true': self.re = '(?:' + text + ')'", "re.findall(tag.text.strip(), self._string))) @property def string(self): return self._string @string.setter def string(self,", "# -*- coding: utf-8 -*- # @Time : 2019/12/2 10:17", "attrib = tag.attrib text = tag.text.strip() if attrib.get('must', 'true').lower() ==", "else: sf = self._no_part(root) self.re = ''.join(self.data) + sf return", "attrib.get('must', 'true').lower() == 'true': self.re = '(?:' + text +", "import xml.etree.cElementTree as et except ModuleNotFoundError: import xml.etree.ElementTree as et", "def string(self, str_): self._string = str_ self.re, self.data = '',", ":param str_: 要匹配的字符串 \"\"\" self._string = str_ self._root = et.parse(xml).getroot()", "self.re = ''.join(self.data) + sf return re.findall(self.re, self._string) def _no_part(self,", "'true': self.re = '(?:' + text + ')' else: self.re", "def select(self, tag): \"\"\"根据xml的tag来实现不同的正则提取 :param tag: xml的tag标签 :return: 正则提取的数据 \"\"\"", "self._string = str_ self._root = et.parse(xml).getroot() self.re = '' self.data", "正则表的位置 :param str_: 要匹配的字符串 \"\"\" self._string = str_ self._root =", 
"self.data.append((tag.tag, re.findall(tag.text.strip(), self._string))) @property def string(self): return self._string @string.setter def", "xml, str_): \"\"\"加载正则表。正则表为xml :param xml: 正则表的位置 :param str_: 要匹配的字符串 \"\"\"", "sf = self._no_part(root) self.re = ''.join(self.data) + sf return re.findall(self.re,", "@Author: <EMAIL> \"\"\" 正则解析器 \"\"\" try: import xml.etree.cElementTree as et", "\"\"\"加载正则表。正则表为xml :param xml: 正则表的位置 :param str_: 要匹配的字符串 \"\"\" self._string =", "'(?:' + text + ')' else: self.re += '(?:' +", "coding: utf-8 -*- # @Time : 2019/12/2 10:17 # @Author:", "# @Author: <EMAIL> \"\"\" 正则解析器 \"\"\" try: import xml.etree.cElementTree as", "== 'true': self._part_tag(root) return list(filter(lambda x: x[1], self.data)) else: sf", "str_): \"\"\"加载正则表。正则表为xml :param xml: 正则表的位置 :param str_: 要匹配的字符串 \"\"\" self._string", "attrib = root.attrib if attrib.get('part', 'False').lower() == 'true': self._part_tag(root) return", "select(self, tag): \"\"\"根据xml的tag来实现不同的正则提取 :param tag: xml的tag标签 :return: 正则提取的数据 \"\"\" root", "def _no_part(self, tags): \"\"\"tag标签不分开抽取\"\"\" for tag in tags: if tag:", "in tags: if tag: if tag.attrib.get('must', 'true').lower() == 'true': self.data.append(self.re)", "return self._string @string.setter def string(self, str_): self._string = str_ self.re,", "utf-8 -*- # @Time : 2019/12/2 10:17 # @Author: <EMAIL>", "\"\"\" root = self._root.find(tag) attrib = root.attrib if attrib.get('part', 'False').lower()", "\"\"\"tag标签不分开抽取\"\"\" for tag in tags: if tag: if tag.attrib.get('must', 'true').lower()", "xml: 正则表的位置 :param str_: 要匹配的字符串 \"\"\" self._string = str_ self._root", "self._part_tag(root) return list(filter(lambda x: x[1], self.data)) else: sf = self._no_part(root)", "re class RegexEngine: def __init__(self, xml, str_): \"\"\"加载正则表。正则表为xml :param xml:", "@property def string(self): return self._string @string.setter def string(self, str_): self._string", "self.re = self._no_part(tag) else: attrib = tag.attrib text = 
tag.text.strip()", "import re class RegexEngine: def __init__(self, xml, str_): \"\"\"加载正则表。正则表为xml :param", "return re.findall(self.re, self._string) def _no_part(self, tags): \"\"\"tag标签不分开抽取\"\"\" for tag in", "et.parse(xml).getroot() self.re = '' self.data = [] def select(self, tag):", "xml的tag标签 :return: 正则提取的数据 \"\"\" root = self._root.find(tag) attrib = root.attrib", "= '(?:' + self._no_part(tag) + ')' else: self.re = self._no_part(tag)", "tag: if tag.attrib.get('must', 'true').lower() == 'true': self.data.append(self.re) self.re = ''", "xml.etree.cElementTree as et except ModuleNotFoundError: import xml.etree.ElementTree as et import", "+= '(?:' + text + ')?' return self.re def _part_tag(self,", "_part_tag(self, tags): \"\"\"tag标签分开提取\"\"\" for tag in tags: if tag: self._part_tag(tag)", "text = tag.text.strip() if attrib.get('must', 'true').lower() == 'true': self.re =", "self._string) def _no_part(self, tags): \"\"\"tag标签不分开抽取\"\"\" for tag in tags: if", "__init__(self, xml, str_): \"\"\"加载正则表。正则表为xml :param xml: 正则表的位置 :param str_: 要匹配的字符串", "for tag in tags: if tag: if tag.attrib.get('must', 'true').lower() ==", "'(?:' + text + ')?' 
return self.re def _part_tag(self, tags):", "'(?:' + self._no_part(tag) + ')' else: self.re = self._no_part(tag) else:", "self._string @string.setter def string(self, str_): self._string = str_ self.re, self.data", ":param tag: xml的tag标签 :return: 正则提取的数据 \"\"\" root = self._root.find(tag) attrib", "in tags: if tag: self._part_tag(tag) else: self.data.append((tag.tag, re.findall(tag.text.strip(), self._string))) @property", "= et.parse(xml).getroot() self.re = '' self.data = [] def select(self,", "tag: xml的tag标签 :return: 正则提取的数据 \"\"\" root = self._root.find(tag) attrib =", "_no_part(self, tags): \"\"\"tag标签不分开抽取\"\"\" for tag in tags: if tag: if", "'true': self._part_tag(root) return list(filter(lambda x: x[1], self.data)) else: sf =", "tag in tags: if tag: if tag.attrib.get('must', 'true').lower() == 'true':", "else: self.re += '(?:' + text + ')?' return self.re", "if tag: self._part_tag(tag) else: self.data.append((tag.tag, re.findall(tag.text.strip(), self._string))) @property def string(self):", "self._root = et.parse(xml).getroot() self.re = '' self.data = [] def", "ModuleNotFoundError: import xml.etree.ElementTree as et import re class RegexEngine: def", "for tag in tags: if tag: self._part_tag(tag) else: self.data.append((tag.tag, re.findall(tag.text.strip(),", "et except ModuleNotFoundError: import xml.etree.ElementTree as et import re class", "tag.attrib text = tag.text.strip() if attrib.get('must', 'true').lower() == 'true': self.re", "tag: self._part_tag(tag) else: self.data.append((tag.tag, re.findall(tag.text.strip(), self._string))) @property def string(self): return", "@string.setter def string(self, str_): self._string = str_ self.re, self.data =", "except ModuleNotFoundError: import xml.etree.ElementTree as et import re class RegexEngine:", "self.re = '(?:' + text + ')' else: self.re +=", "else: self.re = self._no_part(tag) else: attrib = tag.attrib text =", "self._part_tag(tag) else: self.data.append((tag.tag, re.findall(tag.text.strip(), 
self._string))) @property def string(self): return self._string", "正则解析器 \"\"\" try: import xml.etree.cElementTree as et except ModuleNotFoundError: import", "if tag: if tag.attrib.get('must', 'true').lower() == 'true': self.data.append(self.re) self.re =", "= '' self.re = '(?:' + self._no_part(tag) + ')' else:" ]
[ "internal sklearn DecisionTreeClassifier \"\"\" def __init__(self, kwargs={}): self.kwargs = kwargs", "A dictionary containing metrics for judging network performance. fit_kwargs :", "Input data matrix. Returns ------- X_transformed : ndarray The transformed", "identify if the network is pretrained. compile_kwargs : dict, default={\"metrics\":", "\"\"\" Performs inference using the transformer. Parameters ---------- X :", "layer of the transformer. optimizer : str or keras.optimizers instance", "keras.models.Model( inputs=self.network.inputs, outputs=self.network.layers[euclidean_layer_idx].output, ) self.pretrained = pretrained self.optimizer = optimizer", "<NAME> Corresponding Email: <EMAIL> \"\"\" import keras import numpy as", "euclidean_layer_idx parameter. \"\"\" def __init__( self, network, euclidean_layer_idx, optimizer, loss=\"categorical_crossentropy\",", "loss=\"categorical_crossentropy\", pretrained=False, compile_kwargs={\"metrics\": [\"acc\"]}, fit_kwargs={ \"epochs\": 100, \"callbacks\": [keras.callbacks.EarlyStopping(patience=5, monitor=\"val_acc\")],", "X : ndarray Input data matrix. y : ndarray Output", "dict, default={} A dictionary to contain parameters of the tree.", "dictionary containing metrics for judging network performance. fit_kwargs : dict,", "keras.optimizers instance An optimizer used when compiling the neural network.", "of the tree. 
Attributes ---------- transformer : sklearn.tree.DecisionTreeClassifier an internal", "100, \"callbacks\": [keras.callbacks.EarlyStopping(patience=5, monitor=\"val_acc\")], \"verbose\": False, \"validation_split\": 0.33, }, A", "y) return self def transform(self, X): \"\"\" Performs inference using", "compile_kwargs self.fit_kwargs = fit_kwargs def fit(self, X, y): \"\"\" Fits", "pretrained self.optimizer = optimizer self.loss = loss self.compile_kwargs = compile_kwargs", "= pretrained self.optimizer = optimizer self.loss = loss self.compile_kwargs =", "0.33, }, A dictionary to hold epochs, callbacks, verbose, and", ": bool, default=False A boolean used to identify if the", "check_X_y from .base import BaseTransformer class NeuralClassificationTransformer(BaseTransformer): \"\"\" A class", "\"validation_split\": 0.33, }, A dictionary to hold epochs, callbacks, verbose,", "class used to transform data from a category to a", "loss : str, default=\"categorical_crossentropy\" A loss function used when compiling", "default={} A dictionary to contain parameters of the tree. Attributes", "= loss self.compile_kwargs = compile_kwargs self.fit_kwargs = fit_kwargs def fit(self,", "Returns ------- self : TreeClassificationTransformer The object itself. \"\"\" X,", "used when compiling the neural network. pretrained : bool, default=False", "function used when compiling the neural network. pretrained : bool,", "\"verbose\": False, \"validation_split\": 0.33, }, A dictionary to hold epochs,", "neural network. loss : str, default=\"categorical_crossentropy\" A loss function used", "the network is pretrained. compile_kwargs : dict, default={\"metrics\": [\"acc\"]} A", "compile_kwargs={\"metrics\": [\"acc\"]}, fit_kwargs={ \"epochs\": 100, \"callbacks\": [keras.callbacks.EarlyStopping(patience=5, monitor=\"val_acc\")], \"verbose\": False,", "specialized representation. 
Parameters ---------- kwargs : dict, default={} A dictionary", "y) self.transformer_ = DecisionTreeClassifier(**self.kwargs).fit(X, y) return self def transform(self, X):", ": ndarray Output (i.e. response data matrix). Returns ------- self", "hold epochs, callbacks, verbose, and validation split for the network.", "a specialized representation. Parameters ---------- network : object A neural", "default={ \"epochs\": 100, \"callbacks\": [keras.callbacks.EarlyStopping(patience=5, monitor=\"val_acc\")], \"verbose\": False, \"validation_split\": 0.33,", "= fit_kwargs def fit(self, X, y): \"\"\" Fits the transformer", "neural network. pretrained : bool, default=False A boolean used to", "self def transform(self, X): \"\"\" Performs inference using the transformer.", "a category to a specialized representation. Parameters ---------- kwargs :", "TreeClassificationTransformer The object itself. \"\"\" X, y = check_X_y(X, y)", "dictionary to contain parameters of the tree. Attributes ---------- transformer", "\"\"\" Fits the transformer to data X with labels y.", ": str or keras.optimizers instance An optimizer used when compiling", "self : NeuralClassificationTransformer The object itself. \"\"\" check_X_y(X, y) _,", "---------- transformer : sklearn.tree.DecisionTreeClassifier an internal sklearn DecisionTreeClassifier \"\"\" def", "\"callbacks\": [keras.callbacks.EarlyStopping(patience=5, monitor=\"val_acc\")], \"verbose\": False, \"validation_split\": 0.33, }, ): self.network", "<gh_stars>0 \"\"\" Main Author: <NAME> Corresponding Email: <EMAIL> \"\"\" import", ": ndarray Input data matrix. y : ndarray Output (i.e.", "with inputs and outputs based on the network attribute. Output", "[keras.callbacks.EarlyStopping(patience=5, monitor=\"val_acc\")], \"verbose\": False, \"validation_split\": 0.33, }, ): self.network =", "data X with labels y. Parameters ---------- X : ndarray", "when compiling the neural network. 
pretrained : bool, default=False A", "**self.compile_kwargs ) self.network.fit(X, keras.utils.to_categorical(y), **self.fit_kwargs) return self def transform(self, X):", "= check_X_y(X, y) self.transformer_ = DecisionTreeClassifier(**self.kwargs).fit(X, y) return self def", "An integer to represent the final layer of the transformer.", "\"\"\" check_is_fitted(self) check_array(X) return self.encoder_.predict(X) class TreeClassificationTransformer(BaseTransformer): \"\"\" A class", "used to transform data from a category to a specialized", "the model is not fitted. \"\"\" check_is_fitted(self) check_array(X) return self.encoder_.predict(X)", "neural network used in the classification transformer. euclidean_layer_idx : int", "judging network performance. fit_kwargs : dict, default={ \"epochs\": 100, \"callbacks\":", "__init__(self, kwargs={}): self.kwargs = kwargs def fit(self, X, y): \"\"\"", "to a specialized representation. Parameters ---------- kwargs : dict, default={}", "category to a specialized representation. Parameters ---------- kwargs : dict,", "Raises ------ NotFittedError When the model is not fitted. \"\"\"", "classification transformer. euclidean_layer_idx : int An integer to represent the", "matrix. Returns ------- X_transformed : ndarray The transformed input. Raises", "the neural network. loss : str, default=\"categorical_crossentropy\" A loss function", "return self.encoder_.predict(X) class TreeClassificationTransformer(BaseTransformer): \"\"\" A class used to transform", "the model is not fitted. \"\"\" check_is_fitted(self) X = check_array(X)", "self.loss = loss self.compile_kwargs = compile_kwargs self.fit_kwargs = fit_kwargs def", "the classification transformer. euclidean_layer_idx : int An integer to represent", "BaseTransformer class NeuralClassificationTransformer(BaseTransformer): \"\"\" A class used to transform data", "keras import numpy as np from sklearn.tree import DecisionTreeClassifier from", "representation. 
Parameters ---------- network : object A neural network used", "\"verbose\": False, \"validation_split\": 0.33, }, ): self.network = keras.models.clone_model(network) self.encoder_", "__init__( self, network, euclidean_layer_idx, optimizer, loss=\"categorical_crossentropy\", pretrained=False, compile_kwargs={\"metrics\": [\"acc\"]}, fit_kwargs={", "outputs=self.network.layers[euclidean_layer_idx].output, ) self.pretrained = pretrained self.optimizer = optimizer self.loss =", "y = check_X_y(X, y) self.transformer_ = DecisionTreeClassifier(**self.kwargs).fit(X, y) return self", "str, default=\"categorical_crossentropy\" A loss function used when compiling the neural", "Output layers are determined by the euclidean_layer_idx parameter. \"\"\" def", "Output (i.e. response data matrix). Returns ------- self : NeuralClassificationTransformer", "network, euclidean_layer_idx, optimizer, loss=\"categorical_crossentropy\", pretrained=False, compile_kwargs={\"metrics\": [\"acc\"]}, fit_kwargs={ \"epochs\": 100,", "matrix). Returns ------- self : NeuralClassificationTransformer The object itself. \"\"\"", "optimizer=self.optimizer, **self.compile_kwargs ) self.network.fit(X, keras.utils.to_categorical(y), **self.fit_kwargs) return self def transform(self,", "int An integer to represent the final layer of the", "network. Attributes ---------- encoder_ : object A Keras model with", "check_X_y(X, y) self.transformer_ = DecisionTreeClassifier(**self.kwargs).fit(X, y) return self def transform(self,", "\"epochs\": 100, \"callbacks\": [keras.callbacks.EarlyStopping(patience=5, monitor=\"val_acc\")], \"verbose\": False, \"validation_split\": 0.33, },", "on the network attribute. Output layers are determined by the", "fit_kwargs def fit(self, X, y): \"\"\" Fits the transformer to", "keras.utils.to_categorical(y), **self.fit_kwargs) return self def transform(self, X): \"\"\" Performs inference", "verbose, and validation split for the network. 
Attributes ---------- encoder_", "loss=self.loss, optimizer=self.optimizer, **self.compile_kwargs ) self.network.fit(X, keras.utils.to_categorical(y), **self.fit_kwargs) return self def", "import BaseTransformer class NeuralClassificationTransformer(BaseTransformer): \"\"\" A class used to transform", "X): \"\"\" Performs inference using the transformer. Parameters ---------- X", "}, ): self.network = keras.models.clone_model(network) self.encoder_ = keras.models.Model( inputs=self.network.inputs, outputs=self.network.layers[euclidean_layer_idx].output,", "ndarray Input data matrix. Returns ------- X_transformed : ndarray The", "matrix). Returns ------- self : TreeClassificationTransformer The object itself. \"\"\"", "ndarray Output (i.e. response data matrix). Returns ------- self :", "default=\"categorical_crossentropy\" A loss function used when compiling the neural network.", "[\"acc\"]} A dictionary containing metrics for judging network performance. fit_kwargs", "A neural network used in the classification transformer. euclidean_layer_idx :", "parameter. \"\"\" def __init__( self, network, euclidean_layer_idx, optimizer, loss=\"categorical_crossentropy\", pretrained=False,", "Main Author: <NAME> Corresponding Email: <EMAIL> \"\"\" import keras import", "------- self : NeuralClassificationTransformer The object itself. 
\"\"\" check_X_y(X, y)", "y = np.unique(y, return_inverse=True) # more typechecking self.network.compile( loss=self.loss, optimizer=self.optimizer,", "check_array, check_is_fitted, check_X_y from .base import BaseTransformer class NeuralClassificationTransformer(BaseTransformer): \"\"\"", "\"\"\" X, y = check_X_y(X, y) self.transformer_ = DecisionTreeClassifier(**self.kwargs).fit(X, y)", "import DecisionTreeClassifier from sklearn.utils.validation import check_array, check_is_fitted, check_X_y from .base", "self.encoder_ = keras.models.Model( inputs=self.network.inputs, outputs=self.network.layers[euclidean_layer_idx].output, ) self.pretrained = pretrained self.optimizer", "---------- network : object A neural network used in the", "X : ndarray Input data matrix. Returns ------- X_transformed :", "is not fitted. \"\"\" check_is_fitted(self) X = check_array(X) return self.transformer_.apply(X)", "instance An optimizer used when compiling the neural network. loss", "used to identify if the network is pretrained. compile_kwargs :", "data from a category to a specialized representation. Parameters ----------", "sklearn.tree import DecisionTreeClassifier from sklearn.utils.validation import check_array, check_is_fitted, check_X_y from", "A dictionary to hold epochs, callbacks, verbose, and validation split", "transformer. Parameters ---------- X : ndarray Input data matrix. Returns", "<EMAIL> \"\"\" import keras import numpy as np from sklearn.tree", "A loss function used when compiling the neural network. pretrained", "Returns ------- self : NeuralClassificationTransformer The object itself. \"\"\" check_X_y(X,", "\"\"\" def __init__( self, network, euclidean_layer_idx, optimizer, loss=\"categorical_crossentropy\", pretrained=False, compile_kwargs={\"metrics\":", ") self.network.fit(X, keras.utils.to_categorical(y), **self.fit_kwargs) return self def transform(self, X): \"\"\"", "ndarray The transformed input. 
Raises ------ NotFittedError When the model", "class NeuralClassificationTransformer(BaseTransformer): \"\"\" A class used to transform data from", "category to a specialized representation. Parameters ---------- network : object", "= optimizer self.loss = loss self.compile_kwargs = compile_kwargs self.fit_kwargs =", "to contain parameters of the tree. Attributes ---------- transformer :", "to hold epochs, callbacks, verbose, and validation split for the", "return_inverse=True) # more typechecking self.network.compile( loss=self.loss, optimizer=self.optimizer, **self.compile_kwargs ) self.network.fit(X,", "loss function used when compiling the neural network. pretrained :", ") self.pretrained = pretrained self.optimizer = optimizer self.loss = loss", "NotFittedError When the model is not fitted. \"\"\" check_is_fitted(self) check_array(X)", ": TreeClassificationTransformer The object itself. \"\"\" X, y = check_X_y(X,", "The object itself. \"\"\" X, y = check_X_y(X, y) self.transformer_", "numpy as np from sklearn.tree import DecisionTreeClassifier from sklearn.utils.validation import", "y. Parameters ---------- X : ndarray Input data matrix. y", "a category to a specialized representation. Parameters ---------- network :", "transformer : sklearn.tree.DecisionTreeClassifier an internal sklearn DecisionTreeClassifier \"\"\" def __init__(self,", "network. pretrained : bool, default=False A boolean used to identify", "if the network is pretrained. compile_kwargs : dict, default={\"metrics\": [\"acc\"]}", "metrics for judging network performance. fit_kwargs : dict, default={ \"epochs\":", ": dict, default={\"metrics\": [\"acc\"]} A dictionary containing metrics for judging", "monitor=\"val_acc\")], \"verbose\": False, \"validation_split\": 0.33, }, ): self.network = keras.models.clone_model(network)", "\"\"\" A class used to transform data from a category", "(i.e. response data matrix). Returns ------- self : NeuralClassificationTransformer The", "network attribute. 
Output layers are determined by the euclidean_layer_idx parameter.", "model is not fitted. \"\"\" check_is_fitted(self) X = check_array(X) return", "sklearn DecisionTreeClassifier \"\"\" def __init__(self, kwargs={}): self.kwargs = kwargs def", "DecisionTreeClassifier from sklearn.utils.validation import check_array, check_is_fitted, check_X_y from .base import", "import keras import numpy as np from sklearn.tree import DecisionTreeClassifier", "): self.network = keras.models.clone_model(network) self.encoder_ = keras.models.Model( inputs=self.network.inputs, outputs=self.network.layers[euclidean_layer_idx].output, )", ": object A neural network used in the classification transformer.", ": NeuralClassificationTransformer The object itself. \"\"\" check_X_y(X, y) _, y", "self.pretrained = pretrained self.optimizer = optimizer self.loss = loss self.compile_kwargs", "the euclidean_layer_idx parameter. \"\"\" def __init__( self, network, euclidean_layer_idx, optimizer,", "def __init__(self, kwargs={}): self.kwargs = kwargs def fit(self, X, y):", "False, \"validation_split\": 0.33, }, A dictionary to hold epochs, callbacks,", "the tree. Attributes ---------- transformer : sklearn.tree.DecisionTreeClassifier an internal sklearn", "DecisionTreeClassifier \"\"\" def __init__(self, kwargs={}): self.kwargs = kwargs def fit(self,", "Performs inference using the transformer. Parameters ---------- X : ndarray", "self : TreeClassificationTransformer The object itself. \"\"\" X, y =", "str or keras.optimizers instance An optimizer used when compiling the", "contain parameters of the tree. 
Attributes ---------- transformer : sklearn.tree.DecisionTreeClassifier", "= keras.models.Model( inputs=self.network.inputs, outputs=self.network.layers[euclidean_layer_idx].output, ) self.pretrained = pretrained self.optimizer =", "pretrained : bool, default=False A boolean used to identify if", ".base import BaseTransformer class NeuralClassificationTransformer(BaseTransformer): \"\"\" A class used to", "used in the classification transformer. euclidean_layer_idx : int An integer", "euclidean_layer_idx : int An integer to represent the final layer", "= np.unique(y, return_inverse=True) # more typechecking self.network.compile( loss=self.loss, optimizer=self.optimizer, **self.compile_kwargs", "used when compiling the neural network. loss : str, default=\"categorical_crossentropy\"", "def fit(self, X, y): \"\"\" Fits the transformer to data", "X, y = check_X_y(X, y) self.transformer_ = DecisionTreeClassifier(**self.kwargs).fit(X, y) return", "network performance. fit_kwargs : dict, default={ \"epochs\": 100, \"callbacks\": [keras.callbacks.EarlyStopping(patience=5,", "self.compile_kwargs = compile_kwargs self.fit_kwargs = fit_kwargs def fit(self, X, y):", "sklearn.utils.validation import check_array, check_is_fitted, check_X_y from .base import BaseTransformer class", "more typechecking self.network.compile( loss=self.loss, optimizer=self.optimizer, **self.compile_kwargs ) self.network.fit(X, keras.utils.to_categorical(y), **self.fit_kwargs)", "for the network. Attributes ---------- encoder_ : object A Keras", "object itself. \"\"\" X, y = check_X_y(X, y) self.transformer_ =", "DecisionTreeClassifier(**self.kwargs).fit(X, y) return self def transform(self, X): \"\"\" Performs inference", "is pretrained. compile_kwargs : dict, default={\"metrics\": [\"acc\"]} A dictionary containing", "data matrix). 
Returns ------- self : NeuralClassificationTransformer The object itself.", "X, y): \"\"\" Fits the transformer to data X with", "containing metrics for judging network performance. fit_kwargs : dict, default={", "A boolean used to identify if the network is pretrained.", "NeuralClassificationTransformer(BaseTransformer): \"\"\" A class used to transform data from a", "to transform data from a category to a specialized representation.", "using the transformer. Parameters ---------- X : ndarray Input data", "------- self : TreeClassificationTransformer The object itself. \"\"\" X, y", "response data matrix). Returns ------- self : NeuralClassificationTransformer The object", "with labels y. Parameters ---------- X : ndarray Input data", "of the transformer. optimizer : str or keras.optimizers instance An", "transformer. optimizer : str or keras.optimizers instance An optimizer used", "response data matrix). Returns ------- self : TreeClassificationTransformer The object", "ndarray Input data matrix. y : ndarray Output (i.e. response", "kwargs : dict, default={} A dictionary to contain parameters of", "itself. \"\"\" X, y = check_X_y(X, y) self.transformer_ = DecisionTreeClassifier(**self.kwargs).fit(X,", "fit_kwargs : dict, default={ \"epochs\": 100, \"callbacks\": [keras.callbacks.EarlyStopping(patience=5, monitor=\"val_acc\")], \"verbose\":", "transform data from a category to a specialized representation. Parameters", "network. loss : str, default=\"categorical_crossentropy\" A loss function used when", "fit_kwargs={ \"epochs\": 100, \"callbacks\": [keras.callbacks.EarlyStopping(patience=5, monitor=\"val_acc\")], \"verbose\": False, \"validation_split\": 0.33,", "self.transformer_ = DecisionTreeClassifier(**self.kwargs).fit(X, y) return self def transform(self, X): \"\"\"", "dict, default={\"metrics\": [\"acc\"]} A dictionary containing metrics for judging network", "y : ndarray Output (i.e. response data matrix). Returns -------", "is not fitted. 
\"\"\" check_is_fitted(self) check_array(X) return self.encoder_.predict(X) class TreeClassificationTransformer(BaseTransformer):", "self.network.compile( loss=self.loss, optimizer=self.optimizer, **self.compile_kwargs ) self.network.fit(X, keras.utils.to_categorical(y), **self.fit_kwargs) return self", "compiling the neural network. loss : str, default=\"categorical_crossentropy\" A loss", "---------- kwargs : dict, default={} A dictionary to contain parameters", "kwargs def fit(self, X, y): \"\"\" Fits the transformer to", "= kwargs def fit(self, X, y): \"\"\" Fits the transformer", "representation. Parameters ---------- kwargs : dict, default={} A dictionary to", "X_transformed : ndarray The transformed input. Raises ------ NotFittedError When", "model is not fitted. \"\"\" check_is_fitted(self) check_array(X) return self.encoder_.predict(X) class", "Parameters ---------- network : object A neural network used in", "labels y. Parameters ---------- X : ndarray Input data matrix.", "or keras.optimizers instance An optimizer used when compiling the neural", "transformer to data X with labels y. Parameters ---------- X", "Input data matrix. y : ndarray Output (i.e. response data", "100, \"callbacks\": [keras.callbacks.EarlyStopping(patience=5, monitor=\"val_acc\")], \"verbose\": False, \"validation_split\": 0.33, }, ):", "optimizer : str or keras.optimizers instance An optimizer used when", ": dict, default={ \"epochs\": 100, \"callbacks\": [keras.callbacks.EarlyStopping(patience=5, monitor=\"val_acc\")], \"verbose\": False,", "integer to represent the final layer of the transformer. optimizer", "determined by the euclidean_layer_idx parameter. \"\"\" def __init__( self, network,", "outputs based on the network attribute. Output layers are determined", "are determined by the euclidean_layer_idx parameter. \"\"\" def __init__( self,", "to data X with labels y. 
Parameters ---------- X :", "**self.fit_kwargs) return self def transform(self, X): \"\"\" Performs inference using", "check_is_fitted, check_X_y from .base import BaseTransformer class NeuralClassificationTransformer(BaseTransformer): \"\"\" A", "Returns ------- X_transformed : ndarray The transformed input. Raises ------", "optimizer used when compiling the neural network. loss : str,", "the transformer to data X with labels y. Parameters ----------", "split for the network. Attributes ---------- encoder_ : object A", "the transformer. Parameters ---------- X : ndarray Input data matrix.", "---------- X : ndarray Input data matrix. Returns ------- X_transformed", "dict, default={ \"epochs\": 100, \"callbacks\": [keras.callbacks.EarlyStopping(patience=5, monitor=\"val_acc\")], \"verbose\": False, \"validation_split\":", "fitted. \"\"\" check_is_fitted(self) check_array(X) return self.encoder_.predict(X) class TreeClassificationTransformer(BaseTransformer): \"\"\" A", "encoder_ : object A Keras model with inputs and outputs", "inputs=self.network.inputs, outputs=self.network.layers[euclidean_layer_idx].output, ) self.pretrained = pretrained self.optimizer = optimizer self.loss", "the neural network. pretrained : bool, default=False A boolean used", "the network attribute. Output layers are determined by the euclidean_layer_idx", "np from sklearn.tree import DecisionTreeClassifier from sklearn.utils.validation import check_array, check_is_fitted,", "self.encoder_.predict(X) class TreeClassificationTransformer(BaseTransformer): \"\"\" A class used to transform data", "transformer. 
euclidean_layer_idx : int An integer to represent the final", "def __init__( self, network, euclidean_layer_idx, optimizer, loss=\"categorical_crossentropy\", pretrained=False, compile_kwargs={\"metrics\": [\"acc\"]},", "monitor=\"val_acc\")], \"verbose\": False, \"validation_split\": 0.33, }, A dictionary to hold", "check_is_fitted(self) check_array(X) return self.encoder_.predict(X) class TreeClassificationTransformer(BaseTransformer): \"\"\" A class used", "# more typechecking self.network.compile( loss=self.loss, optimizer=self.optimizer, **self.compile_kwargs ) self.network.fit(X, keras.utils.to_categorical(y),", "compiling the neural network. pretrained : bool, default=False A boolean", "object A Keras model with inputs and outputs based on", "by the euclidean_layer_idx parameter. \"\"\" def __init__( self, network, euclidean_layer_idx,", "check_array(X) return self.encoder_.predict(X) class TreeClassificationTransformer(BaseTransformer): \"\"\" A class used to", "Attributes ---------- encoder_ : object A Keras model with inputs", "optimizer self.loss = loss self.compile_kwargs = compile_kwargs self.fit_kwargs = fit_kwargs", "itself. \"\"\" check_X_y(X, y) _, y = np.unique(y, return_inverse=True) #", "kwargs={}): self.kwargs = kwargs def fit(self, X, y): \"\"\" Fits", "network : object A neural network used in the classification", "boolean used to identify if the network is pretrained. compile_kwargs", "np.unique(y, return_inverse=True) # more typechecking self.network.compile( loss=self.loss, optimizer=self.optimizer, **self.compile_kwargs )", "and outputs based on the network attribute. Output layers are", "for judging network performance. fit_kwargs : dict, default={ \"epochs\": 100,", "X with labels y. Parameters ---------- X : ndarray Input", "Parameters ---------- X : ndarray Input data matrix. 
y :", "class TreeClassificationTransformer(BaseTransformer): \"\"\" A class used to transform data from", "0.33, }, ): self.network = keras.models.clone_model(network) self.encoder_ = keras.models.Model( inputs=self.network.inputs,", "inputs and outputs based on the network attribute. Output layers", "[\"acc\"]}, fit_kwargs={ \"epochs\": 100, \"callbacks\": [keras.callbacks.EarlyStopping(patience=5, monitor=\"val_acc\")], \"verbose\": False, \"validation_split\":", "final layer of the transformer. optimizer : str or keras.optimizers", "import check_array, check_is_fitted, check_X_y from .base import BaseTransformer class NeuralClassificationTransformer(BaseTransformer):", "The object itself. \"\"\" check_X_y(X, y) _, y = np.unique(y,", "to represent the final layer of the transformer. optimizer :", "A Keras model with inputs and outputs based on the", "data matrix. y : ndarray Output (i.e. response data matrix).", "compile_kwargs : dict, default={\"metrics\": [\"acc\"]} A dictionary containing metrics for", "Email: <EMAIL> \"\"\" import keras import numpy as np from", "as np from sklearn.tree import DecisionTreeClassifier from sklearn.utils.validation import check_array,", "validation split for the network. Attributes ---------- encoder_ : object", "model with inputs and outputs based on the network attribute.", "matrix. y : ndarray Output (i.e. response data matrix). Returns", "callbacks, verbose, and validation split for the network. Attributes ----------", "transformed input. Raises ------ NotFittedError When the model is not", "pretrained. 
compile_kwargs : dict, default={\"metrics\": [\"acc\"]} A dictionary containing metrics", "loss self.compile_kwargs = compile_kwargs self.fit_kwargs = fit_kwargs def fit(self, X,", "self.network.fit(X, keras.utils.to_categorical(y), **self.fit_kwargs) return self def transform(self, X): \"\"\" Performs", "\"\"\" import keras import numpy as np from sklearn.tree import", "self.network = keras.models.clone_model(network) self.encoder_ = keras.models.Model( inputs=self.network.inputs, outputs=self.network.layers[euclidean_layer_idx].output, ) self.pretrained", "transform(self, X): \"\"\" Performs inference using the transformer. Parameters ----------", "tree. Attributes ---------- transformer : sklearn.tree.DecisionTreeClassifier an internal sklearn DecisionTreeClassifier", "TreeClassificationTransformer(BaseTransformer): \"\"\" A class used to transform data from a", "when compiling the neural network. loss : str, default=\"categorical_crossentropy\" A", ": dict, default={} A dictionary to contain parameters of the", "layers are determined by the euclidean_layer_idx parameter. \"\"\" def __init__(", "input. Raises ------ NotFittedError When the model is not fitted.", "epochs, callbacks, verbose, and validation split for the network. Attributes", "import numpy as np from sklearn.tree import DecisionTreeClassifier from sklearn.utils.validation", "an internal sklearn DecisionTreeClassifier \"\"\" def __init__(self, kwargs={}): self.kwargs =", "= keras.models.clone_model(network) self.encoder_ = keras.models.Model( inputs=self.network.inputs, outputs=self.network.layers[euclidean_layer_idx].output, ) self.pretrained =", "------- X_transformed : ndarray The transformed input. Raises ------ NotFittedError", "(i.e. response data matrix). Returns ------- self : TreeClassificationTransformer The", "inference using the transformer. 
Parameters ---------- X : ndarray Input", "[keras.callbacks.EarlyStopping(patience=5, monitor=\"val_acc\")], \"verbose\": False, \"validation_split\": 0.33, }, A dictionary to", "self.kwargs = kwargs def fit(self, X, y): \"\"\" Fits the", ": ndarray The transformed input. Raises ------ NotFittedError When the", "network used in the classification transformer. euclidean_layer_idx : int An", "self.fit_kwargs = fit_kwargs def fit(self, X, y): \"\"\" Fits the", "dictionary to hold epochs, callbacks, verbose, and validation split for", "= DecisionTreeClassifier(**self.kwargs).fit(X, y) return self def transform(self, X): \"\"\" Performs", "object itself. \"\"\" check_X_y(X, y) _, y = np.unique(y, return_inverse=True)", "keras.models.clone_model(network) self.encoder_ = keras.models.Model( inputs=self.network.inputs, outputs=self.network.layers[euclidean_layer_idx].output, ) self.pretrained = pretrained", "from .base import BaseTransformer class NeuralClassificationTransformer(BaseTransformer): \"\"\" A class used", "When the model is not fitted. \"\"\" check_is_fitted(self) X =", "NeuralClassificationTransformer The object itself. \"\"\" check_X_y(X, y) _, y =", "to identify if the network is pretrained. compile_kwargs : dict,", "y): \"\"\" Fits the transformer to data X with labels", "sklearn.tree.DecisionTreeClassifier an internal sklearn DecisionTreeClassifier \"\"\" def __init__(self, kwargs={}): self.kwargs", "Keras model with inputs and outputs based on the network", "the network. Attributes ---------- encoder_ : object A Keras model", "based on the network attribute. Output layers are determined by", "A class used to transform data from a category to", "a specialized representation. Parameters ---------- kwargs : dict, default={} A", "from sklearn.tree import DecisionTreeClassifier from sklearn.utils.validation import check_array, check_is_fitted, check_X_y", "and validation split for the network. 
Attributes ---------- encoder_ :", "euclidean_layer_idx, optimizer, loss=\"categorical_crossentropy\", pretrained=False, compile_kwargs={\"metrics\": [\"acc\"]}, fit_kwargs={ \"epochs\": 100, \"callbacks\":", "network is pretrained. compile_kwargs : dict, default={\"metrics\": [\"acc\"]} A dictionary", "in the classification transformer. euclidean_layer_idx : int An integer to", "\"\"\" def __init__(self, kwargs={}): self.kwargs = kwargs def fit(self, X,", "self, network, euclidean_layer_idx, optimizer, loss=\"categorical_crossentropy\", pretrained=False, compile_kwargs={\"metrics\": [\"acc\"]}, fit_kwargs={ \"epochs\":", "attribute. Output layers are determined by the euclidean_layer_idx parameter. \"\"\"", "Attributes ---------- transformer : sklearn.tree.DecisionTreeClassifier an internal sklearn DecisionTreeClassifier \"\"\"", "Fits the transformer to data X with labels y. Parameters", "---------- encoder_ : object A Keras model with inputs and", "NotFittedError When the model is not fitted. \"\"\" check_is_fitted(self) X", "pretrained=False, compile_kwargs={\"metrics\": [\"acc\"]}, fit_kwargs={ \"epochs\": 100, \"callbacks\": [keras.callbacks.EarlyStopping(patience=5, monitor=\"val_acc\")], \"verbose\":", "------ NotFittedError When the model is not fitted. \"\"\" check_is_fitted(self)", "the final layer of the transformer. optimizer : str or", "not fitted. \"\"\" check_is_fitted(self) check_array(X) return self.encoder_.predict(X) class TreeClassificationTransformer(BaseTransformer): \"\"\"", "self.optimizer = optimizer self.loss = loss self.compile_kwargs = compile_kwargs self.fit_kwargs", "An optimizer used when compiling the neural network. loss :", "return self def transform(self, X): \"\"\" Performs inference using the", "data matrix. 
Returns ------- X_transformed : ndarray The transformed input.", "Author: <NAME> Corresponding Email: <EMAIL> \"\"\" import keras import numpy", "\"validation_split\": 0.33, }, ): self.network = keras.models.clone_model(network) self.encoder_ = keras.models.Model(", "parameters of the tree. Attributes ---------- transformer : sklearn.tree.DecisionTreeClassifier an", "to a specialized representation. Parameters ---------- network : object A", "\"callbacks\": [keras.callbacks.EarlyStopping(patience=5, monitor=\"val_acc\")], \"verbose\": False, \"validation_split\": 0.33, }, A dictionary", "= compile_kwargs self.fit_kwargs = fit_kwargs def fit(self, X, y): \"\"\"", "\"\"\" Main Author: <NAME> Corresponding Email: <EMAIL> \"\"\" import keras", "Corresponding Email: <EMAIL> \"\"\" import keras import numpy as np", "---------- X : ndarray Input data matrix. y : ndarray", "represent the final layer of the transformer. optimizer : str", "False, \"validation_split\": 0.33, }, ): self.network = keras.models.clone_model(network) self.encoder_ =", "check_X_y(X, y) _, y = np.unique(y, return_inverse=True) # more typechecking", "\"\"\" check_X_y(X, y) _, y = np.unique(y, return_inverse=True) # more", "object A neural network used in the classification transformer. euclidean_layer_idx", "bool, default=False A boolean used to identify if the network", "from a category to a specialized representation. Parameters ---------- kwargs", "fit(self, X, y): \"\"\" Fits the transformer to data X", ": ndarray Input data matrix. Returns ------- X_transformed : ndarray", "Parameters ---------- kwargs : dict, default={} A dictionary to contain", "def transform(self, X): \"\"\" Performs inference using the transformer. Parameters", "default=False A boolean used to identify if the network is", "the transformer. optimizer : str or keras.optimizers instance An optimizer", "Output (i.e. response data matrix). 
Returns ------- self : TreeClassificationTransformer", "from a category to a specialized representation. Parameters ---------- network", ": object A Keras model with inputs and outputs based", "optimizer, loss=\"categorical_crossentropy\", pretrained=False, compile_kwargs={\"metrics\": [\"acc\"]}, fit_kwargs={ \"epochs\": 100, \"callbacks\": [keras.callbacks.EarlyStopping(patience=5,", ": int An integer to represent the final layer of", ": str, default=\"categorical_crossentropy\" A loss function used when compiling the", "y) _, y = np.unique(y, return_inverse=True) # more typechecking self.network.compile(", "The transformed input. Raises ------ NotFittedError When the model is", "from sklearn.utils.validation import check_array, check_is_fitted, check_X_y from .base import BaseTransformer", "data matrix). Returns ------- self : TreeClassificationTransformer The object itself.", "When the model is not fitted. \"\"\" check_is_fitted(self) check_array(X) return", "}, A dictionary to hold epochs, callbacks, verbose, and validation", "performance. fit_kwargs : dict, default={ \"epochs\": 100, \"callbacks\": [keras.callbacks.EarlyStopping(patience=5, monitor=\"val_acc\")],", "specialized representation. Parameters ---------- network : object A neural network", "A dictionary to contain parameters of the tree. Attributes ----------", ": sklearn.tree.DecisionTreeClassifier an internal sklearn DecisionTreeClassifier \"\"\" def __init__(self, kwargs={}):", "_, y = np.unique(y, return_inverse=True) # more typechecking self.network.compile( loss=self.loss,", "typechecking self.network.compile( loss=self.loss, optimizer=self.optimizer, **self.compile_kwargs ) self.network.fit(X, keras.utils.to_categorical(y), **self.fit_kwargs) return", "Parameters ---------- X : ndarray Input data matrix. Returns -------", "default={\"metrics\": [\"acc\"]} A dictionary containing metrics for judging network performance." ]
[ "self.n_states = len(np.unique(self.states)) self.trans = self.model.transmat_.copy() def rho_dt_bins(self, rho, theta,", "<NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>. Time series modeling of", "projection (str): cartesian or polar. ymax (int) \"\"\" avail_proj =", "dt.sum() def norm_state_time(self): \"\"\" Normalized state time. :return: \"\"\" unique,", "bin_means_2).all(), \"state and transition vectors are binned differently and can\"", "Plot a colored line with coordinates x and y Optionally", "& (rho > 0)]) > 0 else 0 for i", "new_cmap = mpl.colors.LinearSegmentedColormap.from_list( 'trunc({n},{a:.2f},{b:.2f})'.format(n=cmap.name, a=minval, b=maxval), cmap(np.linspace(minval, maxval, n))) return", "not None: ax.set_ylim(0, ymax) ax.yaxis.set_major_locator(MaxNLocator(integer=True)) norm = mpl.colors.Normalize(vmin=0, vmax=1) cax", "cmap = plt.get_cmap(cmap) new_cmap = mpl.colors.LinearSegmentedColormap.from_list( 'trunc({n},{a:.2f},{b:.2f})'.format(n=cmap.name, a=minval, b=maxval), cmap(np.linspace(minval,", "= arr[:, 0], arr[:, 1] rho = np.sqrt(x ** 2", "2, f\"X should be 2-d, instead got shape {X.shape}\" self.X", "dt, bins=12): \"\"\" Bin rho values and dwell time on", "bin_dt = self.signature[2, :], self.signature[3, :] fig, ax = plt.subplots(figsize=(5,", "as mpl import matplotlib.pyplot as plt import matplotlib.collections as mcoll", "== bin_means_2).all(), \"state and transition vectors are binned differently and", "isinstance(cmap, str): cmap = plt.get_cmap(cmap) new_cmap = mpl.colors.LinearSegmentedColormap.from_list( 'trunc({n},{a:.2f},{b:.2f})'.format(n=cmap.name, a=minval,", "= np.concatenate([points[:-1], points[1:]], axis=1) return segments def truncate_colormap(cmap, minval=0.0, maxval=1.0,", "self.model.transmat_.copy() def rho_dt_bins(self, rho, theta, dt, bins=12): \"\"\" Bin rho", "bin_rd, bin_dt def transition_vectors(self): \"\"\" Transition vectors between states on", "(int) \"\"\" avail_proj = ['cartesian', 'polar'] projection = 
projection.lower() assert", "rho = np.sqrt(x ** 2 + y ** 2) theta", "= plt.subplots(figsize=(5, 5), subplot_kw={'projection': 'polar'}) cmap = plt.get_cmap(\"Blues\") N =", "\"\"\" Plot cell states. \"\"\" bin_rd, bin_dt = self.signature[0, :],", "(rho > 0)].mean() if len(rho[(bin_ix == i) & (rho >", "if not hasattr(z, \"__iter__\"): # to check for numerical input", "= plt.subplots(figsize=(5, 5), subplot_kw={'projection': projection}) ax.scatter(x, y, c=self.states, cmap='Set1', zorder=2)", "len(np.unique(self.states)) self.trans = self.model.transmat_.copy() def rho_dt_bins(self, rho, theta, dt, bins=12):", "shape [n_state x dims] \"\"\" x, y = arr[:, 0],", "transition_vectors(self): \"\"\" Transition vectors between states on polar coordinates. :return:", "algorithm for plotting Hidden Markov Models. <NAME>, <NAME>, <NAME>, <NAME>,", "bin_means, array of shape [4 x n_bins] with 1. state", "trajectory. Args: projection (str): cartesian or polar. ymax (int) \"\"\"", "np.sqrt(x ** 2 + y ** 2) theta = np.arctan2(y,", "Create list of line segments from x and y coordinates,", "segments = make_segments(x, y) lc = mcoll.LineCollection(segments, array=z, cmap=cmap, norm=norm,", "bins) bin_rd = [rho[(bin_ix == i) & (rho > 0)].mean()", "specify colors in the array z Optionally specify a colormap,", "coordinates. :return: \"\"\" mu_x, mu_y = self.means[:, 0], self.means[:, 1]", "for LineCollection: an array of the form numlines x (points", "of the form numlines x (points per line) x 2", "signature for a given model. 
:return: bin_means, array of shape", "x) return rho, theta class PhenoSign(BaseTraj): \"\"\"Phenotypic Signature class.\"\"\" def", "equally spaced on [0,1]: if z is None: z =", "super(Saphire, self).__init__(model, X) def plot_traj(self, projection='cartesian', ymax=None): \"\"\" Plot cell", ">= thresh] = 1 return design_trans def norm_trans_time(self): \"\"\" Normalized", "x dims] \"\"\" x, y = arr[:, 0], arr[:, 1]", "2 (x and y) array \"\"\" points = np.array([x, y]).T.reshape(-1,", "i) & (dt > 0)].sum() if len(dt[(bin_ix == i) &", "\"\"\"Implementation of the SAPHIRE algorithm for plotting Hidden Markov Models.", "dwell times \"\"\" # states mu_rho, mu_theta = self.cart2pol(self.means) state_dt", "x, y = self.X[:, 0], self.X[:, 1] x_mu, y_mu =", "return fig, ax def colorline(x, y, z=None, cmap=plt.get_cmap('copper'), norm=plt.Normalize(0.0, 1.0),", "f\"X should be 2-d, instead got shape {X.shape}\" self.X =", "cax = fig.add_axes([0.94, 0.15, 0.05, 0.3]) fig.colorbar(mpl.cm.ScalarMappable(norm=norm, cmap=cmap), cax=cax, orientation='vertical',", "vectors are binned differently and can\" \\ \"not be concatenated.\"", "y_mu, c=np.unique(self.states), cmap='Set1', s=200, zorder=2, edgecolor='black', alpha=0.6) legend = ax.legend(*traj.legend_elements(),", "self.X = X self.means = self.model.means_.copy() self.states = self.model.predict(X) self.n_states", "ax def plot_transition(self, ymax=None): \"\"\" Plot transition between cell states.", "\"\"\"Phenotypic Signature class.\"\"\" def __init__(self, model, X): super(PhenoSign, self).__init__(model, X)", "import matplotlib.pyplot as plt import matplotlib.collections as mcoll from matplotlib.ticker", "of the SAPHIRE algorithm for plotting Hidden Markov Models. 
<NAME>,", "self.means[:, 0], self.means[:, 1] mu_x_dist = mu_x - mu_x[:, np.newaxis]", "dwell time', ticks=[0, 0.5, 1]) return fig, ax def plot_transition(self,", "bin_means_1, state_rd_bins, state_dt_bins = self.rho_dt_bins(mu_rho, mu_theta, state_dt) # transitions trans_rho,", "numlines x (points per line) x 2 (x and y)", "bin_rd, bin_dt = self.signature[2, :], self.signature[3, :] fig, ax =", "and y coordinates, in the correct format for LineCollection: an", "def __init__(self, model, X): super(PhenoSign, self).__init__(model, X) self.bin_means, self.signature =", "'polar'] projection = projection.lower() assert projection in avail_proj, f\"projection unknown:", "mu_rho, mu_theta = self.cart2pol(self.means) state_dt = self.norm_state_time() bin_means_1, state_rd_bins, state_dt_bins", "ymax is not None: ax.set_ylim(0, ymax) ax.yaxis.set_major_locator(MaxNLocator(integer=True)) norm = mpl.colors.Normalize(vmin=0,", "i) & (dt > 0)]) > 0 else 0 for", "mpl.colors.Normalize(vmin=0, vmax=1) cax = fig.add_axes([0.94, 0.15, 0.05, 0.3]) fig.colorbar(mpl.cm.ScalarMappable(norm=norm, cmap=cmap),", "<NAME>, <NAME>, <NAME>, <NAME>. Time series modeling of live-cell shape", "orientation='vertical', label='Increasing transition dwell time', ticks=[0, 0.5, 1]) return fig,", "instead got shape {X.shape}\" self.X = X self.means = self.model.means_.copy()", "colorline(x, y, cmap=cmap, zorder=1) norm = mpl.colors.Normalize(vmin=0, vmax=48) cax =", "to polar space. Args: arr (numpy.array): Array of shape [n_state", "trans_theta = self.transition_vectors() trans_dt = self.norm_trans_time() bin_means_2, trans_rd_bins, trans_dt_bins =", "projection in avail_proj, f\"projection unknown: {projection}\" if projection == 'cartesian':", "0)].sum() if len(dt[(bin_ix == i) & (dt > 0)]) >", "Normalized state time. 
:return: \"\"\" unique, counts = np.unique(self.states, return_counts=True)", "= fig.add_axes([0.94, 0.15, 0.05, 0.3]) fig.colorbar(mpl.cm.ScalarMappable(norm=norm, cmap=cmap), cax=cax, orientation='vertical', label='Time')", "= self.cart2pol(self.means) else: x, y = self.X[:, 0], self.X[:, 1]", "1 return design_trans def norm_trans_time(self): \"\"\" Normalized transition time. :return:", "zorder=1): \"\"\" Plot a colored line with coordinates x and", "model assert len(X.shape) == 2, f\"X should be 2-d, instead", "plt.gca() ax.add_collection(lc) return lc def make_segments(x, y): \"\"\" Create list", "bin_rd, width=width, color=cmap(bin_dt)) if ymax is not None: ax.set_ylim(0, ymax)", "= self.X[:, 0], self.X[:, 1] x_mu, y_mu = self.means[:, 0],", "cmap=cmap), cax=cax, orientation='vertical', label='Increasing state dwell time', ticks=[0, 0.5, 1])", "dist_vect = np.column_stack((mu_x_dist.flatten(), mu_y_dist.flatten())) trans_rho, trans_theta = self.cart2pol(dist_vect) trans_rho =", "matplotlib.ticker import MaxNLocator plt.style.use('seaborn-darkgrid') class BaseTraj: def __init__(self, model, X):", "[rho[(bin_ix == i) & (rho > 0)].mean() if len(rho[(bin_ix ==", "of line segments from x and y coordinates, in the", "trans_theta def design_transition(self, thresh=0.1): design_trans = self.trans diag_ix = np.diag_indices(len(design_trans))", "None: z = np.linspace(0.0, 1.0, len(x)) # Special case if", "= plt.get_cmap('binary') cmap = truncate_colormap(cmap, minval=0.2) if projection == 'polar':", "ax = plt.subplots(figsize=(5, 5), subplot_kw={'projection': 'polar'}) cmap = plt.get_cmap(\"Blues\") N", "the correct format for LineCollection: an array of the form", "{projection}\" if projection == 'cartesian': projection = None cmap =", "maxval=1.0, n=100): ''' https://stackoverflow.com/a/18926541 ''' if isinstance(cmap, str): cmap =", "3. 
transition dwell times \"\"\" # states mu_rho, mu_theta =", "ymax) ax.yaxis.set_major_locator(MaxNLocator(integer=True)) norm = mpl.colors.Normalize(vmin=0, vmax=1) cax = fig.add_axes([0.94, 0.15,", "2016;8(1):73-90. \"\"\" def __init__(self, model, X): super(Saphire, self).__init__(model, X) def", "on polar coordinates. :param rho: :param theta: :param dt: :param", "= [rho[(bin_ix == i) & (rho > 0)].mean() if len(rho[(bin_ix", "with coordinates x and y Optionally specify colors in the", "= np.unique(self.states, return_counts=True) sort_ix = unique.argsort() counts = counts[sort_ix] return", "def get_signature(self): \"\"\" Calculate phenotypic signature for a given model.", "space. Args: arr (numpy.array): Array of shape [n_state x dims]", "5), subplot_kw={'projection': projection}) ax.scatter(x, y, c=self.states, cmap='Set1', zorder=2) traj =", "# Special case if a single number: if not hasattr(z,", "> 0 else 0 for i in range(1, len(bins))] bin_dt", "3. transition distances 3. transition dwell times \"\"\" # states", "N = 12 width = (2 * np.pi) / N", "= self.get_signature() def get_signature(self): \"\"\" Calculate phenotypic signature for a", "ax.set_ylim(0, ymax) ax.yaxis.set_major_locator(MaxNLocator(integer=True)) norm = mpl.colors.Normalize(vmin=0, vmax=1) cax = fig.add_axes([0.94,", "a hack z = np.array([z]) z = np.asarray(z) segments =", "design_trans[design_trans < thresh] = 0 design_trans[design_trans >= thresh] = 1", "a line width \"\"\" # Default colors equally spaced on", "projection = None cmap = plt.get_cmap('binary') cmap = truncate_colormap(cmap, minval=0.2)", "state dwell time', ticks=[0, 0.5, 1]) return fig, ax def", "self.X[:, 1] x_mu, y_mu = self.means[:, 0], self.means[:, 1] fig,", "mu_y_dist.flatten())) trans_rho, trans_theta = self.cart2pol(dist_vect) trans_rho = (trans_rho.reshape((self.n_states, self.n_states)) *", "cmap = plt.get_cmap(\"Oranges\") N = 12 width = (2 *", "2-d, instead got shape {X.shape}\" self.X = X self.means =", 
"states on polar coordinates. :return: \"\"\" mu_x, mu_y = self.means[:,", "0.15, 0.05, 0.3]) fig.colorbar(mpl.cm.ScalarMappable(norm=norm, cmap=cmap), cax=cax, orientation='vertical', label='Increasing transition dwell", "if projection == 'polar': y, x = self.cart2pol(self.X) y_mu, x_mu", "== 2, f\"X should be 2-d, instead got shape {X.shape}\"", "= np.digitize(theta, bins) bin_rd = [rho[(bin_ix == i) & (rho", "\"\"\" Calculate phenotypic signature for a given model. :return: bin_means,", "= self.model.means_.copy() self.states = self.model.predict(X) self.n_states = len(np.unique(self.states)) self.trans =", "= plt.subplots(figsize=(5, 5), subplot_kw={'projection': 'polar'}) cmap = plt.get_cmap(\"Oranges\") N =", "'polar'}) cmap = plt.get_cmap(\"Blues\") N = 12 width = (2", "sort_ix = unique.argsort() counts = counts[sort_ix] return counts / counts.sum()", "return bin_means, bin_rd, bin_dt def transition_vectors(self): \"\"\" Transition vectors between", "from x and y coordinates, in the correct format for", "fig.add_axes([0.94, 0.15, 0.05, 0.3]) fig.colorbar(mpl.cm.ScalarMappable(norm=norm, cmap=cmap), cax=cax, orientation='vertical', label='Time') plt.show()", "alpha=alpha, zorder=zorder) ax = plt.gca() ax.add_collection(lc) return lc def make_segments(x,", "ax.add_collection(lc) return lc def make_segments(x, y): \"\"\" Create list of", "\"\"\" Plot a colored line with coordinates x and y", "from matplotlib.ticker import MaxNLocator plt.style.use('seaborn-darkgrid') class BaseTraj: def __init__(self, model,", "binned differently and can\" \\ \"not be concatenated.\" return bin_means_1,", "& (dt > 0)].sum() if len(dt[(bin_ix == i) & (dt", "mu_y_dist = mu_y - mu_y[:, np.newaxis] dist_vect = np.column_stack((mu_x_dist.flatten(), mu_y_dist.flatten()))", "an array of the form numlines x (points per line)", "== i) & (dt > 0)]) > 0 else 0", "z=None, cmap=plt.get_cmap('copper'), norm=plt.Normalize(0.0, 1.0), linewidth=3, alpha=1.0, zorder=1): \"\"\" Plot a", "Args: 
projection (str): cartesian or polar. ymax (int) \"\"\" avail_proj", "str): cmap = plt.get_cmap(cmap) new_cmap = mpl.colors.LinearSegmentedColormap.from_list( 'trunc({n},{a:.2f},{b:.2f})'.format(n=cmap.name, a=minval, b=maxval),", "0 for i in range(1, len(bins))] bin_dt = [dt[(bin_ix ==", "given model. :return: bin_means, array of shape [4 x n_bins]", "subplot_kw={'projection': projection}) ax.scatter(x, y, c=self.states, cmap='Set1', zorder=2) traj = ax.scatter(x_mu,", "Normalized transition time. :return: \"\"\" unique, counts = np.unique(self.states, return_counts=True)", "states. \"\"\" bin_rd, bin_dt = self.signature[2, :], self.signature[3, :] fig,", "colormap, a norm function and a line width \"\"\" #", "check for numerical input -- this is a hack z", "Optionally specify colors in the array z Optionally specify a", "import matplotlib.collections as mcoll from matplotlib.ticker import MaxNLocator plt.style.use('seaborn-darkgrid') class", "\"\"\" Bin rho values and dwell time on polar coordinates.", "state_dt_bins = self.rho_dt_bins(mu_rho, mu_theta, state_dt) # transitions trans_rho, trans_theta =", "import MaxNLocator plt.style.use('seaborn-darkgrid') class BaseTraj: def __init__(self, model, X): self.model", "trans_dt) assert (bin_means_1 == bin_means_2).all(), \"state and transition vectors are", "between states on polar coordinates. 
:return: \"\"\" mu_x, mu_y =", "0], self.X[:, 1] x_mu, y_mu = self.means[:, 0], self.means[:, 1]", "Optionally specify a colormap, a norm function and a line", "rho_dt_bins(self, rho, theta, dt, bins=12): \"\"\" Bin rho values and", "y, x = self.cart2pol(self.X) y_mu, x_mu = self.cart2pol(self.means) else: x,", "plt.subplots(figsize=(5, 5), subplot_kw={'projection': projection}) ax.scatter(x, y, c=self.states, cmap='Set1', zorder=2) traj", "mu_y - mu_y[:, np.newaxis] dist_vect = np.column_stack((mu_x_dist.flatten(), mu_y_dist.flatten())) trans_rho, trans_theta", "return_counts=True) sort_ix = unique.argsort() counts = counts[sort_ix] return counts /", "y, c=self.states, cmap='Set1', zorder=2) traj = ax.scatter(x_mu, y_mu, c=np.unique(self.states), cmap='Set1',", "= 0 design_trans[design_trans < thresh] = 0 design_trans[design_trans >= thresh]", "Special case if a single number: if not hasattr(z, \"__iter__\"):", "** 2 + y ** 2) theta = np.arctan2(y, x)", "1] x_mu, y_mu = self.means[:, 0], self.means[:, 1] fig, ax", "zorder=zorder) ax = plt.gca() ax.add_collection(lc) return lc def make_segments(x, y):", "zorder=2) traj = ax.scatter(x_mu, y_mu, c=np.unique(self.states), cmap='Set1', s=200, zorder=2, edgecolor='black',", "assert (bin_means_1 == bin_means_2).all(), \"state and transition vectors are binned", "self.signature[0, :], self.signature[1, :] fig, ax = plt.subplots(figsize=(5, 5), subplot_kw={'projection':", "dwell time', ticks=[0, 0.5, 1]) return fig, ax def colorline(x,", "= self.norm_trans_time() bin_means_2, trans_rd_bins, trans_dt_bins = self.rho_dt_bins(trans_rho, trans_theta, trans_dt) assert", "Transition vectors between states on polar coordinates. :return: \"\"\" mu_x,", "= np.sqrt(x ** 2 + y ** 2) theta =", "mu_x, mu_y = self.means[:, 0], self.means[:, 1] mu_x_dist = mu_x", "= self.model.predict(X) self.n_states = len(np.unique(self.states)) self.trans = self.model.transmat_.copy() def rho_dt_bins(self,", "Plot transition between cell states. 
\"\"\" bin_rd, bin_dt = self.signature[2,", "bin_ix = np.digitize(theta, bins) bin_rd = [rho[(bin_ix == i) &", "format for LineCollection: an array of the form numlines x", "mcoll.LineCollection(segments, array=z, cmap=cmap, norm=norm, linewidth=linewidth, alpha=alpha, zorder=zorder) ax = plt.gca()", "(x and y) array \"\"\" points = np.array([x, y]).T.reshape(-1, 1,", "bin_rd = [rho[(bin_ix == i) & (rho > 0)].mean() if", "vectors between states on polar coordinates. :return: \"\"\" mu_x, mu_y", "vmax=48) cax = fig.add_axes([0.94, 0.15, 0.05, 0.3]) fig.colorbar(mpl.cm.ScalarMappable(norm=norm, cmap=cmap), cax=cax,", "> 0)].mean() if len(rho[(bin_ix == i) & (rho > 0)])", "None cmap = plt.get_cmap('binary') cmap = truncate_colormap(cmap, minval=0.2) if projection", "norm = mpl.colors.Normalize(vmin=0, vmax=48) cax = fig.add_axes([0.94, 0.15, 0.05, 0.3])", "states mu_rho, mu_theta = self.cart2pol(self.means) state_dt = self.norm_state_time() bin_means_1, state_rd_bins,", "fig, ax = plt.subplots(figsize=(5, 5), subplot_kw={'projection': 'polar'}) cmap = plt.get_cmap(\"Oranges\")", "self.n_states)) * self.design_transition()).flatten() return trans_rho, trans_theta def design_transition(self, thresh=0.1): design_trans", "x, y = arr[:, 0], arr[:, 1] rho = np.sqrt(x", "(dt > 0)].sum() if len(dt[(bin_ix == i) & (dt >", "vmax=1) cax = fig.add_axes([0.94, 0.15, 0.05, 0.3]) fig.colorbar(mpl.cm.ScalarMappable(norm=norm, cmap=cmap), cax=cax,", "x (points per line) x 2 (x and y) array", "x = self.cart2pol(self.X) y_mu, x_mu = self.cart2pol(self.means) else: x, y", "range(1, len(bins))] return bin_means, bin_rd, bin_dt def transition_vectors(self): \"\"\" Transition", "= self.transition_vectors() trans_dt = self.norm_trans_time() bin_means_2, trans_rd_bins, trans_dt_bins = self.rho_dt_bins(trans_rho,", "polar space. 
Args: arr (numpy.array): Array of shape [n_state x", "Time series modeling of live-cell shape dynamics for image-based phenotypic", "numpy as np import matplotlib as mpl import matplotlib.pyplot as", "dwell time on polar coordinates. :param rho: :param theta: :param", "avail_proj, f\"projection unknown: {projection}\" if projection == 'cartesian': projection =", "colors equally spaced on [0,1]: if z is None: z", "theta: :param dt: :param bins: :return: \"\"\" bins = np.linspace(-np.pi,", "0 design_trans[design_trans < thresh] = 0 design_trans[design_trans >= thresh] =", "self.norm_trans_time() bin_means_2, trans_rd_bins, trans_dt_bins = self.rho_dt_bins(trans_rho, trans_theta, trans_dt) assert (bin_means_1", "this is a hack z = np.array([z]) z = np.asarray(z)", "state time. :return: \"\"\" unique, counts = np.unique(self.states, return_counts=True) sort_ix", "sort_ix = unique.argsort() counts = counts[sort_ix] # normalize by transition", "def make_segments(x, y): \"\"\" Create list of line segments from", "= counts[sort_ix] return counts / counts.sum() @staticmethod def cart2pol(arr): \"\"\"", "2 + y ** 2) theta = np.arctan2(y, x) return", "(bin_means_1 == bin_means_2).all(), \"state and transition vectors are binned differently", "and can\" \\ \"not be concatenated.\" return bin_means_1, np.vstack((state_rd_bins, state_dt_bins,", "= fig.add_axes([0.94, 0.15, 0.05, 0.3]) fig.colorbar(mpl.cm.ScalarMappable(norm=norm, cmap=cmap), cax=cax, orientation='vertical', label='Increasing", "X): super(Saphire, self).__init__(model, X) def plot_traj(self, projection='cartesian', ymax=None): \"\"\" Plot", "a colored line with coordinates x and y Optionally specify", "len(X.shape) == 2, f\"X should be 2-d, instead got shape", "= 1 return design_trans def norm_trans_time(self): \"\"\" Normalized transition time.", "as np import matplotlib as mpl import matplotlib.pyplot as plt", "zorder=1) norm = mpl.colors.Normalize(vmin=0, vmax=48) cax = fig.add_axes([0.94, 0.15, 0.05,", "dt: 
:param bins: :return: \"\"\" bins = np.linspace(-np.pi, np.pi, bins+1)", "ax.add_artist(legend) if ymax is not None: ax.set_ylim(0, ymax) ax.yaxis.set_major_locator(MaxNLocator(integer=True)) colorline(x,", "colorline(x, y, z=None, cmap=plt.get_cmap('copper'), norm=plt.Normalize(0.0, 1.0), linewidth=3, alpha=1.0, zorder=1): \"\"\"", "to check for numerical input -- this is a hack", "cmap=cmap), cax=cax, orientation='vertical', label='Increasing transition dwell time', ticks=[0, 0.5, 1])", "in range(1, len(bins))] bin_dt = [dt[(bin_ix == i) & (dt", "values and dwell time on polar coordinates. :param rho: :param", "= self.signature[0, :], self.signature[1, :] fig, ax = plt.subplots(figsize=(5, 5),", "= (counts * self.design_transition()).flatten() return dt / dt.sum() def norm_state_time(self):", "c=self.states, cmap='Set1', zorder=2) traj = ax.scatter(x_mu, y_mu, c=np.unique(self.states), cmap='Set1', s=200,", "self.trans diag_ix = np.diag_indices(len(design_trans)) design_trans[diag_ix] = 0 design_trans[design_trans < thresh]", "array z Optionally specify a colormap, a norm function and", "def plot_traj(self, projection='cartesian', ymax=None): \"\"\" Plot cell trajectory. Args: projection", "color=cmap(bin_dt)) if ymax is not None: ax.set_ylim(0, ymax) ax.yaxis.set_major_locator(MaxNLocator(integer=True)) norm", ":return: bin_means, array of shape [4 x n_bins] with 1.", "colors in the array z Optionally specify a colormap, a", "z = np.linspace(0.0, 1.0, len(x)) # Special case if a", "plot_traj(self, projection='cartesian', ymax=None): \"\"\" Plot cell trajectory. Args: projection (str):", "time. 
:return: \"\"\" unique, counts = np.unique(self.states, return_counts=True) sort_ix =", "self.signature[2, :], self.signature[3, :] fig, ax = plt.subplots(figsize=(5, 5), subplot_kw={'projection':", "transition probability dt = (counts * self.design_transition()).flatten() return dt /", "line width \"\"\" # Default colors equally spaced on [0,1]:", "subplot_kw={'projection': 'polar'}) cmap = plt.get_cmap(\"Blues\") N = 12 width =", "state_dt_bins, trans_rd_bins, trans_dt_bins)) class Saphire(PhenoSign): \"\"\"Implementation of the SAPHIRE algorithm", "\"\"\" avail_proj = ['cartesian', 'polar'] projection = projection.lower() assert projection", "thresh] = 1 return design_trans def norm_trans_time(self): \"\"\" Normalized transition", "= mcoll.LineCollection(segments, array=z, cmap=cmap, norm=norm, linewidth=linewidth, alpha=alpha, zorder=zorder) ax =", "= (trans_rho.reshape((self.n_states, self.n_states)) * self.design_transition()).flatten() return trans_rho, trans_theta def design_transition(self,", "0.15, 0.05, 0.3]) fig.colorbar(mpl.cm.ScalarMappable(norm=norm, cmap=cmap), cax=cax, orientation='vertical', label='Time') plt.show() return", "return counts / counts.sum() @staticmethod def cart2pol(arr): \"\"\" Cartesion space", "cmap='Set1', s=200, zorder=2, edgecolor='black', alpha=0.6) legend = ax.legend(*traj.legend_elements(), loc=\"upper right\",", "= np.diag_indices(len(design_trans)) design_trans[diag_ix] = 0 design_trans[design_trans < thresh] = 0", "Signature class.\"\"\" def __init__(self, model, X): super(PhenoSign, self).__init__(model, X) self.bin_means,", "\"\"\" Create list of line segments from x and y", "in avail_proj, f\"projection unknown: {projection}\" if projection == 'cartesian': projection", "image-based phenotypic profiling. Integr Biol (Camb). 2016;8(1):73-90. \"\"\" def __init__(self,", "time on polar coordinates. 
:param rho: :param theta: :param dt:", "bins[1:]) / 2 bin_ix = np.digitize(theta, bins) bin_rd = [rho[(bin_ix", "np.unique(self.states, return_counts=True) sort_ix = unique.argsort() counts = counts[sort_ix] # normalize", "differently and can\" \\ \"not be concatenated.\" return bin_means_1, np.vstack((state_rd_bins,", "0.3]) fig.colorbar(mpl.cm.ScalarMappable(norm=norm, cmap=cmap), cax=cax, orientation='vertical', label='Time') plt.show() return fig, ax", "state dwell times 3. transition distances 3. transition dwell times", "spaced on [0,1]: if z is None: z = np.linspace(0.0,", "mu_y[:, np.newaxis] dist_vect = np.column_stack((mu_x_dist.flatten(), mu_y_dist.flatten())) trans_rho, trans_theta = self.cart2pol(dist_vect)", "/ 2 bin_ix = np.digitize(theta, bins) bin_rd = [rho[(bin_ix ==", "SAPHIRE algorithm for plotting Hidden Markov Models. <NAME>, <NAME>, <NAME>,", "self.model.predict(X) self.n_states = len(np.unique(self.states)) self.trans = self.model.transmat_.copy() def rho_dt_bins(self, rho,", "and transition vectors are binned differently and can\" \\ \"not", "x_mu = self.cart2pol(self.means) else: x, y = self.X[:, 0], self.X[:,", "projection = projection.lower() assert projection in avail_proj, f\"projection unknown: {projection}\"", "self.X[:, 0], self.X[:, 1] x_mu, y_mu = self.means[:, 0], self.means[:,", "Saphire(PhenoSign): \"\"\"Implementation of the SAPHIRE algorithm for plotting Hidden Markov", "mu_x - mu_x[:, np.newaxis] mu_y_dist = mu_y - mu_y[:, np.newaxis]", "np.pi) / N ax.bar(self.bin_means, bin_rd, width=width, color=cmap(bin_dt)) if ymax is", "make_segments(x, y): \"\"\" Create list of line segments from x", "colored line with coordinates x and y Optionally specify colors", "** 2) theta = np.arctan2(y, x) return rho, theta class", "= projection.lower() assert projection in avail_proj, f\"projection unknown: {projection}\" if", "0.05, 0.3]) fig.colorbar(mpl.cm.ScalarMappable(norm=norm, cmap=cmap), cax=cax, orientation='vertical', label='Time') 
plt.show() return fig,", "= unique.argsort() counts = counts[sort_ix] return counts / counts.sum() @staticmethod", "bbox_to_anchor=(1.2, 0.94), title=\"States\") ax.add_artist(legend) if ymax is not None: ax.set_ylim(0,", "bins=12): \"\"\" Bin rho values and dwell time on polar", "make_segments(x, y) lc = mcoll.LineCollection(segments, array=z, cmap=cmap, norm=norm, linewidth=linewidth, alpha=alpha,", "len(rho[(bin_ix == i) & (rho > 0)]) > 0 else", "matplotlib.collections as mcoll from matplotlib.ticker import MaxNLocator plt.style.use('seaborn-darkgrid') class BaseTraj:", "# normalize by transition probability dt = (counts * self.design_transition()).flatten()", "if z is None: z = np.linspace(0.0, 1.0, len(x)) #", "mu_theta, state_dt) # transitions trans_rho, trans_theta = self.transition_vectors() trans_dt =", "(2 * np.pi) / N ax.bar(self.bin_means, bin_rd, width=width, color=cmap(bin_dt)) if", "y ** 2) theta = np.arctan2(y, x) return rho, theta", "Integr Biol (Camb). 2016;8(1):73-90. \"\"\" def __init__(self, model, X): super(Saphire,", "np.vstack((state_rd_bins, state_dt_bins, trans_rd_bins, trans_dt_bins)) class Saphire(PhenoSign): \"\"\"Implementation of the SAPHIRE", "norm=plt.Normalize(0.0, 1.0), linewidth=3, alpha=1.0, zorder=1): \"\"\" Plot a colored line", "= plt.gca() ax.add_collection(lc) return lc def make_segments(x, y): \"\"\" Create", "<NAME>. Time series modeling of live-cell shape dynamics for image-based", "polar coordinates. 
:param rho: :param theta: :param dt: :param bins:", "points = np.array([x, y]).T.reshape(-1, 1, 2) segments = np.concatenate([points[:-1], points[1:]],", "\"\"\" def __init__(self, model, X): super(Saphire, self).__init__(model, X) def plot_traj(self,", "= 12 width = (2 * np.pi) / N ax.bar(self.bin_means,", "width=width, color=cmap(bin_dt)) if ymax is not None: ax.set_ylim(0, ymax) ax.yaxis.set_major_locator(MaxNLocator(integer=True))", "'cartesian': projection = None cmap = plt.get_cmap('binary') cmap = truncate_colormap(cmap,", "be 2-d, instead got shape {X.shape}\" self.X = X self.means", "None: ax.set_ylim(0, ymax) ax.yaxis.set_major_locator(MaxNLocator(integer=True)) colorline(x, y, cmap=cmap, zorder=1) norm =", "dynamics for image-based phenotypic profiling. Integr Biol (Camb). 2016;8(1):73-90. \"\"\"", "plot_transition(self, ymax=None): \"\"\" Plot transition between cell states. \"\"\" bin_rd,", "of shape [n_state x dims] \"\"\" x, y = arr[:,", "1]) return fig, ax def colorline(x, y, z=None, cmap=plt.get_cmap('copper'), norm=plt.Normalize(0.0,", "norm function and a line width \"\"\" # Default colors", "should be 2-d, instead got shape {X.shape}\" self.X = X", "per line) x 2 (x and y) array \"\"\" points", "diag_ix = np.diag_indices(len(design_trans)) design_trans[diag_ix] = 0 design_trans[design_trans < thresh] =", "self.cart2pol(self.means) state_dt = self.norm_state_time() bin_means_1, state_rd_bins, state_dt_bins = self.rho_dt_bins(mu_rho, mu_theta,", "https://stackoverflow.com/a/18926541 ''' if isinstance(cmap, str): cmap = plt.get_cmap(cmap) new_cmap =", "mu_x[:, np.newaxis] mu_y_dist = mu_y - mu_y[:, np.newaxis] dist_vect =", "width \"\"\" # Default colors equally spaced on [0,1]: if", "transition vectors are binned differently and can\" \\ \"not be", "= self.cart2pol(self.X) y_mu, x_mu = self.cart2pol(self.means) else: x, y =", "if a single number: if not hasattr(z, \"__iter__\"): # to", "not hasattr(z, \"__iter__\"): # to check for numerical input 
--", "are binned differently and can\" \\ \"not be concatenated.\" return", "fig, ax def colorline(x, y, z=None, cmap=plt.get_cmap('copper'), norm=plt.Normalize(0.0, 1.0), linewidth=3,", "np.asarray(z) segments = make_segments(x, y) lc = mcoll.LineCollection(segments, array=z, cmap=cmap,", "for numerical input -- this is a hack z =", "y coordinates, in the correct format for LineCollection: an array", "thresh] = 0 design_trans[design_trans >= thresh] = 1 return design_trans", "truncate_colormap(cmap, minval=0.0, maxval=1.0, n=100): ''' https://stackoverflow.com/a/18926541 ''' if isinstance(cmap, str):", "alpha=1.0, zorder=1): \"\"\" Plot a colored line with coordinates x", "or polar. ymax (int) \"\"\" avail_proj = ['cartesian', 'polar'] projection", "line) x 2 (x and y) array \"\"\" points =", "cell trajectory. Args: projection (str): cartesian or polar. ymax (int)", "= plt.get_cmap(\"Oranges\") N = 12 width = (2 * np.pi)", "and dwell time on polar coordinates. :param rho: :param theta:", "y) array \"\"\" points = np.array([x, y]).T.reshape(-1, 1, 2) segments", "trans_dt_bins = self.rho_dt_bins(trans_rho, trans_theta, trans_dt) assert (bin_means_1 == bin_means_2).all(), \"state", "rho, theta, dt, bins=12): \"\"\" Bin rho values and dwell", "Models. <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>. Time series modeling", "= counts[sort_ix] # normalize by transition probability dt = (counts", "= ax.scatter(x_mu, y_mu, c=np.unique(self.states), cmap='Set1', s=200, zorder=2, edgecolor='black', alpha=0.6) legend", "z = np.array([z]) z = np.asarray(z) segments = make_segments(x, y)", "x n_bins] with 1. state radial distances 2. state dwell", ":return: \"\"\" unique, counts = np.unique(self.states, return_counts=True) sort_ix = unique.argsort()", "cartesian or polar. 
ymax (int) \"\"\" avail_proj = ['cartesian', 'polar']", "orientation='vertical', label='Time') plt.show() return fig, ax def plot_states(self, ymax=None): \"\"\"", "np.array([z]) z = np.asarray(z) segments = make_segments(x, y) lc =", "2. state dwell times 3. transition distances 3. transition dwell", "be concatenated.\" return bin_means_1, np.vstack((state_rd_bins, state_dt_bins, trans_rd_bins, trans_dt_bins)) class Saphire(PhenoSign):", "plt.get_cmap(\"Oranges\") N = 12 width = (2 * np.pi) /", "lc def make_segments(x, y): \"\"\" Create list of line segments", "minval=0.0, maxval=1.0, n=100): ''' https://stackoverflow.com/a/18926541 ''' if isinstance(cmap, str): cmap", "self.cart2pol(dist_vect) trans_rho = (trans_rho.reshape((self.n_states, self.n_states)) * self.design_transition()).flatten() return trans_rho, trans_theta", "== i) & (rho > 0)].mean() if len(rho[(bin_ix == i)", "correct format for LineCollection: an array of the form numlines", "np.arctan2(y, x) return rho, theta class PhenoSign(BaseTraj): \"\"\"Phenotypic Signature class.\"\"\"", "arr[:, 0], arr[:, 1] rho = np.sqrt(x ** 2 +", "BaseTraj: def __init__(self, model, X): self.model = model assert len(X.shape)", "mu_theta = self.cart2pol(self.means) state_dt = self.norm_state_time() bin_means_1, state_rd_bins, state_dt_bins =", "model, X): self.model = model assert len(X.shape) == 2, f\"X", "{X.shape}\" self.X = X self.means = self.model.means_.copy() self.states = self.model.predict(X)", "= truncate_colormap(cmap, minval=0.2) if projection == 'polar': y, x =", "is not None: ax.set_ylim(0, ymax) ax.yaxis.set_major_locator(MaxNLocator(integer=True)) colorline(x, y, cmap=cmap, zorder=1)", "plot_states(self, ymax=None): \"\"\" Plot cell states. 
\"\"\" bin_rd, bin_dt =", "def rho_dt_bins(self, rho, theta, dt, bins=12): \"\"\" Bin rho values", "projection.lower() assert projection in avail_proj, f\"projection unknown: {projection}\" if projection", "x and y Optionally specify colors in the array z", "# to check for numerical input -- this is a", "arr (numpy.array): Array of shape [n_state x dims] \"\"\" x,", "plotting Hidden Markov Models. <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>.", "is None: z = np.linspace(0.0, 1.0, len(x)) # Special case", "hack z = np.array([z]) z = np.asarray(z) segments = make_segments(x,", "self.model = model assert len(X.shape) == 2, f\"X should be", "cmap = plt.get_cmap('binary') cmap = truncate_colormap(cmap, minval=0.2) if projection ==", "> 0)].sum() if len(dt[(bin_ix == i) & (dt > 0)])", "len(bins))] bin_dt = [dt[(bin_ix == i) & (dt > 0)].sum()", "# states mu_rho, mu_theta = self.cart2pol(self.means) state_dt = self.norm_state_time() bin_means_1,", "y]).T.reshape(-1, 1, 2) segments = np.concatenate([points[:-1], points[1:]], axis=1) return segments", "can\" \\ \"not be concatenated.\" return bin_means_1, np.vstack((state_rd_bins, state_dt_bins, trans_rd_bins,", "trans_dt = self.norm_trans_time() bin_means_2, trans_rd_bins, trans_dt_bins = self.rho_dt_bins(trans_rho, trans_theta, trans_dt)", "for image-based phenotypic profiling. Integr Biol (Camb). 2016;8(1):73-90. 
\"\"\" def", "0], arr[:, 1] rho = np.sqrt(x ** 2 + y", "0.3]) fig.colorbar(mpl.cm.ScalarMappable(norm=norm, cmap=cmap), cax=cax, orientation='vertical', label='Increasing transition dwell time', ticks=[0,", "if ymax is not None: ax.set_ylim(0, ymax) ax.yaxis.set_major_locator(MaxNLocator(integer=True)) norm =", "theta = np.arctan2(y, x) return rho, theta class PhenoSign(BaseTraj): \"\"\"Phenotypic", "a colormap, a norm function and a line width \"\"\"", "for i in range(1, len(bins))] bin_dt = [dt[(bin_ix == i)", "loc=\"upper right\", bbox_to_anchor=(1.2, 0.94), title=\"States\") ax.add_artist(legend) if ymax is not", "width = (2 * np.pi) / N ax.bar(self.bin_means, bin_rd, width=width,", "cax=cax, orientation='vertical', label='Time') plt.show() return fig, ax def plot_states(self, ymax=None):", "1.0), linewidth=3, alpha=1.0, zorder=1): \"\"\" Plot a colored line with", "1] mu_x_dist = mu_x - mu_x[:, np.newaxis] mu_y_dist = mu_y", "plt.subplots(figsize=(5, 5), subplot_kw={'projection': 'polar'}) cmap = plt.get_cmap(\"Oranges\") N = 12", "- mu_y[:, np.newaxis] dist_vect = np.column_stack((mu_x_dist.flatten(), mu_y_dist.flatten())) trans_rho, trans_theta =", "\"\"\" points = np.array([x, y]).T.reshape(-1, 1, 2) segments = np.concatenate([points[:-1],", "= np.asarray(z) segments = make_segments(x, y) lc = mcoll.LineCollection(segments, array=z,", ":param bins: :return: \"\"\" bins = np.linspace(-np.pi, np.pi, bins+1) bin_means", "(bins[:-1] + bins[1:]) / 2 bin_ix = np.digitize(theta, bins) bin_rd", "trans_rho, trans_theta = self.transition_vectors() trans_dt = self.norm_trans_time() bin_means_2, trans_rd_bins, trans_dt_bins", "Biol (Camb). 2016;8(1):73-90. 
\"\"\" def __init__(self, model, X): super(Saphire, self).__init__(model,", "class Saphire(PhenoSign): \"\"\"Implementation of the SAPHIRE algorithm for plotting Hidden", "label='Time') plt.show() return fig, ax def plot_states(self, ymax=None): \"\"\" Plot", "return trans_rho, trans_theta def design_transition(self, thresh=0.1): design_trans = self.trans diag_ix", "truncate_colormap(cmap, minval=0.2) if projection == 'polar': y, x = self.cart2pol(self.X)", ":], self.signature[1, :] fig, ax = plt.subplots(figsize=(5, 5), subplot_kw={'projection': 'polar'})", "fig.colorbar(mpl.cm.ScalarMappable(norm=norm, cmap=cmap), cax=cax, orientation='vertical', label='Increasing transition dwell time', ticks=[0, 0.5,", "bin_dt = [dt[(bin_ix == i) & (dt > 0)].sum() if", "__init__(self, model, X): super(PhenoSign, self).__init__(model, X) self.bin_means, self.signature = self.get_signature()", "x and y coordinates, in the correct format for LineCollection:", "class BaseTraj: def __init__(self, model, X): self.model = model assert", "list of line segments from x and y coordinates, in", "fig.add_axes([0.94, 0.15, 0.05, 0.3]) fig.colorbar(mpl.cm.ScalarMappable(norm=norm, cmap=cmap), cax=cax, orientation='vertical', label='Increasing state", "np.unique(self.states, return_counts=True) sort_ix = unique.argsort() counts = counts[sort_ix] return counts", "plt.get_cmap('binary') cmap = truncate_colormap(cmap, minval=0.2) if projection == 'polar': y,", "- mu_x[:, np.newaxis] mu_y_dist = mu_y - mu_y[:, np.newaxis] dist_vect", "profiling. Integr Biol (Camb). 2016;8(1):73-90. \"\"\" def __init__(self, model, X):", "plt.subplots(figsize=(5, 5), subplot_kw={'projection': 'polar'}) cmap = plt.get_cmap(\"Blues\") N = 12", "= mu_y - mu_y[:, np.newaxis] dist_vect = np.column_stack((mu_x_dist.flatten(), mu_y_dist.flatten())) trans_rho,", "X) def plot_traj(self, projection='cartesian', ymax=None): \"\"\" Plot cell trajectory. 
Args:", "design_trans def norm_trans_time(self): \"\"\" Normalized transition time. :return: \"\"\" unique,", "shape dynamics for image-based phenotypic profiling. Integr Biol (Camb). 2016;8(1):73-90.", "ax = plt.gca() ax.add_collection(lc) return lc def make_segments(x, y): \"\"\"", "len(bins))] return bin_means, bin_rd, bin_dt def transition_vectors(self): \"\"\" Transition vectors", "self.norm_state_time() bin_means_1, state_rd_bins, state_dt_bins = self.rho_dt_bins(mu_rho, mu_theta, state_dt) # transitions", "Plot cell trajectory. Args: projection (str): cartesian or polar. ymax", "unknown: {projection}\" if projection == 'cartesian': projection = None cmap", "[4 x n_bins] with 1. state radial distances 2. state", "model. :return: bin_means, array of shape [4 x n_bins] with", "orientation='vertical', label='Increasing state dwell time', ticks=[0, 0.5, 1]) return fig,", "if ymax is not None: ax.set_ylim(0, ymax) ax.yaxis.set_major_locator(MaxNLocator(integer=True)) colorline(x, y,", "return rho, theta class PhenoSign(BaseTraj): \"\"\"Phenotypic Signature class.\"\"\" def __init__(self,", "theta class PhenoSign(BaseTraj): \"\"\"Phenotypic Signature class.\"\"\" def __init__(self, model, X):", "polar coordinates. :return: \"\"\" mu_x, mu_y = self.means[:, 0], self.means[:,", "transition dwell times \"\"\" # states mu_rho, mu_theta = self.cart2pol(self.means)", "ymax is not None: ax.set_ylim(0, ymax) ax.yaxis.set_major_locator(MaxNLocator(integer=True)) colorline(x, y, cmap=cmap,", "= mpl.colors.LinearSegmentedColormap.from_list( 'trunc({n},{a:.2f},{b:.2f})'.format(n=cmap.name, a=minval, b=maxval), cmap(np.linspace(minval, maxval, n))) return new_cmap", "get_signature(self): \"\"\" Calculate phenotypic signature for a given model. :return:", "radial distances 2. state dwell times 3. transition distances 3.", "bins = np.linspace(-np.pi, np.pi, bins+1) bin_means = (bins[:-1] + bins[1:])", "for a given model. 
:return: bin_means, array of shape [4", "= self.means[:, 0], self.means[:, 1] fig, ax = plt.subplots(figsize=(5, 5),", "np import matplotlib as mpl import matplotlib.pyplot as plt import", "\"\"\" Plot cell trajectory. Args: projection (str): cartesian or polar.", "def plot_states(self, ymax=None): \"\"\" Plot cell states. \"\"\" bin_rd, bin_dt", "np.newaxis] mu_y_dist = mu_y - mu_y[:, np.newaxis] dist_vect = np.column_stack((mu_x_dist.flatten(),", "mcoll from matplotlib.ticker import MaxNLocator plt.style.use('seaborn-darkgrid') class BaseTraj: def __init__(self,", "y, z=None, cmap=plt.get_cmap('copper'), norm=plt.Normalize(0.0, 1.0), linewidth=3, alpha=1.0, zorder=1): \"\"\" Plot", "y_mu, x_mu = self.cart2pol(self.means) else: x, y = self.X[:, 0],", "array of the form numlines x (points per line) x", "dt / dt.sum() def norm_state_time(self): \"\"\" Normalized state time. :return:", "''' if isinstance(cmap, str): cmap = plt.get_cmap(cmap) new_cmap = mpl.colors.LinearSegmentedColormap.from_list(", "zorder=2, edgecolor='black', alpha=0.6) legend = ax.legend(*traj.legend_elements(), loc=\"upper right\", bbox_to_anchor=(1.2, 0.94),", "design_trans[design_trans >= thresh] = 1 return design_trans def norm_trans_time(self): \"\"\"", "Markov Models. <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>. Time series", "if projection == 'cartesian': projection = None cmap = plt.get_cmap('binary')", "& (rho > 0)].mean() if len(rho[(bin_ix == i) & (rho", "2) segments = np.concatenate([points[:-1], points[1:]], axis=1) return segments def truncate_colormap(cmap,", "shape [4 x n_bins] with 1. state radial distances 2.", "class.\"\"\" def __init__(self, model, X): super(PhenoSign, self).__init__(model, X) self.bin_means, self.signature", "is not None: ax.set_ylim(0, ymax) ax.yaxis.set_major_locator(MaxNLocator(integer=True)) norm = mpl.colors.Normalize(vmin=0, vmax=1)", "== i) & (rho > 0)]) > 0 else 0", "on polar coordinates. :return: \"\"\" mu_x, mu_y = self.means[:, 0],", "(Camb). 
2016;8(1):73-90. \"\"\" def __init__(self, model, X): super(Saphire, self).__init__(model, X)", "def transition_vectors(self): \"\"\" Transition vectors between states on polar coordinates.", "states. \"\"\" bin_rd, bin_dt = self.signature[0, :], self.signature[1, :] fig,", "fig, ax def plot_transition(self, ymax=None): \"\"\" Plot transition between cell", "self.design_transition()).flatten() return trans_rho, trans_theta def design_transition(self, thresh=0.1): design_trans = self.trans", "design_trans = self.trans diag_ix = np.diag_indices(len(design_trans)) design_trans[diag_ix] = 0 design_trans[design_trans", "0 for i in range(1, len(bins))] return bin_means, bin_rd, bin_dt", "trans_rho, trans_theta def design_transition(self, thresh=0.1): design_trans = self.trans diag_ix =", "self.get_signature() def get_signature(self): \"\"\" Calculate phenotypic signature for a given", "right\", bbox_to_anchor=(1.2, 0.94), title=\"States\") ax.add_artist(legend) if ymax is not None:", "= self.norm_state_time() bin_means_1, state_rd_bins, state_dt_bins = self.rho_dt_bins(mu_rho, mu_theta, state_dt) #", "arr[:, 1] rho = np.sqrt(x ** 2 + y **", "self.means[:, 1] fig, ax = plt.subplots(figsize=(5, 5), subplot_kw={'projection': projection}) ax.scatter(x,", "got shape {X.shape}\" self.X = X self.means = self.model.means_.copy() self.states", "i) & (rho > 0)]) > 0 else 0 for", "0.15, 0.05, 0.3]) fig.colorbar(mpl.cm.ScalarMappable(norm=norm, cmap=cmap), cax=cax, orientation='vertical', label='Increasing state dwell", "plt.style.use('seaborn-darkgrid') class BaseTraj: def __init__(self, model, X): self.model = model", "= self.means[:, 0], self.means[:, 1] mu_x_dist = mu_x - mu_x[:,", "\"\"\" unique, counts = np.unique(self.states, return_counts=True) sort_ix = unique.argsort() counts", "fig.colorbar(mpl.cm.ScalarMappable(norm=norm, cmap=cmap), cax=cax, orientation='vertical', label='Time') plt.show() return fig, ax def", "a norm function and a line width \"\"\" # Default", 
"@staticmethod def cart2pol(arr): \"\"\" Cartesion space to polar space. Args:", "1]) return fig, ax def plot_transition(self, ymax=None): \"\"\" Plot transition", "return segments def truncate_colormap(cmap, minval=0.0, maxval=1.0, n=100): ''' https://stackoverflow.com/a/18926541 '''", "if len(rho[(bin_ix == i) & (rho > 0)]) > 0", "norm_trans_time(self): \"\"\" Normalized transition time. :return: \"\"\" unique, counts =", "rho values and dwell time on polar coordinates. :param rho:", "2) theta = np.arctan2(y, x) return rho, theta class PhenoSign(BaseTraj):", "def __init__(self, model, X): super(Saphire, self).__init__(model, X) def plot_traj(self, projection='cartesian',", "if isinstance(cmap, str): cmap = plt.get_cmap(cmap) new_cmap = mpl.colors.LinearSegmentedColormap.from_list( 'trunc({n},{a:.2f},{b:.2f})'.format(n=cmap.name,", "= plt.get_cmap(cmap) new_cmap = mpl.colors.LinearSegmentedColormap.from_list( 'trunc({n},{a:.2f},{b:.2f})'.format(n=cmap.name, a=minval, b=maxval), cmap(np.linspace(minval, maxval,", "len(dt[(bin_ix == i) & (dt > 0)]) > 0 else", "coordinates x and y Optionally specify colors in the array", "state_rd_bins, state_dt_bins = self.rho_dt_bins(mu_rho, mu_theta, state_dt) # transitions trans_rho, trans_theta", "range(1, len(bins))] bin_dt = [dt[(bin_ix == i) & (dt >", "rho, theta class PhenoSign(BaseTraj): \"\"\"Phenotypic Signature class.\"\"\" def __init__(self, model,", "linewidth=3, alpha=1.0, zorder=1): \"\"\" Plot a colored line with coordinates", "['cartesian', 'polar'] projection = projection.lower() assert projection in avail_proj, f\"projection", "Cartesion space to polar space. 
Args: arr (numpy.array): Array of", "cmap='Set1', zorder=2) traj = ax.scatter(x_mu, y_mu, c=np.unique(self.states), cmap='Set1', s=200, zorder=2,", "not None: ax.set_ylim(0, ymax) ax.yaxis.set_major_locator(MaxNLocator(integer=True)) colorline(x, y, cmap=cmap, zorder=1) norm", "avail_proj = ['cartesian', 'polar'] projection = projection.lower() assert projection in", "cmap = truncate_colormap(cmap, minval=0.2) if projection == 'polar': y, x", "= self.trans diag_ix = np.diag_indices(len(design_trans)) design_trans[diag_ix] = 0 design_trans[design_trans <", "counts[sort_ix] # normalize by transition probability dt = (counts *", "def plot_transition(self, ymax=None): \"\"\" Plot transition between cell states. \"\"\"", "0.5, 1]) return fig, ax def colorline(x, y, z=None, cmap=plt.get_cmap('copper'),", "import numpy as np import matplotlib as mpl import matplotlib.pyplot", "coordinates. :param rho: :param theta: :param dt: :param bins: :return:", "super(PhenoSign, self).__init__(model, X) self.bin_means, self.signature = self.get_signature() def get_signature(self): \"\"\"", "legend = ax.legend(*traj.legend_elements(), loc=\"upper right\", bbox_to_anchor=(1.2, 0.94), title=\"States\") ax.add_artist(legend) if", "y): \"\"\" Create list of line segments from x and", "assert len(X.shape) == 2, f\"X should be 2-d, instead got", "self.model.means_.copy() self.states = self.model.predict(X) self.n_states = len(np.unique(self.states)) self.trans = self.model.transmat_.copy()", "norm_state_time(self): \"\"\" Normalized state time. 
:return: \"\"\" unique, counts =", "X self.means = self.model.means_.copy() self.states = self.model.predict(X) self.n_states = len(np.unique(self.states))", "import matplotlib as mpl import matplotlib.pyplot as plt import matplotlib.collections", "= 0 design_trans[design_trans >= thresh] = 1 return design_trans def", "design_trans[diag_ix] = 0 design_trans[design_trans < thresh] = 0 design_trans[design_trans >=", "self).__init__(model, X) self.bin_means, self.signature = self.get_signature() def get_signature(self): \"\"\" Calculate", "single number: if not hasattr(z, \"__iter__\"): # to check for", "cmap=cmap, norm=norm, linewidth=linewidth, alpha=alpha, zorder=zorder) ax = plt.gca() ax.add_collection(lc) return", "self.means = self.model.means_.copy() self.states = self.model.predict(X) self.n_states = len(np.unique(self.states)) self.trans", "self.means[:, 1] mu_x_dist = mu_x - mu_x[:, np.newaxis] mu_y_dist =", "-- this is a hack z = np.array([z]) z =", "cmap = plt.get_cmap(\"Blues\") N = 12 width = (2 *", "series modeling of live-cell shape dynamics for image-based phenotypic profiling.", "= self.cart2pol(dist_vect) trans_rho = (trans_rho.reshape((self.n_states, self.n_states)) * self.design_transition()).flatten() return trans_rho,", "X): super(PhenoSign, self).__init__(model, X) self.bin_means, self.signature = self.get_signature() def get_signature(self):", "= self.rho_dt_bins(trans_rho, trans_theta, trans_dt) assert (bin_means_1 == bin_means_2).all(), \"state and", "<NAME>, <NAME>, <NAME>. Time series modeling of live-cell shape dynamics", "live-cell shape dynamics for image-based phenotypic profiling. Integr Biol (Camb).", ":param theta: :param dt: :param bins: :return: \"\"\" bins =", "class PhenoSign(BaseTraj): \"\"\"Phenotypic Signature class.\"\"\" def __init__(self, model, X): super(PhenoSign,", "ymax=None): \"\"\" Plot cell trajectory. 
Args: projection (str): cartesian or", "return fig, ax def plot_states(self, ymax=None): \"\"\" Plot cell states.", "fig.colorbar(mpl.cm.ScalarMappable(norm=norm, cmap=cmap), cax=cax, orientation='vertical', label='Increasing state dwell time', ticks=[0, 0.5,", "minval=0.2) if projection == 'polar': y, x = self.cart2pol(self.X) y_mu,", "counts / counts.sum() @staticmethod def cart2pol(arr): \"\"\" Cartesion space to", "MaxNLocator plt.style.use('seaborn-darkgrid') class BaseTraj: def __init__(self, model, X): self.model =", "else 0 for i in range(1, len(bins))] bin_dt = [dt[(bin_ix", "else: x, y = self.X[:, 0], self.X[:, 1] x_mu, y_mu", "* self.design_transition()).flatten() return dt / dt.sum() def norm_state_time(self): \"\"\" Normalized", "\"\"\" # states mu_rho, mu_theta = self.cart2pol(self.means) state_dt = self.norm_state_time()", "model, X): super(PhenoSign, self).__init__(model, X) self.bin_means, self.signature = self.get_signature() def", "[0,1]: if z is None: z = np.linspace(0.0, 1.0, len(x))", "counts = counts[sort_ix] # normalize by transition probability dt =", "# transitions trans_rho, trans_theta = self.transition_vectors() trans_dt = self.norm_trans_time() bin_means_2,", "\"not be concatenated.\" return bin_means_1, np.vstack((state_rd_bins, state_dt_bins, trans_rd_bins, trans_dt_bins)) class", "= None cmap = plt.get_cmap('binary') cmap = truncate_colormap(cmap, minval=0.2) if", "\"\"\" # Default colors equally spaced on [0,1]: if z", "label='Increasing transition dwell time', ticks=[0, 0.5, 1]) return fig, ax", "= np.unique(self.states, return_counts=True) sort_ix = unique.argsort() counts = counts[sort_ix] #", "N ax.bar(self.bin_means, bin_rd, width=width, color=cmap(bin_dt)) if ymax is not None:", "return bin_means_1, np.vstack((state_rd_bins, state_dt_bins, trans_rd_bins, trans_dt_bins)) class Saphire(PhenoSign): \"\"\"Implementation of", "modeling of live-cell shape dynamics for image-based phenotypic profiling. 
Integr", "\"\"\" bin_rd, bin_dt = self.signature[2, :], self.signature[3, :] fig, ax", "ax.scatter(x, y, c=self.states, cmap='Set1', zorder=2) traj = ax.scatter(x_mu, y_mu, c=np.unique(self.states),", "y, cmap=cmap, zorder=1) norm = mpl.colors.Normalize(vmin=0, vmax=48) cax = fig.add_axes([0.94,", "array of shape [4 x n_bins] with 1. state radial", "phenotypic profiling. Integr Biol (Camb). 2016;8(1):73-90. \"\"\" def __init__(self, model,", "fig, ax = plt.subplots(figsize=(5, 5), subplot_kw={'projection': 'polar'}) cmap = plt.get_cmap(\"Blues\")", "> 0 else 0 for i in range(1, len(bins))] return", "a single number: if not hasattr(z, \"__iter__\"): # to check", "bin_means_2, trans_rd_bins, trans_dt_bins = self.rho_dt_bins(trans_rho, trans_theta, trans_dt) assert (bin_means_1 ==", "x 2 (x and y) array \"\"\" points = np.array([x,", "as mcoll from matplotlib.ticker import MaxNLocator plt.style.use('seaborn-darkgrid') class BaseTraj: def", "== 'cartesian': projection = None cmap = plt.get_cmap('binary') cmap =", "number: if not hasattr(z, \"__iter__\"): # to check for numerical", "0.05, 0.3]) fig.colorbar(mpl.cm.ScalarMappable(norm=norm, cmap=cmap), cax=cax, orientation='vertical', label='Increasing state dwell time',", "mu_y = self.means[:, 0], self.means[:, 1] mu_x_dist = mu_x -", "np.concatenate([points[:-1], points[1:]], axis=1) return segments def truncate_colormap(cmap, minval=0.0, maxval=1.0, n=100):", "= self.signature[2, :], self.signature[3, :] fig, ax = plt.subplots(figsize=(5, 5),", "\"\"\" bins = np.linspace(-np.pi, np.pi, bins+1) bin_means = (bins[:-1] +", "transition distances 3. transition dwell times \"\"\" # states mu_rho,", "\"\"\" Normalized transition time. 
:return: \"\"\" unique, counts = np.unique(self.states,", "1] rho = np.sqrt(x ** 2 + y ** 2)", "Default colors equally spaced on [0,1]: if z is None:", "+ bins[1:]) / 2 bin_ix = np.digitize(theta, bins) bin_rd =", "ymax) ax.yaxis.set_major_locator(MaxNLocator(integer=True)) colorline(x, y, cmap=cmap, zorder=1) norm = mpl.colors.Normalize(vmin=0, vmax=48)", "(numpy.array): Array of shape [n_state x dims] \"\"\" x, y", "X): self.model = model assert len(X.shape) == 2, f\"X should", "thresh=0.1): design_trans = self.trans diag_ix = np.diag_indices(len(design_trans)) design_trans[diag_ix] = 0", "self.rho_dt_bins(trans_rho, trans_theta, trans_dt) assert (bin_means_1 == bin_means_2).all(), \"state and transition", "else 0 for i in range(1, len(bins))] return bin_means, bin_rd,", "z = np.asarray(z) segments = make_segments(x, y) lc = mcoll.LineCollection(segments,", "ax.yaxis.set_major_locator(MaxNLocator(integer=True)) colorline(x, y, cmap=cmap, zorder=1) norm = mpl.colors.Normalize(vmin=0, vmax=48) cax", "time', ticks=[0, 0.5, 1]) return fig, ax def plot_transition(self, ymax=None):", "self.states = self.model.predict(X) self.n_states = len(np.unique(self.states)) self.trans = self.model.transmat_.copy() def", "\"__iter__\"): # to check for numerical input -- this is", "'polar'}) cmap = plt.get_cmap(\"Oranges\") N = 12 width = (2", "matplotlib as mpl import matplotlib.pyplot as plt import matplotlib.collections as", "/ counts.sum() @staticmethod def cart2pol(arr): \"\"\" Cartesion space to polar", "traj = ax.scatter(x_mu, y_mu, c=np.unique(self.states), cmap='Set1', s=200, zorder=2, edgecolor='black', alpha=0.6)", "self.design_transition()).flatten() return dt / dt.sum() def norm_state_time(self): \"\"\" Normalized state", "ticks=[0, 0.5, 1]) return fig, ax def colorline(x, y, z=None,", "f\"projection unknown: {projection}\" if projection == 'cartesian': projection = None", "= np.arctan2(y, x) return rho, theta class PhenoSign(BaseTraj): \"\"\"Phenotypic Signature", 
"def norm_trans_time(self): \"\"\" Normalized transition time. :return: \"\"\" unique, counts", "linewidth=linewidth, alpha=alpha, zorder=zorder) ax = plt.gca() ax.add_collection(lc) return lc def", "segments from x and y coordinates, in the correct format", "form numlines x (points per line) x 2 (x and", "on [0,1]: if z is None: z = np.linspace(0.0, 1.0,", "= np.array([z]) z = np.asarray(z) segments = make_segments(x, y) lc", "np.column_stack((mu_x_dist.flatten(), mu_y_dist.flatten())) trans_rho, trans_theta = self.cart2pol(dist_vect) trans_rho = (trans_rho.reshape((self.n_states, self.n_states))", "return dt / dt.sum() def norm_state_time(self): \"\"\" Normalized state time.", "len(x)) # Special case if a single number: if not", "x_mu, y_mu = self.means[:, 0], self.means[:, 1] fig, ax =", "ax.bar(self.bin_means, bin_rd, width=width, color=cmap(bin_dt)) if ymax is not None: ax.set_ylim(0,", "(str): cartesian or polar. ymax (int) \"\"\" avail_proj = ['cartesian',", "ax def plot_states(self, ymax=None): \"\"\" Plot cell states. \"\"\" bin_rd,", "1.0, len(x)) # Special case if a single number: if", "by transition probability dt = (counts * self.design_transition()).flatten() return dt", "(rho > 0)]) > 0 else 0 for i in", "self).__init__(model, X) def plot_traj(self, projection='cartesian', ymax=None): \"\"\" Plot cell trajectory.", "i) & (rho > 0)].mean() if len(rho[(bin_ix == i) &", "self.signature[1, :] fig, ax = plt.subplots(figsize=(5, 5), subplot_kw={'projection': 'polar'}) cmap", "lc = mcoll.LineCollection(segments, array=z, cmap=cmap, norm=norm, linewidth=linewidth, alpha=alpha, zorder=zorder) ax", "<NAME>, <NAME>, <NAME>, <NAME>, <NAME>. 
Time series modeling of live-cell", "= len(np.unique(self.states)) self.trans = self.model.transmat_.copy() def rho_dt_bins(self, rho, theta, dt,", "= self.rho_dt_bins(mu_rho, mu_theta, state_dt) # transitions trans_rho, trans_theta = self.transition_vectors()", "> 0)]) > 0 else 0 for i in range(1,", "segments def truncate_colormap(cmap, minval=0.0, maxval=1.0, n=100): ''' https://stackoverflow.com/a/18926541 ''' if", "ax.yaxis.set_major_locator(MaxNLocator(integer=True)) norm = mpl.colors.Normalize(vmin=0, vmax=1) cax = fig.add_axes([0.94, 0.15, 0.05,", "== 'polar': y, x = self.cart2pol(self.X) y_mu, x_mu = self.cart2pol(self.means)", ":], self.signature[3, :] fig, ax = plt.subplots(figsize=(5, 5), subplot_kw={'projection': 'polar'})", "a given model. :return: bin_means, array of shape [4 x", "= (2 * np.pi) / N ax.bar(self.bin_means, bin_rd, width=width, color=cmap(bin_dt))", "dims] \"\"\" x, y = arr[:, 0], arr[:, 1] rho", "projection == 'polar': y, x = self.cart2pol(self.X) y_mu, x_mu =", "projection == 'cartesian': projection = None cmap = plt.get_cmap('binary') cmap", "self.signature = self.get_signature() def get_signature(self): \"\"\" Calculate phenotypic signature for", "c=np.unique(self.states), cmap='Set1', s=200, zorder=2, edgecolor='black', alpha=0.6) legend = ax.legend(*traj.legend_elements(), loc=\"upper", "z Optionally specify a colormap, a norm function and a", "[dt[(bin_ix == i) & (dt > 0)].sum() if len(dt[(bin_ix ==", "with 1. state radial distances 2. state dwell times 3.", "mpl import matplotlib.pyplot as plt import matplotlib.collections as mcoll from", "n_bins] with 1. state radial distances 2. 
state dwell times", "(trans_rho.reshape((self.n_states, self.n_states)) * self.design_transition()).flatten() return trans_rho, trans_theta def design_transition(self, thresh=0.1):", "bin_means_1, np.vstack((state_rd_bins, state_dt_bins, trans_rd_bins, trans_dt_bins)) class Saphire(PhenoSign): \"\"\"Implementation of the", "for plotting Hidden Markov Models. <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,", "bin_dt = self.signature[0, :], self.signature[1, :] fig, ax = plt.subplots(figsize=(5,", "in the correct format for LineCollection: an array of the", "line segments from x and y coordinates, in the correct", "ticks=[0, 0.5, 1]) return fig, ax def plot_transition(self, ymax=None): \"\"\"", "fig, ax def plot_states(self, ymax=None): \"\"\" Plot cell states. \"\"\"", "state_dt = self.norm_state_time() bin_means_1, state_rd_bins, state_dt_bins = self.rho_dt_bins(mu_rho, mu_theta, state_dt)", "0.3]) fig.colorbar(mpl.cm.ScalarMappable(norm=norm, cmap=cmap), cax=cax, orientation='vertical', label='Increasing state dwell time', ticks=[0,", "np.linspace(0.0, 1.0, len(x)) # Special case if a single number:", "= ax.legend(*traj.legend_elements(), loc=\"upper right\", bbox_to_anchor=(1.2, 0.94), title=\"States\") ax.add_artist(legend) if ymax", "ax.scatter(x_mu, y_mu, c=np.unique(self.states), cmap='Set1', s=200, zorder=2, edgecolor='black', alpha=0.6) legend =", ":return: \"\"\" bins = np.linspace(-np.pi, np.pi, bins+1) bin_means = (bins[:-1]", "ax.set_ylim(0, ymax) ax.yaxis.set_major_locator(MaxNLocator(integer=True)) colorline(x, y, cmap=cmap, zorder=1) norm = mpl.colors.Normalize(vmin=0,", "np.diag_indices(len(design_trans)) design_trans[diag_ix] = 0 design_trans[design_trans < thresh] = 0 design_trans[design_trans", "self.bin_means, self.signature = self.get_signature() def get_signature(self): \"\"\" Calculate phenotypic signature", "and y Optionally specify colors in the array z Optionally", "theta, dt, bins=12): \"\"\" Bin rho values and dwell time", "\"state and 
transition vectors are binned differently and can\" \\", "y = self.X[:, 0], self.X[:, 1] x_mu, y_mu = self.means[:,", "trans_rd_bins, trans_dt_bins = self.rho_dt_bins(trans_rho, trans_theta, trans_dt) assert (bin_means_1 == bin_means_2).all(),", "0 else 0 for i in range(1, len(bins))] bin_dt =", "0 else 0 for i in range(1, len(bins))] return bin_means,", "0], self.means[:, 1] mu_x_dist = mu_x - mu_x[:, np.newaxis] mu_y_dist", "= plt.get_cmap(\"Blues\") N = 12 width = (2 * np.pi)", "ax = plt.subplots(figsize=(5, 5), subplot_kw={'projection': 'polar'}) cmap = plt.get_cmap(\"Oranges\") N", "np.pi, bins+1) bin_means = (bins[:-1] + bins[1:]) / 2 bin_ix", "ax.legend(*traj.legend_elements(), loc=\"upper right\", bbox_to_anchor=(1.2, 0.94), title=\"States\") ax.add_artist(legend) if ymax is", "input -- this is a hack z = np.array([z]) z", "0.5, 1]) return fig, ax def plot_transition(self, ymax=None): \"\"\" Plot", "as plt import matplotlib.collections as mcoll from matplotlib.ticker import MaxNLocator", "transitions trans_rho, trans_theta = self.transition_vectors() trans_dt = self.norm_trans_time() bin_means_2, trans_rd_bins,", "s=200, zorder=2, edgecolor='black', alpha=0.6) legend = ax.legend(*traj.legend_elements(), loc=\"upper right\", bbox_to_anchor=(1.2,", "i in range(1, len(bins))] return bin_means, bin_rd, bin_dt def transition_vectors(self):", "trans_rd_bins, trans_dt_bins)) class Saphire(PhenoSign): \"\"\"Implementation of the SAPHIRE algorithm for", "trans_dt_bins)) class Saphire(PhenoSign): \"\"\"Implementation of the SAPHIRE algorithm for plotting", "* self.design_transition()).flatten() return trans_rho, trans_theta def design_transition(self, thresh=0.1): design_trans =", "axis=1) return segments def truncate_colormap(cmap, minval=0.0, maxval=1.0, n=100): ''' https://stackoverflow.com/a/18926541", "bins+1) bin_means = (bins[:-1] + bins[1:]) / 2 bin_ix =", "+ y ** 2) theta = np.arctan2(y, x) return rho,", "self.trans = self.model.transmat_.copy() def 
rho_dt_bins(self, rho, theta, dt, bins=12): \"\"\"", "= np.linspace(0.0, 1.0, len(x)) # Special case if a single", "mu_x_dist = mu_x - mu_x[:, np.newaxis] mu_y_dist = mu_y -", "0 design_trans[design_trans >= thresh] = 1 return design_trans def norm_trans_time(self):", "= [dt[(bin_ix == i) & (dt > 0)].sum() if len(dt[(bin_ix", "cmap=cmap, zorder=1) norm = mpl.colors.Normalize(vmin=0, vmax=48) cax = fig.add_axes([0.94, 0.15,", "ax def colorline(x, y, z=None, cmap=plt.get_cmap('copper'), norm=plt.Normalize(0.0, 1.0), linewidth=3, alpha=1.0,", "trans_theta = self.cart2pol(dist_vect) trans_rho = (trans_rho.reshape((self.n_states, self.n_states)) * self.design_transition()).flatten() return", "mpl.colors.Normalize(vmin=0, vmax=48) cax = fig.add_axes([0.94, 0.15, 0.05, 0.3]) fig.colorbar(mpl.cm.ScalarMappable(norm=norm, cmap=cmap),", "line with coordinates x and y Optionally specify colors in", "and a line width \"\"\" # Default colors equally spaced", "y = arr[:, 0], arr[:, 1] rho = np.sqrt(x **", "trans_rho, trans_theta = self.cart2pol(dist_vect) trans_rho = (trans_rho.reshape((self.n_states, self.n_states)) * self.design_transition()).flatten()", "<NAME>, <NAME>. Time series modeling of live-cell shape dynamics for", "2 bin_ix = np.digitize(theta, bins) bin_rd = [rho[(bin_ix == i)", "\"\"\" Transition vectors between states on polar coordinates. :return: \"\"\"", "Plot cell states. 
\"\"\" bin_rd, bin_dt = self.signature[0, :], self.signature[1,", "is a hack z = np.array([z]) z = np.asarray(z) segments", ":param rho: :param theta: :param dt: :param bins: :return: \"\"\"", ":param dt: :param bins: :return: \"\"\" bins = np.linspace(-np.pi, np.pi,", "norm = mpl.colors.Normalize(vmin=0, vmax=1) cax = fig.add_axes([0.94, 0.15, 0.05, 0.3])", "/ N ax.bar(self.bin_means, bin_rd, width=width, color=cmap(bin_dt)) if ymax is not", "1] fig, ax = plt.subplots(figsize=(5, 5), subplot_kw={'projection': projection}) ax.scatter(x, y,", "shape {X.shape}\" self.X = X self.means = self.model.means_.copy() self.states =", "\"\"\" Cartesion space to polar space. Args: arr (numpy.array): Array", "rho: :param theta: :param dt: :param bins: :return: \"\"\" bins", "phenotypic signature for a given model. :return: bin_means, array of", "__init__(self, model, X): super(Saphire, self).__init__(model, X) def plot_traj(self, projection='cartesian', ymax=None):", "edgecolor='black', alpha=0.6) legend = ax.legend(*traj.legend_elements(), loc=\"upper right\", bbox_to_anchor=(1.2, 0.94), title=\"States\")", "numerical input -- this is a hack z = np.array([z])", "= self.model.transmat_.copy() def rho_dt_bins(self, rho, theta, dt, bins=12): \"\"\" Bin", "transition time. :return: \"\"\" unique, counts = np.unique(self.states, return_counts=True) sort_ix", "self.means[:, 0], self.means[:, 1] fig, ax = plt.subplots(figsize=(5, 5), subplot_kw={'projection':", "= self.cart2pol(self.means) state_dt = self.norm_state_time() bin_means_1, state_rd_bins, state_dt_bins = self.rho_dt_bins(mu_rho,", "ymax=None): \"\"\" Plot cell states. \"\"\" bin_rd, bin_dt = self.signature[0,", "cart2pol(arr): \"\"\" Cartesion space to polar space. 
Args: arr (numpy.array):", "time', ticks=[0, 0.5, 1]) return fig, ax def colorline(x, y,", "__init__(self, model, X): self.model = model assert len(X.shape) == 2,", "= X self.means = self.model.means_.copy() self.states = self.model.predict(X) self.n_states =", "segments = np.concatenate([points[:-1], points[1:]], axis=1) return segments def truncate_colormap(cmap, minval=0.0,", "0)]) > 0 else 0 for i in range(1, len(bins))]", "(dt > 0)]) > 0 else 0 for i in", "'polar': y, x = self.cart2pol(self.X) y_mu, x_mu = self.cart2pol(self.means) else:", "fig, ax = plt.subplots(figsize=(5, 5), subplot_kw={'projection': projection}) ax.scatter(x, y, c=self.states,", "bin_rd, bin_dt = self.signature[0, :], self.signature[1, :] fig, ax =", "between cell states. \"\"\" bin_rd, bin_dt = self.signature[2, :], self.signature[3,", "self.transition_vectors() trans_dt = self.norm_trans_time() bin_means_2, trans_rd_bins, trans_dt_bins = self.rho_dt_bins(trans_rho, trans_theta,", "ymax=None): \"\"\" Plot transition between cell states. 
\"\"\" bin_rd, bin_dt", "bins: :return: \"\"\" bins = np.linspace(-np.pi, np.pi, bins+1) bin_means =", "case if a single number: if not hasattr(z, \"__iter__\"): #", "dt = (counts * self.design_transition()).flatten() return dt / dt.sum() def", "# Default colors equally spaced on [0,1]: if z is", "self.cart2pol(self.means) else: x, y = self.X[:, 0], self.X[:, 1] x_mu,", "state_dt) # transitions trans_rho, trans_theta = self.transition_vectors() trans_dt = self.norm_trans_time()", "= mu_x - mu_x[:, np.newaxis] mu_y_dist = mu_y - mu_y[:,", "= unique.argsort() counts = counts[sort_ix] # normalize by transition probability", "\"\"\" x, y = arr[:, 0], arr[:, 1] rho =", "ymax (int) \"\"\" avail_proj = ['cartesian', 'polar'] projection = projection.lower()", "projection}) ax.scatter(x, y, c=self.states, cmap='Set1', zorder=2) traj = ax.scatter(x_mu, y_mu,", "np.newaxis] dist_vect = np.column_stack((mu_x_dist.flatten(), mu_y_dist.flatten())) trans_rho, trans_theta = self.cart2pol(dist_vect) trans_rho", "return_counts=True) sort_ix = unique.argsort() counts = counts[sort_ix] # normalize by", "cell states. \"\"\" bin_rd, bin_dt = self.signature[0, :], self.signature[1, :]", "array \"\"\" points = np.array([x, y]).T.reshape(-1, 1, 2) segments =", "\\ \"not be concatenated.\" return bin_means_1, np.vstack((state_rd_bins, state_dt_bins, trans_rd_bins, trans_dt_bins))", "np.array([x, y]).T.reshape(-1, 1, 2) segments = np.concatenate([points[:-1], points[1:]], axis=1) return", "and y) array \"\"\" points = np.array([x, y]).T.reshape(-1, 1, 2)", "i in range(1, len(bins))] bin_dt = [dt[(bin_ix == i) &", "points[1:]], axis=1) return segments def truncate_colormap(cmap, minval=0.0, maxval=1.0, n=100): '''", "dwell times 3. transition distances 3. transition dwell times \"\"\"", "== i) & (dt > 0)].sum() if len(dt[(bin_ix == i)", "def cart2pol(arr): \"\"\" Cartesion space to polar space. 
Args: arr", "assert projection in avail_proj, f\"projection unknown: {projection}\" if projection ==", "transition dwell time', ticks=[0, 0.5, 1]) return fig, ax def", "& (dt > 0)]) > 0 else 0 for i", "np.digitize(theta, bins) bin_rd = [rho[(bin_ix == i) & (rho >", ":return: \"\"\" mu_x, mu_y = self.means[:, 0], self.means[:, 1] mu_x_dist", "(counts * self.design_transition()).flatten() return dt / dt.sum() def norm_state_time(self): \"\"\"", "plt.show() return fig, ax def plot_states(self, ymax=None): \"\"\" Plot cell", "plt import matplotlib.collections as mcoll from matplotlib.ticker import MaxNLocator plt.style.use('seaborn-darkgrid')", "\"\"\" bin_rd, bin_dt = self.signature[0, :], self.signature[1, :] fig, ax", ":] fig, ax = plt.subplots(figsize=(5, 5), subplot_kw={'projection': 'polar'}) cmap =", "unique.argsort() counts = counts[sort_ix] # normalize by transition probability dt", "LineCollection: an array of the form numlines x (points per", "\"\"\" Plot transition between cell states. 
\"\"\" bin_rd, bin_dt =", "plt.get_cmap(cmap) new_cmap = mpl.colors.LinearSegmentedColormap.from_list( 'trunc({n},{a:.2f},{b:.2f})'.format(n=cmap.name, a=minval, b=maxval), cmap(np.linspace(minval, maxval, n)))", "the form numlines x (points per line) x 2 (x", "cmap=plt.get_cmap('copper'), norm=plt.Normalize(0.0, 1.0), linewidth=3, alpha=1.0, zorder=1): \"\"\" Plot a colored", "np.linspace(-np.pi, np.pi, bins+1) bin_means = (bins[:-1] + bins[1:]) / 2", "title=\"States\") ax.add_artist(legend) if ymax is not None: ax.set_ylim(0, ymax) ax.yaxis.set_major_locator(MaxNLocator(integer=True))", "= (bins[:-1] + bins[1:]) / 2 bin_ix = np.digitize(theta, bins)", "X) self.bin_means, self.signature = self.get_signature() def get_signature(self): \"\"\" Calculate phenotypic", "1, 2) segments = np.concatenate([points[:-1], points[1:]], axis=1) return segments def", "y_mu = self.means[:, 0], self.means[:, 1] fig, ax = plt.subplots(figsize=(5,", "n=100): ''' https://stackoverflow.com/a/18926541 ''' if isinstance(cmap, str): cmap = plt.get_cmap(cmap)", "probability dt = (counts * self.design_transition()).flatten() return dt / dt.sum()", "unique.argsort() counts = counts[sort_ix] return counts / counts.sum() @staticmethod def", "polar. 
ymax (int) \"\"\" avail_proj = ['cartesian', 'polar'] projection =", "counts = counts[sort_ix] return counts / counts.sum() @staticmethod def cart2pol(arr):", "PhenoSign(BaseTraj): \"\"\"Phenotypic Signature class.\"\"\" def __init__(self, model, X): super(PhenoSign, self).__init__(model,", "Args: arr (numpy.array): Array of shape [n_state x dims] \"\"\"", "None: ax.set_ylim(0, ymax) ax.yaxis.set_major_locator(MaxNLocator(integer=True)) norm = mpl.colors.Normalize(vmin=0, vmax=1) cax =", "= np.array([x, y]).T.reshape(-1, 1, 2) segments = np.concatenate([points[:-1], points[1:]], axis=1)", "def design_transition(self, thresh=0.1): design_trans = self.trans diag_ix = np.diag_indices(len(design_trans)) design_trans[diag_ix]", "in the array z Optionally specify a colormap, a norm", "function and a line width \"\"\" # Default colors equally", "counts = np.unique(self.states, return_counts=True) sort_ix = unique.argsort() counts = counts[sort_ix]", "matplotlib.pyplot as plt import matplotlib.collections as mcoll from matplotlib.ticker import", "= ['cartesian', 'polar'] projection = projection.lower() assert projection in avail_proj,", "= mpl.colors.Normalize(vmin=0, vmax=1) cax = fig.add_axes([0.94, 0.15, 0.05, 0.3]) fig.colorbar(mpl.cm.ScalarMappable(norm=norm,", "= model assert len(X.shape) == 2, f\"X should be 2-d,", "def __init__(self, model, X): self.model = model assert len(X.shape) ==", "return design_trans def norm_trans_time(self): \"\"\" Normalized transition time. :return: \"\"\"", "the array z Optionally specify a colormap, a norm function", "< thresh] = 0 design_trans[design_trans >= thresh] = 1 return", "distances 2. state dwell times 3. transition distances 3. transition", "specify a colormap, a norm function and a line width", "for i in range(1, len(bins))] return bin_means, bin_rd, bin_dt def", "1. state radial distances 2. state dwell times 3. 
transition", "in range(1, len(bins))] return bin_means, bin_rd, bin_dt def transition_vectors(self): \"\"\"", "def norm_state_time(self): \"\"\" Normalized state time. :return: \"\"\" unique, counts", "hasattr(z, \"__iter__\"): # to check for numerical input -- this", "0.94), title=\"States\") ax.add_artist(legend) if ymax is not None: ax.set_ylim(0, ymax)", "= make_segments(x, y) lc = mcoll.LineCollection(segments, array=z, cmap=cmap, norm=norm, linewidth=linewidth,", "/ dt.sum() def norm_state_time(self): \"\"\" Normalized state time. :return: \"\"\"", "return lc def make_segments(x, y): \"\"\" Create list of line", "of live-cell shape dynamics for image-based phenotypic profiling. Integr Biol", "y Optionally specify colors in the array z Optionally specify", "of shape [4 x n_bins] with 1. state radial distances", "cmap=cmap), cax=cax, orientation='vertical', label='Time') plt.show() return fig, ax def plot_states(self,", "transition between cell states. \"\"\" bin_rd, bin_dt = self.signature[2, :],", "distances 3. transition dwell times \"\"\" # states mu_rho, mu_theta", "\"\"\" Normalized state time. 
:return: \"\"\" unique, counts = np.unique(self.states,", "ax = plt.subplots(figsize=(5, 5), subplot_kw={'projection': projection}) ax.scatter(x, y, c=self.states, cmap='Set1',", "norm=norm, linewidth=linewidth, alpha=alpha, zorder=zorder) ax = plt.gca() ax.add_collection(lc) return lc", "= np.column_stack((mu_x_dist.flatten(), mu_y_dist.flatten())) trans_rho, trans_theta = self.cart2pol(dist_vect) trans_rho = (trans_rho.reshape((self.n_states,", "trans_theta, trans_dt) assert (bin_means_1 == bin_means_2).all(), \"state and transition vectors", "normalize by transition probability dt = (counts * self.design_transition()).flatten() return", "array=z, cmap=cmap, norm=norm, linewidth=linewidth, alpha=alpha, zorder=zorder) ax = plt.gca() ax.add_collection(lc)", "coordinates, in the correct format for LineCollection: an array of", "trans_rho = (trans_rho.reshape((self.n_states, self.n_states)) * self.design_transition()).flatten() return trans_rho, trans_theta def", "cax=cax, orientation='vertical', label='Increasing state dwell time', ticks=[0, 0.5, 1]) return", "model, X): super(Saphire, self).__init__(model, X) def plot_traj(self, projection='cartesian', ymax=None): \"\"\"", "alpha=0.6) legend = ax.legend(*traj.legend_elements(), loc=\"upper right\", bbox_to_anchor=(1.2, 0.94), title=\"States\") ax.add_artist(legend)", "counts[sort_ix] return counts / counts.sum() @staticmethod def cart2pol(arr): \"\"\" Cartesion", "5), subplot_kw={'projection': 'polar'}) cmap = plt.get_cmap(\"Blues\") N = 12 width", "Hidden Markov Models. <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>. 
Time", "\"\"\" mu_x, mu_y = self.means[:, 0], self.means[:, 1] mu_x_dist =", "Array of shape [n_state x dims] \"\"\" x, y =", "y) lc = mcoll.LineCollection(segments, array=z, cmap=cmap, norm=norm, linewidth=linewidth, alpha=alpha, zorder=zorder)", "times \"\"\" # states mu_rho, mu_theta = self.cart2pol(self.means) state_dt =", "self.cart2pol(self.X) y_mu, x_mu = self.cart2pol(self.means) else: x, y = self.X[:,", "space to polar space. Args: arr (numpy.array): Array of shape", "self.signature[3, :] fig, ax = plt.subplots(figsize=(5, 5), subplot_kw={'projection': 'polar'}) cmap", "bin_dt def transition_vectors(self): \"\"\" Transition vectors between states on polar", "design_transition(self, thresh=0.1): design_trans = self.trans diag_ix = np.diag_indices(len(design_trans)) design_trans[diag_ix] =", "counts.sum() @staticmethod def cart2pol(arr): \"\"\" Cartesion space to polar space.", "subplot_kw={'projection': 'polar'}) cmap = plt.get_cmap(\"Oranges\") N = 12 width =", "bin_means = (bins[:-1] + bins[1:]) / 2 bin_ix = np.digitize(theta,", "concatenated.\" return bin_means_1, np.vstack((state_rd_bins, state_dt_bins, trans_rd_bins, trans_dt_bins)) class Saphire(PhenoSign): \"\"\"Implementation", "cax=cax, orientation='vertical', label='Increasing transition dwell time', ticks=[0, 0.5, 1]) return", "if len(dt[(bin_ix == i) & (dt > 0)]) > 0", "[n_state x dims] \"\"\" x, y = arr[:, 0], arr[:,", "12 width = (2 * np.pi) / N ax.bar(self.bin_means, bin_rd,", "def colorline(x, y, z=None, cmap=plt.get_cmap('copper'), norm=plt.Normalize(0.0, 1.0), linewidth=3, alpha=1.0, zorder=1):", "bin_means, bin_rd, bin_dt def transition_vectors(self): \"\"\" Transition vectors between states", "(points per line) x 2 (x and y) array \"\"\"", "= mpl.colors.Normalize(vmin=0, vmax=48) cax = fig.add_axes([0.94, 0.15, 0.05, 0.3]) fig.colorbar(mpl.cm.ScalarMappable(norm=norm,", "def truncate_colormap(cmap, minval=0.0, maxval=1.0, n=100): ''' https://stackoverflow.com/a/18926541 ''' if 
isinstance(cmap,", "Calculate phenotypic signature for a given model. :return: bin_means, array", "the SAPHIRE algorithm for plotting Hidden Markov Models. <NAME>, <NAME>,", "0.05, 0.3]) fig.colorbar(mpl.cm.ScalarMappable(norm=norm, cmap=cmap), cax=cax, orientation='vertical', label='Increasing transition dwell time',", "''' https://stackoverflow.com/a/18926541 ''' if isinstance(cmap, str): cmap = plt.get_cmap(cmap) new_cmap", "cell states. \"\"\" bin_rd, bin_dt = self.signature[2, :], self.signature[3, :]", "Bin rho values and dwell time on polar coordinates. :param", "* np.pi) / N ax.bar(self.bin_means, bin_rd, width=width, color=cmap(bin_dt)) if ymax", "projection='cartesian', ymax=None): \"\"\" Plot cell trajectory. Args: projection (str): cartesian", "z is None: z = np.linspace(0.0, 1.0, len(x)) # Special", "unique, counts = np.unique(self.states, return_counts=True) sort_ix = unique.argsort() counts =", "return fig, ax def plot_transition(self, ymax=None): \"\"\" Plot transition between", "= np.linspace(-np.pi, np.pi, bins+1) bin_means = (bins[:-1] + bins[1:]) /", "0], self.means[:, 1] fig, ax = plt.subplots(figsize=(5, 5), subplot_kw={'projection': projection})", "label='Increasing state dwell time', ticks=[0, 0.5, 1]) return fig, ax", "self.rho_dt_bins(mu_rho, mu_theta, state_dt) # transitions trans_rho, trans_theta = self.transition_vectors() trans_dt", "0)].mean() if len(rho[(bin_ix == i) & (rho > 0)]) >", "fig.add_axes([0.94, 0.15, 0.05, 0.3]) fig.colorbar(mpl.cm.ScalarMappable(norm=norm, cmap=cmap), cax=cax, orientation='vertical', label='Increasing transition", "times 3. transition distances 3. transition dwell times \"\"\" #", "state radial distances 2. state dwell times 3. transition distances", "plt.get_cmap(\"Blues\") N = 12 width = (2 * np.pi) /", "5), subplot_kw={'projection': 'polar'}) cmap = plt.get_cmap(\"Oranges\") N = 12 width" ]
[ "redirect from django.views import View from django.views.generic import ( ListView,", "user = Account.objects.filter(is_superuser=False).count() rest = Restaurant.objects.all().count() rating = RestaurantReview.objects.exclude(rating__isnull=True).count() review", "import * from account.forms import * from data.models import *", "user = Account.objects.get(account_id=pk) form = editUserForm(instance=user) extra_context = { \"form\":", "= { \"form\": form, \"pk\": pk } return render(request, \"profile.html\",", "'accounts.view_account' template_name = \"userList.html\" queryset = Account.objects.all() class UpdateProfilePicView(View, LoginRequiredMixin,", "messages.success(request, \"Successfully updated profile!\") return redirect(f'/viewUser/{user.account_id}') else: form = editUserForm(instance=user)", "post(self, request, pk, *args, **kwargs): user = Account.objects.get(account_id=pk) form =", "form.is_valid(): user = form.save() role = request.POST.get(\"role\") user.save() messages.success(request, \"Successfully", "from django.contrib.auth.models import auth from django.contrib import messages from django.contrib.auth.mixins", "get(self, request, pk, *args, **kwargs): user = Account.objects.get(account_id=pk) form =", "\"rest_count\" : rest, \"rating_count\" : rating, \"review_count\" : review, }", "from django.contrib import messages from django.contrib.auth.mixins import PermissionRequiredMixin, LoginRequiredMixin #", "your login processes') return redirect(\"login\") else: form = loginForm() create_form", "remember_me: request.session.set_expiry(1209600) return redirect(\"home\") else: messages.warning(request, 'There is an issue", "import render, redirect from django.views import View from django.views.generic import", "pk, *args, **kwargs): user = Account.objects.get(account_id=pk) form = editUserForm(instance=user) extra_context", "def post(self, request, pk, *args, **kwargs): user = Account.objects.get(account_id=pk) user.profile_pic", "**kwargs): 
user = Account.objects.get(account_id=pk) form = editUserForm(instance=user) extra_context = {", "\"login.html\", context) def logout(request): auth.logout(request) return redirect(\"login\") def register(request): if", "LoginRequiredMixin # Create your views here. def login(request): if request.method", "\"POST\": form = loginForm(data=request.POST) if form.is_valid(): user = form.get_user() auth_login(request,", "\"rating_count\" : rating, \"review_count\" : review, } return render(request, \"home.html\",", "valid information.\") return render(request, \"editUser.html\", extra_context) def get(self, request, pk,", "django.contrib import messages from django.contrib.auth.mixins import PermissionRequiredMixin, LoginRequiredMixin # Create", "return render(request, \"login.html\", context) def logout(request): auth.logout(request) return redirect(\"login\") def", "from venv import create from django.shortcuts import render, redirect from", "class EditUserView(View, LoginRequiredMixin, PermissionRequiredMixin): permission_required = 'accounts.change_account' raise_exception = True", "login processes') return redirect(\"login\") else: form = loginForm() create_form =", "= RestaurantReview.objects.exclude(review__isnull=True).count() context = { \"user_count\" : user, \"rest_count\" :", "= { \"form\": form, } return render(request, \"editUser.html\", extra_context) class", "Account.objects.all() class UpdateProfilePicView(View, LoginRequiredMixin, PermissionRequiredMixin): permission_required = 'accounts.change_account' raise_exception =", "review = RestaurantReview.objects.exclude(review__isnull=True).count() context = { \"user_count\" : user, \"rest_count\"", "import PermissionRequiredMixin, LoginRequiredMixin # Create your views here. 
def login(request):", "register(request): if request.method == \"POST\": create_form = createUserForm(data=request.POST) if create_form.is_valid():", "loginForm(data=request.POST) if form.is_valid(): user = form.get_user() auth_login(request, user) print(\"succesful login\")", "a valid information.\") return render(request, \"editUser.html\", extra_context) def get(self, request,", "= Account.objects.get(account_id=pk) form = editUserForm(instance=user) extra_context = { \"form\": form,", "PermissionRequiredMixin, ListView): permission_required = 'accounts.view_account' template_name = \"userList.html\" queryset =", "viewUserForm(request.POST, instance=user) return redirect(\"userlist\") def get(self, request, pk, *args, **kwargs):", ": review, } return render(request, \"home.html\", context) class ViewUserView(View, LoginRequiredMixin,", "createUserForm(data=request.POST) if create_form.is_valid(): user = create_form.save(commit=False) user.save() messages.success(request, \"User created", "= create_form.save(commit=False) user.save() messages.success(request, \"User created successfully!\") return redirect(\"login\") else:", "messages.success(request, \"User created successfully!\") return redirect(\"login\") else: messages.error(request, \"User creation", "form.get_user() auth_login(request, user) print(\"succesful login\") remember_me = form.cleaned_data[\"remember_me\"] if remember_me:", "= editUserForm(instance=user) extra_context = { \"form\": form, } return render(request,", "queryset = Account.objects.all() class UpdateProfilePicView(View, LoginRequiredMixin, PermissionRequiredMixin): permission_required = 'accounts.change_account'", "**kwargs): user = Account.objects.get(account_id=pk) user.profile_pic = request.FILES.get('profile-pic') user.save() return redirect('viewUser',", "= Account.objects.get(account_id=pk) form = viewUserForm(request.POST, instance=user) return redirect(\"userlist\") def get(self,", "user = Account.objects.get(account_id=pk) form 
= viewUserForm(instance=user) context = { \"form\":", "information.\") return render(request, \"editUser.html\", extra_context) def get(self, request, pk, *args,", "Account.objects.filter(is_superuser=False).count() rest = Restaurant.objects.all().count() rating = RestaurantReview.objects.exclude(rating__isnull=True).count() review = RestaurantReview.objects.exclude(review__isnull=True).count()", "form = editUserForm(instance=user) extra_context = { \"form\": form, } return", "user = form.save() role = request.POST.get(\"role\") user.save() messages.success(request, \"Successfully updated", "\"profile.html\", context) class EditUserView(View, LoginRequiredMixin, PermissionRequiredMixin): permission_required = 'accounts.change_account' raise_exception", "user = form.get_user() auth_login(request, user) print(\"succesful login\") remember_me = form.cleaned_data[\"remember_me\"]", "auth_login from django.contrib.auth.models import auth from django.contrib import messages from", "Please input a valid information.\") return render(request, \"editUser.html\", extra_context) def", "else: messages.error(request, \"User creation failed\") else: create_form = createUserForm() return", "login as auth_login from django.contrib.auth.models import auth from django.contrib import", "} print('something wrong') messages.error(request, \"Invalid input! 
Please input a valid", "django.views import View from django.views.generic import ( ListView, ) from", "creation failed\") else: create_form = createUserForm() return render(request, \"login.html\", {\"create_form\":", "pk, *args, **kwargs): user = Account.objects.get(account_id=pk) user.profile_pic = request.FILES.get('profile-pic') user.save()", "create_form = createUserForm() return render(request, \"login.html\", {\"create_form\": create_form}) def homepage(request):", "updated profile!\") return redirect(f'/viewUser/{user.account_id}') else: form = editUserForm(instance=user) extra_context =", "= { \"form\": form, } print('something wrong') messages.error(request, \"Invalid input!", "import imp from venv import create from django.shortcuts import render,", "def get(self, request, pk, *args, **kwargs): user = Account.objects.get(account_id=pk) form", "} return render(request, \"profile.html\", context) class EditUserView(View, LoginRequiredMixin, PermissionRequiredMixin): permission_required", "= form.save() role = request.POST.get(\"role\") user.save() messages.success(request, \"Successfully updated profile!\")", "user.save() messages.success(request, \"Successfully updated profile!\") return redirect(f'/viewUser/{user.account_id}') else: form =", "redirect(f'/viewUser/{user.account_id}') else: form = editUserForm(instance=user) extra_context = { \"form\": form,", "views here. 
def login(request): if request.method == \"POST\": form =", "= createUserForm() return render(request, \"login.html\", {\"create_form\": create_form}) def homepage(request): user", "if remember_me: request.session.set_expiry(1209600) return redirect(\"home\") else: messages.warning(request, 'There is an", "\"user_count\" : user, \"rest_count\" : rest, \"rating_count\" : rating, \"review_count\"", "post(self, request, pk, *args, **kwargs): user = Account.objects.get(account_id=pk) user.profile_pic =", "redirect(\"userlist\") def get(self, request, pk, *args, **kwargs): user = Account.objects.get(account_id=pk)", "if form.is_valid(): user = form.get_user() auth_login(request, user) print(\"succesful login\") remember_me", "= form.cleaned_data[\"remember_me\"] if remember_me: request.session.set_expiry(1209600) return redirect(\"home\") else: messages.warning(request, 'There", "ListView): permission_required = 'accounts.view_account' template_name = \"userList.html\" queryset = Account.objects.all()", "issue with your login processes') return redirect(\"login\") else: form =", "LoginRequiredMixin, PermissionRequiredMixin): permission_required = 'accounts.change_account' raise_exception = True def post(self,", "an issue with your login processes') return redirect(\"login\") else: form", "print('something wrong') messages.error(request, \"Invalid input! Please input a valid information.\")", "create_form.is_valid(): user = create_form.save(commit=False) user.save() messages.success(request, \"User created successfully!\") return", "PermissionRequiredMixin, LoginRequiredMixin # Create your views here. 
def login(request): if", "= Account.objects.get(account_id=pk) form = editUserForm(request.POST, instance=user) if form.is_valid(): user =", "data.models import * from django.contrib.auth import login as auth_login from", "rest = Restaurant.objects.all().count() rating = RestaurantReview.objects.exclude(rating__isnull=True).count() review = RestaurantReview.objects.exclude(review__isnull=True).count() context", "processes') return redirect(\"login\") else: form = loginForm() create_form = createUserForm()", "if request.method == \"POST\": form = loginForm(data=request.POST) if form.is_valid(): user", "create from django.shortcuts import render, redirect from django.views import View", "* from account.forms import * from data.models import * from", "== \"POST\": create_form = createUserForm(data=request.POST) if create_form.is_valid(): user = create_form.save(commit=False)", "View from django.views.generic import ( ListView, ) from account.models import", "return redirect(\"login\") else: form = loginForm() create_form = createUserForm() context", "} return render(request, \"login.html\", context) def logout(request): auth.logout(request) return redirect(\"login\")", "form, \"create_form\": create_form } return render(request, \"login.html\", context) def logout(request):", "= True def post(self, request, pk, *args, **kwargs): user =", "PermissionRequiredMixin): permission_required = 'accounts.view_account' raise_exception = True def post(self, request,", "def login(request): if request.method == \"POST\": form = loginForm(data=request.POST) if", "'accounts.change_account' raise_exception = True def post(self, request, pk, *args, **kwargs):", "user = Account.objects.get(account_id=pk) user.profile_pic = request.FILES.get('profile-pic') user.save() return redirect('viewUser', pk)", "\"POST\": create_form = createUserForm(data=request.POST) if create_form.is_valid(): user = create_form.save(commit=False) user.save()", "*args, **kwargs): user = 
Account.objects.get(account_id=pk) form = editUserForm(request.POST, instance=user) if", "= request.FILES.get('profile-pic') user.save() return redirect('viewUser', pk) def deleteUser(request, event_id): event", "pk, *args, **kwargs): user = Account.objects.get(account_id=pk) form = viewUserForm(instance=user) context", "permission_required = 'accounts.view_account' template_name = \"userList.html\" queryset = Account.objects.all() class", "from data.models import * from django.contrib.auth import login as auth_login", "redirect(\"login\") else: messages.error(request, \"User creation failed\") else: create_form = createUserForm()", "\"review_count\" : review, } return render(request, \"home.html\", context) class ViewUserView(View,", "\"form\": form, \"pk\": pk } return render(request, \"profile.html\", context) class", "= 'accounts.change_account' raise_exception = True def post(self, request, pk, *args,", "request, pk, *args, **kwargs): user = Account.objects.get(account_id=pk) form = viewUserForm(request.POST,", "input! 
Please input a valid information.\") return render(request, \"editUser.html\", extra_context)", "messages.warning(request, 'There is an issue with your login processes') return", "from django.contrib.auth import login as auth_login from django.contrib.auth.models import auth", ": user, \"rest_count\" : rest, \"rating_count\" : rating, \"review_count\" :", "else: messages.warning(request, 'There is an issue with your login processes')", "context) def logout(request): auth.logout(request) return redirect(\"login\") def register(request): if request.method", "pk) def deleteUser(request, event_id): event = Account.objects.get(pk=event_id) event.delete() return redirect('userlist')", "loginForm() create_form = createUserForm() context = { \"form\": form, \"create_form\":", "if form.is_valid(): user = form.save() role = request.POST.get(\"role\") user.save() messages.success(request,", "\"editUser.html\", extra_context) def get(self, request, pk, *args, **kwargs): user =", "account.models import * from account.forms import * from data.models import", "def register(request): if request.method == \"POST\": create_form = createUserForm(data=request.POST) if", "return render(request, \"editUser.html\", extra_context) def get(self, request, pk, *args, **kwargs):", "\"Successfully updated profile!\") return redirect(f'/viewUser/{user.account_id}') else: form = editUserForm(instance=user) extra_context", "*args, **kwargs): user = Account.objects.get(account_id=pk) form = editUserForm(instance=user) extra_context =", "from django.views import View from django.views.generic import ( ListView, )", "# Create your views here. 
def login(request): if request.method ==", "def homepage(request): user = Account.objects.filter(is_superuser=False).count() rest = Restaurant.objects.all().count() rating =", "login\") remember_me = form.cleaned_data[\"remember_me\"] if remember_me: request.session.set_expiry(1209600) return redirect(\"home\") else:", "import auth from django.contrib import messages from django.contrib.auth.mixins import PermissionRequiredMixin,", "= Restaurant.objects.all().count() rating = RestaurantReview.objects.exclude(rating__isnull=True).count() review = RestaurantReview.objects.exclude(review__isnull=True).count() context =", "createUserForm() context = { \"form\": form, \"create_form\": create_form } return", "redirect(\"home\") else: messages.warning(request, 'There is an issue with your login", "render, redirect from django.views import View from django.views.generic import (", "* from data.models import * from django.contrib.auth import login as", "permission_required = 'accounts.view_account' raise_exception = True def post(self, request, pk,", "extra_context = { \"form\": form, } return render(request, \"editUser.html\", extra_context)", "render(request, \"home.html\", context) class ViewUserView(View, LoginRequiredMixin, PermissionRequiredMixin): permission_required = 'accounts.view_account'", "= Account.objects.get(account_id=pk) form = viewUserForm(instance=user) context = { \"form\": form,", "user = create_form.save(commit=False) user.save() messages.success(request, \"User created successfully!\") return redirect(\"login\")", "= 'accounts.view_account' template_name = \"userList.html\" queryset = Account.objects.all() class UpdateProfilePicView(View,", "review, } return render(request, \"home.html\", context) class ViewUserView(View, LoginRequiredMixin, PermissionRequiredMixin):", "EditUserView(View, LoginRequiredMixin, PermissionRequiredMixin): permission_required = 'accounts.change_account' raise_exception = True def", "True def post(self, request, pk, *args, 
**kwargs): user = Account.objects.get(account_id=pk)", "def logout(request): auth.logout(request) return redirect(\"login\") def register(request): if request.method ==", "context = { \"user_count\" : user, \"rest_count\" : rest, \"rating_count\"", "create_form } return render(request, \"login.html\", context) def logout(request): auth.logout(request) return", "form = editUserForm(instance=user) extra_context = { \"form\": form, } print('something", "return redirect('viewUser', pk) def deleteUser(request, event_id): event = Account.objects.get(pk=event_id) event.delete()", "editUserForm(instance=user) extra_context = { \"form\": form, } return render(request, \"editUser.html\",", "import * from data.models import * from django.contrib.auth import login", "**kwargs): user = Account.objects.get(account_id=pk) form = editUserForm(request.POST, instance=user) if form.is_valid():", "return render(request, \"profile.html\", context) class EditUserView(View, LoginRequiredMixin, PermissionRequiredMixin): permission_required =", "= { \"form\": form, \"create_form\": create_form } return render(request, \"login.html\",", "here. 
def login(request): if request.method == \"POST\": form = loginForm(data=request.POST)", "else: create_form = createUserForm() return render(request, \"login.html\", {\"create_form\": create_form}) def", "= createUserForm(data=request.POST) if create_form.is_valid(): user = create_form.save(commit=False) user.save() messages.success(request, \"User", "return render(request, \"editUser.html\", extra_context) class UserListView(LoginRequiredMixin, PermissionRequiredMixin, ListView): permission_required =", "PermissionRequiredMixin): permission_required = 'accounts.change_account' raise_exception = True def post(self, request,", "\"login.html\", {\"create_form\": create_form}) def homepage(request): user = Account.objects.filter(is_superuser=False).count() rest =", "user) print(\"succesful login\") remember_me = form.cleaned_data[\"remember_me\"] if remember_me: request.session.set_expiry(1209600) return", "request.FILES.get('profile-pic') user.save() return redirect('viewUser', pk) def deleteUser(request, event_id): event =", "render(request, \"profile.html\", context) class EditUserView(View, LoginRequiredMixin, PermissionRequiredMixin): permission_required = 'accounts.change_account'", "django.views.generic import ( ListView, ) from account.models import * from", "user = Account.objects.get(account_id=pk) form = viewUserForm(request.POST, instance=user) return redirect(\"userlist\") def", "Account.objects.get(account_id=pk) user.profile_pic = request.FILES.get('profile-pic') user.save() return redirect('viewUser', pk) def deleteUser(request,", "import login as auth_login from django.contrib.auth.models import auth from django.contrib", "= \"userList.html\" queryset = Account.objects.all() class UpdateProfilePicView(View, LoginRequiredMixin, PermissionRequiredMixin): permission_required", "create_form = createUserForm() context = { \"form\": form, \"create_form\": create_form", "django.contrib.auth.mixins import PermissionRequiredMixin, LoginRequiredMixin # Create 
your views here. def", "extra_context = { \"form\": form, } print('something wrong') messages.error(request, \"Invalid", "messages.error(request, \"User creation failed\") else: create_form = createUserForm() return render(request,", "class UserListView(LoginRequiredMixin, PermissionRequiredMixin, ListView): permission_required = 'accounts.view_account' template_name = \"userList.html\"", "import View from django.views.generic import ( ListView, ) from account.models", "render(request, \"editUser.html\", extra_context) def get(self, request, pk, *args, **kwargs): user", "return redirect(\"userlist\") def get(self, request, pk, *args, **kwargs): user =", "viewUserForm(instance=user) context = { \"form\": form, \"pk\": pk } return", "= request.POST.get(\"role\") user.save() messages.success(request, \"Successfully updated profile!\") return redirect(f'/viewUser/{user.account_id}') else:", "\"home.html\", context) class ViewUserView(View, LoginRequiredMixin, PermissionRequiredMixin): permission_required = 'accounts.view_account' raise_exception", "create_form = createUserForm(data=request.POST) if create_form.is_valid(): user = create_form.save(commit=False) user.save() messages.success(request,", "'There is an issue with your login processes') return redirect(\"login\")", "= Account.objects.get(account_id=pk) user.profile_pic = request.FILES.get('profile-pic') user.save() return redirect('viewUser', pk) def", "account.forms import * from data.models import * from django.contrib.auth import", "UserListView(LoginRequiredMixin, PermissionRequiredMixin, ListView): permission_required = 'accounts.view_account' template_name = \"userList.html\" queryset", "Account.objects.get(account_id=pk) form = viewUserForm(request.POST, instance=user) return redirect(\"userlist\") def get(self, request,", "*args, **kwargs): user = Account.objects.get(account_id=pk) user.profile_pic = request.FILES.get('profile-pic') user.save() return", "( ListView, ) from account.models import * from 
account.forms import", "createUserForm() return render(request, \"login.html\", {\"create_form\": create_form}) def homepage(request): user =", "\"User creation failed\") else: create_form = createUserForm() return render(request, \"login.html\",", "request, pk, *args, **kwargs): user = Account.objects.get(account_id=pk) form = viewUserForm(instance=user)", "\"Invalid input! Please input a valid information.\") return render(request, \"editUser.html\",", "render(request, \"editUser.html\", extra_context) class UserListView(LoginRequiredMixin, PermissionRequiredMixin, ListView): permission_required = 'accounts.view_account'", "wrong') messages.error(request, \"Invalid input! Please input a valid information.\") return", "form = loginForm() create_form = createUserForm() context = { \"form\":", "= editUserForm(request.POST, instance=user) if form.is_valid(): user = form.save() role =", "} return render(request, \"home.html\", context) class ViewUserView(View, LoginRequiredMixin, PermissionRequiredMixin): permission_required", "class UpdateProfilePicView(View, LoginRequiredMixin, PermissionRequiredMixin): permission_required = 'accounts.change_account' raise_exception = True", "editUserForm(instance=user) extra_context = { \"form\": form, } print('something wrong') messages.error(request,", "import * from django.contrib.auth import login as auth_login from django.contrib.auth.models", "return redirect(\"login\") else: messages.error(request, \"User creation failed\") else: create_form =", "= Account.objects.all() class UpdateProfilePicView(View, LoginRequiredMixin, PermissionRequiredMixin): permission_required = 'accounts.change_account' raise_exception", "= loginForm() create_form = createUserForm() context = { \"form\": form,", "from django.shortcuts import render, redirect from django.views import View from", "django.contrib.auth import login as auth_login from django.contrib.auth.models import auth from", "request.session.set_expiry(1209600) return 
redirect(\"home\") else: messages.warning(request, 'There is an issue with", "logout(request): auth.logout(request) return redirect(\"login\") def register(request): if request.method == \"POST\":", "login(request): if request.method == \"POST\": form = loginForm(data=request.POST) if form.is_valid():", "form = editUserForm(request.POST, instance=user) if form.is_valid(): user = form.save() role", "redirect(\"login\") else: form = loginForm() create_form = createUserForm() context =", "user.save() return redirect('viewUser', pk) def deleteUser(request, event_id): event = Account.objects.get(pk=event_id)", "request.method == \"POST\": form = loginForm(data=request.POST) if form.is_valid(): user =", "render(request, \"login.html\", {\"create_form\": create_form}) def homepage(request): user = Account.objects.filter(is_superuser=False).count() rest", "pk, *args, **kwargs): user = Account.objects.get(account_id=pk) form = viewUserForm(request.POST, instance=user)", "return redirect(\"login\") def register(request): if request.method == \"POST\": create_form =", ": rest, \"rating_count\" : rating, \"review_count\" : review, } return", "pk } return render(request, \"profile.html\", context) class EditUserView(View, LoginRequiredMixin, PermissionRequiredMixin):", "request, pk, *args, **kwargs): user = Account.objects.get(account_id=pk) form = editUserForm(request.POST,", "input a valid information.\") return render(request, \"editUser.html\", extra_context) def get(self,", "= { \"user_count\" : user, \"rest_count\" : rest, \"rating_count\" :", "import create from django.shortcuts import render, redirect from django.views import", "return render(request, \"home.html\", context) class ViewUserView(View, LoginRequiredMixin, PermissionRequiredMixin): permission_required =", "\"form\": form, } print('something wrong') messages.error(request, \"Invalid input! 
Please input", "permission_required = 'accounts.change_account' raise_exception = True def post(self, request, pk,", "request.method == \"POST\": create_form = createUserForm(data=request.POST) if create_form.is_valid(): user =", "Restaurant.objects.all().count() rating = RestaurantReview.objects.exclude(rating__isnull=True).count() review = RestaurantReview.objects.exclude(review__isnull=True).count() context = {", "profile!\") return redirect(f'/viewUser/{user.account_id}') else: form = editUserForm(instance=user) extra_context = {", "your views here. def login(request): if request.method == \"POST\": form", "ListView, ) from account.models import * from account.forms import *", "rating = RestaurantReview.objects.exclude(rating__isnull=True).count() review = RestaurantReview.objects.exclude(review__isnull=True).count() context = { \"user_count\"", "request.POST.get(\"role\") user.save() messages.success(request, \"Successfully updated profile!\") return redirect(f'/viewUser/{user.account_id}') else: form", "homepage(request): user = Account.objects.filter(is_superuser=False).count() rest = Restaurant.objects.all().count() rating = RestaurantReview.objects.exclude(rating__isnull=True).count()", "'accounts.view_account' raise_exception = True def post(self, request, pk, *args, **kwargs):", "django.shortcuts import render, redirect from django.views import View from django.views.generic", "= form.get_user() auth_login(request, user) print(\"succesful login\") remember_me = form.cleaned_data[\"remember_me\"] if", "auth_login(request, user) print(\"succesful login\") remember_me = form.cleaned_data[\"remember_me\"] if remember_me: request.session.set_expiry(1209600)", "{ \"user_count\" : user, \"rest_count\" : rest, \"rating_count\" : rating,", "form = viewUserForm(request.POST, instance=user) return redirect(\"userlist\") def get(self, request, pk,", "context) class EditUserView(View, LoginRequiredMixin, PermissionRequiredMixin): permission_required = 
'accounts.change_account' raise_exception =", "**kwargs): user = Account.objects.get(account_id=pk) form = viewUserForm(instance=user) context = {", "\"User created successfully!\") return redirect(\"login\") else: messages.error(request, \"User creation failed\")", "context = { \"form\": form, \"create_form\": create_form } return render(request,", "if request.method == \"POST\": create_form = createUserForm(data=request.POST) if create_form.is_valid(): user", "instance=user) if form.is_valid(): user = form.save() role = request.POST.get(\"role\") user.save()", "= loginForm(data=request.POST) if form.is_valid(): user = form.get_user() auth_login(request, user) print(\"succesful", "\"create_form\": create_form } return render(request, \"login.html\", context) def logout(request): auth.logout(request)", ": rating, \"review_count\" : review, } return render(request, \"home.html\", context)", "\"editUser.html\", extra_context) class UserListView(LoginRequiredMixin, PermissionRequiredMixin, ListView): permission_required = 'accounts.view_account' template_name", "form = viewUserForm(instance=user) context = { \"form\": form, \"pk\": pk", "{\"create_form\": create_form}) def homepage(request): user = Account.objects.filter(is_superuser=False).count() rest = Restaurant.objects.all().count()", "\"userList.html\" queryset = Account.objects.all() class UpdateProfilePicView(View, LoginRequiredMixin, PermissionRequiredMixin): permission_required =", "user.save() messages.success(request, \"User created successfully!\") return redirect(\"login\") else: messages.error(request, \"User", "{ \"form\": form, } print('something wrong') messages.error(request, \"Invalid input! 
Please", "form.cleaned_data[\"remember_me\"] if remember_me: request.session.set_expiry(1209600) return redirect(\"home\") else: messages.warning(request, 'There is", "from account.forms import * from data.models import * from django.contrib.auth", "= viewUserForm(request.POST, instance=user) return redirect(\"userlist\") def get(self, request, pk, *args,", "context) class ViewUserView(View, LoginRequiredMixin, PermissionRequiredMixin): permission_required = 'accounts.view_account' raise_exception =", "request, pk, *args, **kwargs): user = Account.objects.get(account_id=pk) form = editUserForm(instance=user)", "messages from django.contrib.auth.mixins import PermissionRequiredMixin, LoginRequiredMixin # Create your views", "== \"POST\": form = loginForm(data=request.POST) if form.is_valid(): user = form.get_user()", "rest, \"rating_count\" : rating, \"review_count\" : review, } return render(request,", "editUserForm(request.POST, instance=user) if form.is_valid(): user = form.save() role = request.POST.get(\"role\")", "auth.logout(request) return redirect(\"login\") def register(request): if request.method == \"POST\": create_form", "extra_context) def get(self, request, pk, *args, **kwargs): user = Account.objects.get(account_id=pk)", "Create your views here. 
def login(request): if request.method == \"POST\":", "\"form\": form, } return render(request, \"editUser.html\", extra_context) class UserListView(LoginRequiredMixin, PermissionRequiredMixin,", "{ \"form\": form, } return render(request, \"editUser.html\", extra_context) class UserListView(LoginRequiredMixin,", "from django.contrib.auth.mixins import PermissionRequiredMixin, LoginRequiredMixin # Create your views here.", "} return render(request, \"editUser.html\", extra_context) class UserListView(LoginRequiredMixin, PermissionRequiredMixin, ListView): permission_required", "create_form}) def homepage(request): user = Account.objects.filter(is_superuser=False).count() rest = Restaurant.objects.all().count() rating", "redirect(\"login\") def register(request): if request.method == \"POST\": create_form = createUserForm(data=request.POST)", "form.is_valid(): user = form.get_user() auth_login(request, user) print(\"succesful login\") remember_me =", "form, \"pk\": pk } return render(request, \"profile.html\", context) class EditUserView(View,", "created successfully!\") return redirect(\"login\") else: messages.error(request, \"User creation failed\") else:", "user.profile_pic = request.FILES.get('profile-pic') user.save() return redirect('viewUser', pk) def deleteUser(request, event_id):", "\"pk\": pk } return render(request, \"profile.html\", context) class EditUserView(View, LoginRequiredMixin,", "django.contrib.auth.models import auth from django.contrib import messages from django.contrib.auth.mixins import", "create_form.save(commit=False) user.save() messages.success(request, \"User created successfully!\") return redirect(\"login\") else: messages.error(request,", "else: form = loginForm() create_form = createUserForm() context = {", "if create_form.is_valid(): user = create_form.save(commit=False) user.save() messages.success(request, \"User created successfully!\")", "print(\"succesful login\") remember_me = form.cleaned_data[\"remember_me\"] if 
remember_me: request.session.set_expiry(1209600) return redirect(\"home\")", "= editUserForm(instance=user) extra_context = { \"form\": form, } print('something wrong')", "LoginRequiredMixin, PermissionRequiredMixin): permission_required = 'accounts.view_account' raise_exception = True def post(self,", "as auth_login from django.contrib.auth.models import auth from django.contrib import messages", "RestaurantReview.objects.exclude(rating__isnull=True).count() review = RestaurantReview.objects.exclude(review__isnull=True).count() context = { \"user_count\" : user,", "return render(request, \"login.html\", {\"create_form\": create_form}) def homepage(request): user = Account.objects.filter(is_superuser=False).count()", "imp from venv import create from django.shortcuts import render, redirect", "pk, *args, **kwargs): user = Account.objects.get(account_id=pk) form = editUserForm(request.POST, instance=user)", "form, } return render(request, \"editUser.html\", extra_context) class UserListView(LoginRequiredMixin, PermissionRequiredMixin, ListView):", "form, } print('something wrong') messages.error(request, \"Invalid input! 
Please input a", "context = { \"form\": form, \"pk\": pk } return render(request,", "= Account.objects.filter(is_superuser=False).count() rest = Restaurant.objects.all().count() rating = RestaurantReview.objects.exclude(rating__isnull=True).count() review =", "* from django.contrib.auth import login as auth_login from django.contrib.auth.models import", "RestaurantReview.objects.exclude(review__isnull=True).count() context = { \"user_count\" : user, \"rest_count\" : rest,", "from account.models import * from account.forms import * from data.models", "instance=user) return redirect(\"userlist\") def get(self, request, pk, *args, **kwargs): user", "**kwargs): user = Account.objects.get(account_id=pk) form = viewUserForm(request.POST, instance=user) return redirect(\"userlist\")", "{ \"form\": form, \"pk\": pk } return render(request, \"profile.html\", context)", "successfully!\") return redirect(\"login\") else: messages.error(request, \"User creation failed\") else: create_form", "\"form\": form, \"create_form\": create_form } return render(request, \"login.html\", context) def", "= 'accounts.view_account' raise_exception = True def post(self, request, pk, *args,", "return redirect(\"home\") else: messages.warning(request, 'There is an issue with your", "from django.views.generic import ( ListView, ) from account.models import *", "template_name = \"userList.html\" queryset = Account.objects.all() class UpdateProfilePicView(View, LoginRequiredMixin, PermissionRequiredMixin):", "= viewUserForm(instance=user) context = { \"form\": form, \"pk\": pk }", "user = Account.objects.get(account_id=pk) form = editUserForm(request.POST, instance=user) if form.is_valid(): user", "return redirect(f'/viewUser/{user.account_id}') else: form = editUserForm(instance=user) extra_context = { \"form\":", "ViewUserView(View, LoginRequiredMixin, PermissionRequiredMixin): permission_required = 'accounts.view_account' raise_exception = True def", "Account.objects.get(account_id=pk) form = 
editUserForm(instance=user) extra_context = { \"form\": form, }", "Account.objects.get(account_id=pk) form = viewUserForm(instance=user) context = { \"form\": form, \"pk\":", "failed\") else: create_form = createUserForm() return render(request, \"login.html\", {\"create_form\": create_form})", "def post(self, request, pk, *args, **kwargs): user = Account.objects.get(account_id=pk) form", "is an issue with your login processes') return redirect(\"login\") else:", "else: form = editUserForm(instance=user) extra_context = { \"form\": form, }", "form = loginForm(data=request.POST) if form.is_valid(): user = form.get_user() auth_login(request, user)", ") from account.models import * from account.forms import * from", "venv import create from django.shortcuts import render, redirect from django.views", "= createUserForm() context = { \"form\": form, \"create_form\": create_form }", "render(request, \"login.html\", context) def logout(request): auth.logout(request) return redirect(\"login\") def register(request):", "messages.error(request, \"Invalid input! 
Please input a valid information.\") return render(request,", "redirect('viewUser', pk) def deleteUser(request, event_id): event = Account.objects.get(pk=event_id) event.delete() return", "*args, **kwargs): user = Account.objects.get(account_id=pk) form = viewUserForm(instance=user) context =", "import messages from django.contrib.auth.mixins import PermissionRequiredMixin, LoginRequiredMixin # Create your", "Account.objects.get(account_id=pk) form = editUserForm(request.POST, instance=user) if form.is_valid(): user = form.save()", "remember_me = form.cleaned_data[\"remember_me\"] if remember_me: request.session.set_expiry(1209600) return redirect(\"home\") else: messages.warning(request,", "UpdateProfilePicView(View, LoginRequiredMixin, PermissionRequiredMixin): permission_required = 'accounts.change_account' raise_exception = True def", "request, pk, *args, **kwargs): user = Account.objects.get(account_id=pk) user.profile_pic = request.FILES.get('profile-pic')", "{ \"form\": form, \"create_form\": create_form } return render(request, \"login.html\", context)", "auth from django.contrib import messages from django.contrib.auth.mixins import PermissionRequiredMixin, LoginRequiredMixin", "with your login processes') return redirect(\"login\") else: form = loginForm()", "= RestaurantReview.objects.exclude(rating__isnull=True).count() review = RestaurantReview.objects.exclude(review__isnull=True).count() context = { \"user_count\" :", "rating, \"review_count\" : review, } return render(request, \"home.html\", context) class", "*args, **kwargs): user = Account.objects.get(account_id=pk) form = viewUserForm(request.POST, instance=user) return", "extra_context) class UserListView(LoginRequiredMixin, PermissionRequiredMixin, ListView): permission_required = 'accounts.view_account' template_name =", "class ViewUserView(View, LoginRequiredMixin, PermissionRequiredMixin): permission_required = 'accounts.view_account' raise_exception = True", "import ( ListView, ) from 
account.models import * from account.forms", "role = request.POST.get(\"role\") user.save() messages.success(request, \"Successfully updated profile!\") return redirect(f'/viewUser/{user.account_id}')", "raise_exception = True def post(self, request, pk, *args, **kwargs): user", "user, \"rest_count\" : rest, \"rating_count\" : rating, \"review_count\" : review,", "form.save() role = request.POST.get(\"role\") user.save() messages.success(request, \"Successfully updated profile!\") return" ]
[ "= asyncio.create_task(preview_window(dst_queue, threadpool)) try: while True: im = await ws.receive_bytes()", "= [int(cv2.IMWRITE_JPEG_QUALITY), 40] _, im = await loop.run_in_executor(threadpool, lambda: cv2.imencode('.jpg',", "loop.run_in_executor(threadpool, src.read) im = cv2.resize(im, (640, 384)) enc_param = [int(cv2.IMWRITE_JPEG_QUALITY),", "try: while True: im = await queue.get() im = np.frombuffer(im,", "queue.get() im = np.frombuffer(im, dtype=np.uint8) im = await loop.run_in_executor(threadpool, lambda:", "await ws.send_str('close') src_task.cancel() dst_task.cancel() await asyncio.wait([src_task, dst_task]) async def amain(url:", "cv2.destroyAllWindows() async def run_client( ws: aiohttp.ClientWebSocketResponse, threadpool: concurrent.futures.ThreadPoolExecutor ) ->", "lambda: cv2.imencode('.jpg', im, enc_param)) await ws.send_bytes(im.tobytes()) except asyncio.CancelledError: pass finally:", "dst_task]) async def amain(url: str): with concurrent.futures.ThreadPoolExecutor(max_workers=4) as threadpool: async", "asyncio.CancelledError: pass finally: src.release() async def preview_window(queue: asyncio.Queue, threadpool: concurrent.futures.ThreadPoolExecutor):", "except asyncio.CancelledError: pass finally: src.release() async def preview_window(queue: asyncio.Queue, threadpool:", "= await loop.run_in_executor(threadpool, src.read) im = cv2.resize(im, (640, 384)) enc_param", "dtype=np.uint8) im = await loop.run_in_executor(threadpool, lambda: cv2.imdecode(im, cv2.IMREAD_ANYCOLOR)) cv2.imshow('fpds_remote_preview', im)", "try: while True: im = await ws.receive_bytes() await dst_queue.put(im) except", "args = parser.parse_args() loop = asyncio.get_event_loop() task = loop.create_task(amain(args.url)) try:", "finally: src.release() async def preview_window(queue: asyncio.Queue, threadpool: concurrent.futures.ThreadPoolExecutor): loop =", "loop.run_in_executor(threadpool, lambda: cv2.imdecode(im, cv2.IMREAD_ANYCOLOR)) cv2.imshow('fpds_remote_preview', im) 
cv2.waitKey(1) except asyncio.CancelledError: pass", "-- dst_queue = asyncio.Queue(maxsize=1) src_task = asyncio.create_task(camera_source(ws, threadpool)) dst_task =", "src = await loop.run_in_executor(threadpool, lambda: cv2.VideoCapture(src_id)) while True: _, im", "await ws.receive_bytes() await dst_queue.put(im) except asyncio.CancelledError: await ws.send_str('close') src_task.cancel() dst_task.cancel()", "def amain(url: str): with concurrent.futures.ThreadPoolExecutor(max_workers=4) as threadpool: async with aiohttp.ClientSession()", "asyncio.CancelledError: await ws.send_str('close') src_task.cancel() dst_task.cancel() await asyncio.wait([src_task, dst_task]) async def", "src.read) im = cv2.resize(im, (640, 384)) enc_param = [int(cv2.IMWRITE_JPEG_QUALITY), 40]", "asyncio.get_event_loop() task = loop.create_task(amain(args.url)) try: loop.run_until_complete(task) except KeyboardInterrupt: task.cancel() loop.run_until_complete(asyncio.wait_for(task,", "im, enc_param)) await ws.send_bytes(im.tobytes()) except asyncio.CancelledError: pass finally: src.release() async", "preview_window(queue: asyncio.Queue, threadpool: concurrent.futures.ThreadPoolExecutor): loop = asyncio.get_running_loop() try: while True:", "def run_client( ws: aiohttp.ClientWebSocketResponse, threadpool: concurrent.futures.ThreadPoolExecutor ) -> None: #", "ws.send_bytes(im.tobytes()) except asyncio.CancelledError: pass finally: src.release() async def preview_window(queue: asyncio.Queue,", "dst_queue = asyncio.Queue(maxsize=1) src_task = asyncio.create_task(camera_source(ws, threadpool)) dst_task = asyncio.create_task(preview_window(dst_queue,", "dst_task = asyncio.create_task(preview_window(dst_queue, threadpool)) try: while True: im = await", "-> None: # -- dst_queue = asyncio.Queue(maxsize=1) src_task = asyncio.create_task(camera_source(ws,", "im = await queue.get() im = np.frombuffer(im, dtype=np.uint8) im =", "ws.send_str('close') src_task.cancel() dst_task.cancel() await 
asyncio.wait([src_task, dst_task]) async def amain(url: str):", "http://localhost:8181/fpds') args = parser.parse_args() loop = asyncio.get_event_loop() task = loop.create_task(amain(args.url))", "task.cancel() loop.run_until_complete(asyncio.wait_for(task, timeout=None)) finally: loop.close() if __name__ == '__main__': main()", "enc_param)) await ws.send_bytes(im.tobytes()) except asyncio.CancelledError: pass finally: src.release() async def", "def camera_source(ws: aiohttp.ClientWebSocketResponse, threadpool: concurrent.futures.ThreadPoolExecutor, src_id: int=0): loop = asyncio.get_running_loop()", "= await loop.run_in_executor(threadpool, lambda: cv2.imencode('.jpg', im, enc_param)) await ws.send_bytes(im.tobytes()) except", "concurrent.futures.ThreadPoolExecutor(max_workers=4) as threadpool: async with aiohttp.ClientSession() as session, session.ws_connect(url) as", "async def preview_window(queue: asyncio.Queue, threadpool: concurrent.futures.ThreadPoolExecutor): loop = asyncio.get_running_loop() try:", "ws.receive_bytes() await dst_queue.put(im) except asyncio.CancelledError: await ws.send_str('close') src_task.cancel() dst_task.cancel() await", "aiohttp.ClientWebSocketResponse, threadpool: concurrent.futures.ThreadPoolExecutor, src_id: int=0): loop = asyncio.get_running_loop() try: src", "loop.run_in_executor(threadpool, lambda: cv2.imencode('.jpg', im, enc_param)) await ws.send_bytes(im.tobytes()) except asyncio.CancelledError: pass", "asyncio.wait([src_task, dst_task]) async def amain(url: str): with concurrent.futures.ThreadPoolExecutor(max_workers=4) as threadpool:", "numpy as np async def camera_source(ws: aiohttp.ClientWebSocketResponse, threadpool: concurrent.futures.ThreadPoolExecutor, src_id:", "asyncio.Queue, threadpool: concurrent.futures.ThreadPoolExecutor): loop = asyncio.get_running_loop() try: while True: im", "cv2.imdecode(im, cv2.IMREAD_ANYCOLOR)) cv2.imshow('fpds_remote_preview', im) cv2.waitKey(1) except asyncio.CancelledError: pass 
finally: cv2.destroyAllWindows()", "run_client( ws: aiohttp.ClientWebSocketResponse, threadpool: concurrent.futures.ThreadPoolExecutor ) -> None: # --", "argparse.ArgumentParser('fpds.client') parser.add_argument('url', type=str, help='WebSocket endpoint of fpds.server e.g. http://localhost:8181/fpds') args", "np.frombuffer(im, dtype=np.uint8) im = await loop.run_in_executor(threadpool, lambda: cv2.imdecode(im, cv2.IMREAD_ANYCOLOR)) cv2.imshow('fpds_remote_preview',", "ws: await run_client(ws, threadpool) def main(): parser = argparse.ArgumentParser('fpds.client') parser.add_argument('url',", "loop.create_task(amain(args.url)) try: loop.run_until_complete(task) except KeyboardInterrupt: task.cancel() loop.run_until_complete(asyncio.wait_for(task, timeout=None)) finally: loop.close()", "loop.run_until_complete(task) except KeyboardInterrupt: task.cancel() loop.run_until_complete(asyncio.wait_for(task, timeout=None)) finally: loop.close() if __name__", "src.release() async def preview_window(queue: asyncio.Queue, threadpool: concurrent.futures.ThreadPoolExecutor): loop = asyncio.get_running_loop()", "import argparse import numpy as np async def camera_source(ws: aiohttp.ClientWebSocketResponse,", "parser = argparse.ArgumentParser('fpds.client') parser.add_argument('url', type=str, help='WebSocket endpoint of fpds.server e.g.", "try: loop.run_until_complete(task) except KeyboardInterrupt: task.cancel() loop.run_until_complete(asyncio.wait_for(task, timeout=None)) finally: loop.close() if", "concurrent.futures.ThreadPoolExecutor ) -> None: # -- dst_queue = asyncio.Queue(maxsize=1) src_task", "while True: _, im = await loop.run_in_executor(threadpool, src.read) im =", "asyncio.create_task(camera_source(ws, threadpool)) dst_task = asyncio.create_task(preview_window(dst_queue, threadpool)) try: while True: im", "threadpool)) dst_task = asyncio.create_task(preview_window(dst_queue, threadpool)) try: while True: im =", "<filename>fpds/client.py<gh_stars>0 import cv2 import 
aiohttp import asyncio import concurrent.futures import", "loop.run_in_executor(threadpool, lambda: cv2.VideoCapture(src_id)) while True: _, im = await loop.run_in_executor(threadpool,", "= await queue.get() im = np.frombuffer(im, dtype=np.uint8) im = await", "asyncio.CancelledError: pass finally: cv2.destroyAllWindows() async def run_client( ws: aiohttp.ClientWebSocketResponse, threadpool:", "as ws: await run_client(ws, threadpool) def main(): parser = argparse.ArgumentParser('fpds.client')", "main(): parser = argparse.ArgumentParser('fpds.client') parser.add_argument('url', type=str, help='WebSocket endpoint of fpds.server", "argparse import numpy as np async def camera_source(ws: aiohttp.ClientWebSocketResponse, threadpool:", "KeyboardInterrupt: task.cancel() loop.run_until_complete(asyncio.wait_for(task, timeout=None)) finally: loop.close() if __name__ == '__main__':", "cv2.imshow('fpds_remote_preview', im) cv2.waitKey(1) except asyncio.CancelledError: pass finally: cv2.destroyAllWindows() async def", "while True: im = await ws.receive_bytes() await dst_queue.put(im) except asyncio.CancelledError:", "asyncio.Queue(maxsize=1) src_task = asyncio.create_task(camera_source(ws, threadpool)) dst_task = asyncio.create_task(preview_window(dst_queue, threadpool)) try:", "await queue.get() im = np.frombuffer(im, dtype=np.uint8) im = await loop.run_in_executor(threadpool,", "await asyncio.wait([src_task, dst_task]) async def amain(url: str): with concurrent.futures.ThreadPoolExecutor(max_workers=4) as", "cv2.VideoCapture(src_id)) while True: _, im = await loop.run_in_executor(threadpool, src.read) im", "384)) enc_param = [int(cv2.IMWRITE_JPEG_QUALITY), 40] _, im = await loop.run_in_executor(threadpool,", "await ws.send_bytes(im.tobytes()) except asyncio.CancelledError: pass finally: src.release() async def preview_window(queue:", "help='WebSocket endpoint of fpds.server e.g. 
http://localhost:8181/fpds') args = parser.parse_args() loop", "src_task.cancel() dst_task.cancel() await asyncio.wait([src_task, dst_task]) async def amain(url: str): with", "enc_param = [int(cv2.IMWRITE_JPEG_QUALITY), 40] _, im = await loop.run_in_executor(threadpool, lambda:", "threadpool: async with aiohttp.ClientSession() as session, session.ws_connect(url) as ws: await", "def preview_window(queue: asyncio.Queue, threadpool: concurrent.futures.ThreadPoolExecutor): loop = asyncio.get_running_loop() try: while", "im = await loop.run_in_executor(threadpool, src.read) im = cv2.resize(im, (640, 384))", "amain(url: str): with concurrent.futures.ThreadPoolExecutor(max_workers=4) as threadpool: async with aiohttp.ClientSession() as", "concurrent.futures import argparse import numpy as np async def camera_source(ws:", "parser.add_argument('url', type=str, help='WebSocket endpoint of fpds.server e.g. http://localhost:8181/fpds') args =", "as np async def camera_source(ws: aiohttp.ClientWebSocketResponse, threadpool: concurrent.futures.ThreadPoolExecutor, src_id: int=0):", "threadpool: concurrent.futures.ThreadPoolExecutor): loop = asyncio.get_running_loop() try: while True: im =", "asyncio.get_running_loop() try: src = await loop.run_in_executor(threadpool, lambda: cv2.VideoCapture(src_id)) while True:", "dst_task.cancel() await asyncio.wait([src_task, dst_task]) async def amain(url: str): with concurrent.futures.ThreadPoolExecutor(max_workers=4)", "= asyncio.Queue(maxsize=1) src_task = asyncio.create_task(camera_source(ws, threadpool)) dst_task = asyncio.create_task(preview_window(dst_queue, threadpool))", "_, im = await loop.run_in_executor(threadpool, lambda: cv2.imencode('.jpg', im, enc_param)) await", "= loop.create_task(amain(args.url)) try: loop.run_until_complete(task) except KeyboardInterrupt: task.cancel() loop.run_until_complete(asyncio.wait_for(task, timeout=None)) finally:", "loop = asyncio.get_running_loop() try: while True: im = await queue.get()", 
"aiohttp import asyncio import concurrent.futures import argparse import numpy as", "threadpool)) try: while True: im = await ws.receive_bytes() await dst_queue.put(im)", "= parser.parse_args() loop = asyncio.get_event_loop() task = loop.create_task(amain(args.url)) try: loop.run_until_complete(task)", "asyncio import concurrent.futures import argparse import numpy as np async", "cv2 import aiohttp import asyncio import concurrent.futures import argparse import", "except asyncio.CancelledError: await ws.send_str('close') src_task.cancel() dst_task.cancel() await asyncio.wait([src_task, dst_task]) async", "asyncio.get_running_loop() try: while True: im = await queue.get() im =", "= await ws.receive_bytes() await dst_queue.put(im) except asyncio.CancelledError: await ws.send_str('close') src_task.cancel()", "True: _, im = await loop.run_in_executor(threadpool, src.read) im = cv2.resize(im,", "cv2.IMREAD_ANYCOLOR)) cv2.imshow('fpds_remote_preview', im) cv2.waitKey(1) except asyncio.CancelledError: pass finally: cv2.destroyAllWindows() async", "im) cv2.waitKey(1) except asyncio.CancelledError: pass finally: cv2.destroyAllWindows() async def run_client(", "im = await loop.run_in_executor(threadpool, lambda: cv2.imdecode(im, cv2.IMREAD_ANYCOLOR)) cv2.imshow('fpds_remote_preview', im) cv2.waitKey(1)", "(640, 384)) enc_param = [int(cv2.IMWRITE_JPEG_QUALITY), 40] _, im = await", "_, im = await loop.run_in_executor(threadpool, src.read) im = cv2.resize(im, (640,", "run_client(ws, threadpool) def main(): parser = argparse.ArgumentParser('fpds.client') parser.add_argument('url', type=str, help='WebSocket", "pass finally: cv2.destroyAllWindows() async def run_client( ws: aiohttp.ClientWebSocketResponse, threadpool: concurrent.futures.ThreadPoolExecutor", "as threadpool: async with aiohttp.ClientSession() as session, session.ws_connect(url) as ws:", "= await loop.run_in_executor(threadpool, lambda: cv2.imdecode(im, cv2.IMREAD_ANYCOLOR)) cv2.imshow('fpds_remote_preview', im) 
cv2.waitKey(1) except", "threadpool: concurrent.futures.ThreadPoolExecutor ) -> None: # -- dst_queue = asyncio.Queue(maxsize=1)", "dst_queue.put(im) except asyncio.CancelledError: await ws.send_str('close') src_task.cancel() dst_task.cancel() await asyncio.wait([src_task, dst_task])", "concurrent.futures.ThreadPoolExecutor): loop = asyncio.get_running_loop() try: while True: im = await", "src_task = asyncio.create_task(camera_source(ws, threadpool)) dst_task = asyncio.create_task(preview_window(dst_queue, threadpool)) try: while", "threadpool) def main(): parser = argparse.ArgumentParser('fpds.client') parser.add_argument('url', type=str, help='WebSocket endpoint", "endpoint of fpds.server e.g. http://localhost:8181/fpds') args = parser.parse_args() loop =", "fpds.server e.g. http://localhost:8181/fpds') args = parser.parse_args() loop = asyncio.get_event_loop() task", ") -> None: # -- dst_queue = asyncio.Queue(maxsize=1) src_task =", "im = cv2.resize(im, (640, 384)) enc_param = [int(cv2.IMWRITE_JPEG_QUALITY), 40] _,", "await loop.run_in_executor(threadpool, lambda: cv2.imencode('.jpg', im, enc_param)) await ws.send_bytes(im.tobytes()) except asyncio.CancelledError:", "loop = asyncio.get_running_loop() try: src = await loop.run_in_executor(threadpool, lambda: cv2.VideoCapture(src_id))", "lambda: cv2.VideoCapture(src_id)) while True: _, im = await loop.run_in_executor(threadpool, src.read)", "import numpy as np async def camera_source(ws: aiohttp.ClientWebSocketResponse, threadpool: concurrent.futures.ThreadPoolExecutor,", "aiohttp.ClientWebSocketResponse, threadpool: concurrent.futures.ThreadPoolExecutor ) -> None: # -- dst_queue =", "await loop.run_in_executor(threadpool, lambda: cv2.VideoCapture(src_id)) while True: _, im = await", "except asyncio.CancelledError: pass finally: cv2.destroyAllWindows() async def run_client( ws: aiohttp.ClientWebSocketResponse,", "await run_client(ws, threadpool) def main(): parser = argparse.ArgumentParser('fpds.client') 
parser.add_argument('url', type=str,", "import concurrent.futures import argparse import numpy as np async def", "def main(): parser = argparse.ArgumentParser('fpds.client') parser.add_argument('url', type=str, help='WebSocket endpoint of", "task = loop.create_task(amain(args.url)) try: loop.run_until_complete(task) except KeyboardInterrupt: task.cancel() loop.run_until_complete(asyncio.wait_for(task, timeout=None))", "im = await ws.receive_bytes() await dst_queue.put(im) except asyncio.CancelledError: await ws.send_str('close')", "loop = asyncio.get_event_loop() task = loop.create_task(amain(args.url)) try: loop.run_until_complete(task) except KeyboardInterrupt:", "await loop.run_in_executor(threadpool, lambda: cv2.imdecode(im, cv2.IMREAD_ANYCOLOR)) cv2.imshow('fpds_remote_preview', im) cv2.waitKey(1) except asyncio.CancelledError:", "async def amain(url: str): with concurrent.futures.ThreadPoolExecutor(max_workers=4) as threadpool: async with", "= await loop.run_in_executor(threadpool, lambda: cv2.VideoCapture(src_id)) while True: _, im =", "async def run_client( ws: aiohttp.ClientWebSocketResponse, threadpool: concurrent.futures.ThreadPoolExecutor ) -> None:", "async with aiohttp.ClientSession() as session, session.ws_connect(url) as ws: await run_client(ws,", "np async def camera_source(ws: aiohttp.ClientWebSocketResponse, threadpool: concurrent.futures.ThreadPoolExecutor, src_id: int=0): loop", "import aiohttp import asyncio import concurrent.futures import argparse import numpy", "asyncio.create_task(preview_window(dst_queue, threadpool)) try: while True: im = await ws.receive_bytes() await", "while True: im = await queue.get() im = np.frombuffer(im, dtype=np.uint8)", "= asyncio.create_task(camera_source(ws, threadpool)) dst_task = asyncio.create_task(preview_window(dst_queue, threadpool)) try: while True:", "import cv2 import aiohttp import asyncio import concurrent.futures import argparse", "= cv2.resize(im, (640, 384)) enc_param = 
[int(cv2.IMWRITE_JPEG_QUALITY), 40] _, im", "im = np.frombuffer(im, dtype=np.uint8) im = await loop.run_in_executor(threadpool, lambda: cv2.imdecode(im,", "import asyncio import concurrent.futures import argparse import numpy as np", "src_id: int=0): loop = asyncio.get_running_loop() try: src = await loop.run_in_executor(threadpool,", "await loop.run_in_executor(threadpool, src.read) im = cv2.resize(im, (640, 384)) enc_param =", "with aiohttp.ClientSession() as session, session.ws_connect(url) as ws: await run_client(ws, threadpool)", "type=str, help='WebSocket endpoint of fpds.server e.g. http://localhost:8181/fpds') args = parser.parse_args()", "int=0): loop = asyncio.get_running_loop() try: src = await loop.run_in_executor(threadpool, lambda:", "True: im = await queue.get() im = np.frombuffer(im, dtype=np.uint8) im", "session, session.ws_connect(url) as ws: await run_client(ws, threadpool) def main(): parser", "40] _, im = await loop.run_in_executor(threadpool, lambda: cv2.imencode('.jpg', im, enc_param))", "cv2.imencode('.jpg', im, enc_param)) await ws.send_bytes(im.tobytes()) except asyncio.CancelledError: pass finally: src.release()", "concurrent.futures.ThreadPoolExecutor, src_id: int=0): loop = asyncio.get_running_loop() try: src = await", "= np.frombuffer(im, dtype=np.uint8) im = await loop.run_in_executor(threadpool, lambda: cv2.imdecode(im, cv2.IMREAD_ANYCOLOR))", "e.g. 
http://localhost:8181/fpds') args = parser.parse_args() loop = asyncio.get_event_loop() task =", "cv2.waitKey(1) except asyncio.CancelledError: pass finally: cv2.destroyAllWindows() async def run_client( ws:", "# -- dst_queue = asyncio.Queue(maxsize=1) src_task = asyncio.create_task(camera_source(ws, threadpool)) dst_task", "parser.parse_args() loop = asyncio.get_event_loop() task = loop.create_task(amain(args.url)) try: loop.run_until_complete(task) except", "camera_source(ws: aiohttp.ClientWebSocketResponse, threadpool: concurrent.futures.ThreadPoolExecutor, src_id: int=0): loop = asyncio.get_running_loop() try:", "True: im = await ws.receive_bytes() await dst_queue.put(im) except asyncio.CancelledError: await", "as session, session.ws_connect(url) as ws: await run_client(ws, threadpool) def main():", "[int(cv2.IMWRITE_JPEG_QUALITY), 40] _, im = await loop.run_in_executor(threadpool, lambda: cv2.imencode('.jpg', im,", "im = await loop.run_in_executor(threadpool, lambda: cv2.imencode('.jpg', im, enc_param)) await ws.send_bytes(im.tobytes())", "= asyncio.get_running_loop() try: while True: im = await queue.get() im", "str): with concurrent.futures.ThreadPoolExecutor(max_workers=4) as threadpool: async with aiohttp.ClientSession() as session,", "pass finally: src.release() async def preview_window(queue: asyncio.Queue, threadpool: concurrent.futures.ThreadPoolExecutor): loop", "await dst_queue.put(im) except asyncio.CancelledError: await ws.send_str('close') src_task.cancel() dst_task.cancel() await asyncio.wait([src_task,", "= asyncio.get_event_loop() task = loop.create_task(amain(args.url)) try: loop.run_until_complete(task) except KeyboardInterrupt: task.cancel()", "cv2.resize(im, (640, 384)) enc_param = [int(cv2.IMWRITE_JPEG_QUALITY), 40] _, im =", "aiohttp.ClientSession() as session, session.ws_connect(url) as ws: await run_client(ws, threadpool) def", "session.ws_connect(url) as ws: await run_client(ws, threadpool) def main(): parser =", "of fpds.server e.g. 
http://localhost:8181/fpds') args = parser.parse_args() loop = asyncio.get_event_loop()", "finally: cv2.destroyAllWindows() async def run_client( ws: aiohttp.ClientWebSocketResponse, threadpool: concurrent.futures.ThreadPoolExecutor )", "= asyncio.get_running_loop() try: src = await loop.run_in_executor(threadpool, lambda: cv2.VideoCapture(src_id)) while", "lambda: cv2.imdecode(im, cv2.IMREAD_ANYCOLOR)) cv2.imshow('fpds_remote_preview', im) cv2.waitKey(1) except asyncio.CancelledError: pass finally:", "None: # -- dst_queue = asyncio.Queue(maxsize=1) src_task = asyncio.create_task(camera_source(ws, threadpool))", "with concurrent.futures.ThreadPoolExecutor(max_workers=4) as threadpool: async with aiohttp.ClientSession() as session, session.ws_connect(url)", "try: src = await loop.run_in_executor(threadpool, lambda: cv2.VideoCapture(src_id)) while True: _,", "= argparse.ArgumentParser('fpds.client') parser.add_argument('url', type=str, help='WebSocket endpoint of fpds.server e.g. http://localhost:8181/fpds')", "except KeyboardInterrupt: task.cancel() loop.run_until_complete(asyncio.wait_for(task, timeout=None)) finally: loop.close() if __name__ ==", "threadpool: concurrent.futures.ThreadPoolExecutor, src_id: int=0): loop = asyncio.get_running_loop() try: src =", "ws: aiohttp.ClientWebSocketResponse, threadpool: concurrent.futures.ThreadPoolExecutor ) -> None: # -- dst_queue", "async def camera_source(ws: aiohttp.ClientWebSocketResponse, threadpool: concurrent.futures.ThreadPoolExecutor, src_id: int=0): loop =" ]
[ "+ str(len(self._cache)) + ' size: ' + bytes_2_human_readable( sys.getsizeof(self._cache))) def", "Read cache entries :param key: :return: \"\"\" self._lock.acquire() result =", "\"\"\" Read complex cache entries \"\"\" return self.get(self._get_id(list_of_keys)) def cache_complex(self,", "are some old entries without timestamp if isinstance(value, str) or", "of an entry, may be helpful in the future :param", "\"\"\" self._lock.acquire() if value is not None: self._cache[key] = self._pack(value);", "self._cache[key] = self._pack(value); self.log.debug(self._cache_path + ' CACHED: ' + str(key)", "cache_path): \"\"\" :param cache_path: path to cache, must be relative", "# reload cache object form disc, if any with open(self._cache_path,", "' size: ' + bytes_2_human_readable( sys.getsizeof(self._cache))) def persist(self): with open(self._cache_path,", "self._cache.get(key) if value is not None: self.log.debug(self._cache_path + ' LOADED:", "want to store \"No distance\" :param key: :param value: :return:", ":return: \"\"\" self._lock.acquire() if value is not None: self._cache[key] =", "+ str(value)) self.persist() self._lock.release() def get(self, key): \"\"\" Read cache", "' entries: ' + str(len(self._cache)) + ' size: ' +", "+ self._cache_path + ' restored') self.log_stats() else: self._cache = {}", "else: self._cache = {} self._lock = threading.Lock() def log_stats(self): #", "str(value)) self.persist() self._lock.release() def get(self, key): \"\"\" Read cache entries", ":param value: :return: \"\"\" # there are some old entries", "key): \"\"\" Read cache entries :param key: :return: \"\"\" self._lock.acquire()", "complex cache entries \"\"\" return self.get(self._get_id(list_of_keys)) def cache_complex(self, list_of_keys: List[str],", "the root.py file \"\"\" self.log = logging.getLogger('GiveMe5W') # resolve path", "self._lock.acquire() result = None value = self._cache.get(key) if value is", "ad a meaningful extension self._cache_path = self._cache_path + 
'.prickle' self._cache", "import os import pickle import sys import threading import time", "value = self._cache.get(key) if value is not None: self.log.debug(self._cache_path +", "Read complex cache entries \"\"\" return self.get(self._get_id(list_of_keys)) def cache_complex(self, list_of_keys:", "+ str(key) + ': ' + str(value)) self.persist() self._lock.release() def", "get_complex(self, list_of_keys: List[str]): \"\"\" Read complex cache entries \"\"\" return", "+ ' restored') self.log_stats() else: self._cache = {} self._lock =", "value): \"\"\" cache tracks the age of an entry, may", "self._lock.release() def get(self, key): \"\"\" Read cache entries :param key:", "def get(self, key): \"\"\" Read cache entries :param key: :return:", "value) def _get_id(self, list_of_keys: List[str]): \"\"\" sorts list_of_keys, concatenates with", "the cached value, if any :param value: :return: \"\"\" #", "' CACHED: ' + str(key) + ': ' + str(value))", ":param value: :return: \"\"\" return [value, str(time.time())] def _unpack(self, value):", "some old entries without timestamp if isinstance(value, str) or isinstance(value,", "' + str(value)) self.persist() self._lock.release() def get(self, key): \"\"\" Read", "file \"\"\" self.log = logging.getLogger('GiveMe5W') # resolve path relative to", "+ bytes_2_human_readable( sys.getsizeof(self._cache))) def persist(self): with open(self._cache_path, 'wb') as f:", "log_stats(self): # size is not considering child's self.log.info(self._cache_path + '", "child's self.log.info(self._cache_path + ' entries: ' + str(len(self._cache)) + '", "value): \"\"\" helper to cache multi (string)key values. 
They are", "{} self._lock = threading.Lock() def log_stats(self): # size is not", "\"\"\" None values are considered as invalid results (ToughRequest) is", "List[str]): \"\"\" sorts list_of_keys, concatenates with # for readability :param", "to store \"No distance\" :param key: :param value: :return: \"\"\"", "value: object): \"\"\" None values are considered as invalid results", "str(len(self._cache)) + ' size: ' + bytes_2_human_readable( sys.getsizeof(self._cache))) def persist(self):", "\"\"\" helper to cache multi (string)key values. They are sorted", "+ '.prickle' self._cache = {} if cache_path and os.path.isfile(self._cache_path) and", "= threading.Lock() def log_stats(self): # size is not considering child's", "_pack(self, value): \"\"\" cache tracks the age of an entry,", "str(key) + ': ' + str(value)) self.persist() self._lock.release() def get(self,", "value, if any :param value: :return: \"\"\" # there are", "path from Giveme5W1H.extractor.tools.util import bytes_2_human_readable class KeyValueCache(object): def __init__(self, cache_path):", "' + bytes_2_human_readable( sys.getsizeof(self._cache))) def persist(self): with open(self._cache_path, 'wb') as", "distance\" :param key: :param value: :return: \"\"\" self._lock.acquire() if value", "str, value: object): \"\"\" None values are considered as invalid", "+ str(value)) result = self._unpack(value) self._lock.release() return result def get_complex(self,", "return self.get(self._get_id(list_of_keys)) def cache_complex(self, list_of_keys: List[str], value): \"\"\" helper to", "'.prickle' self._cache = {} if cache_path and os.path.isfile(self._cache_path) and os.path.getsize(self._cache_path)", "{} if cache_path and os.path.isfile(self._cache_path) and os.path.getsize(self._cache_path) > 0: #", "none for exceptions set -1 if you want to store", "\"\"\" sorted(list_of_keys) return \"#\".join(list_of_keys) def _pack(self, value): \"\"\" cache tracks", "any with open(self._cache_path, 'rb') as ff: 
self._cache = pickle.load(ff) self.log.debug('KeyValueCache:", "extension self._cache_path = self._cache_path + '.prickle' self._cache = {} if", ":param cache_path: path to cache, must be relative to the", "are considered as invalid results (ToughRequest) is producing none for", "self._cache_path = path(cache_path) # ad a meaningful extension self._cache_path =", "value): \"\"\" removes the timestamp around the cached value, if", "KeyValueCache(object): def __init__(self, cache_path): \"\"\" :param cache_path: path to cache,", "= path(cache_path) # ad a meaningful extension self._cache_path = self._cache_path", "if isinstance(value, str) or isinstance(value, int): return value return value[0]", "# for readability :param list_of_keys: :return: \"\"\" sorted(list_of_keys) return \"#\".join(list_of_keys)", "relative to the root.py file \"\"\" self.log = logging.getLogger('GiveMe5W') #", "entries: ' + str(len(self._cache)) + ' size: ' + bytes_2_human_readable(", "\"\"\" self._lock.acquire() result = None value = self._cache.get(key) if value", "is producing none for exceptions set -1 if you want", "as f: pickle.dump(self._cache, f, pickle.HIGHEST_PROTOCOL) def cache(self, key: str, value:", ":return: \"\"\" sorted(list_of_keys) return \"#\".join(list_of_keys) def _pack(self, value): \"\"\" cache", ":param value: :return: \"\"\" self._lock.acquire() if value is not None:", "size: ' + bytes_2_human_readable( sys.getsizeof(self._cache))) def persist(self): with open(self._cache_path, 'wb')", "= None value = self._cache.get(key) if value is not None:", "threading import time from typing import List from Giveme5W1H.extractor.root import", "self.log.debug(self._cache_path + ' CACHED: ' + str(key) + ': '", "values are considered as invalid results (ToughRequest) is producing none", "set -1 if you want to store \"No distance\" :param", "' LOADED: ' + str(key) + ': ' + str(value))", "may be helpful in the future :param value: :return: \"\"\"", "pickle.load(ff) 
self.log.debug('KeyValueCache: ' + self._cache_path + ' restored') self.log_stats() else:", "there are some old entries without timestamp if isinstance(value, str)", "any :param value: :return: \"\"\" # there are some old", "to cache multi (string)key values. They are sorted before concatenation,", "self.log.debug(self._cache_path + ' LOADED: ' + str(key) + ': '", "import threading import time from typing import List from Giveme5W1H.extractor.root", "logging import os import pickle import sys import threading import", "cache(self, key: str, value: object): \"\"\" None values are considered", "reload cache object form disc, if any with open(self._cache_path, 'rb')", "self._cache = pickle.load(ff) self.log.debug('KeyValueCache: ' + self._cache_path + ' restored')", "be helpful in the future :param value: :return: \"\"\" return", "os.path.getsize(self._cache_path) > 0: # reload cache object form disc, if", "if value is not None: self._cache[key] = self._pack(value); self.log.debug(self._cache_path +", "determined. \"\"\" self.cache(self._get_id(list_of_keys), value) def _get_id(self, list_of_keys: List[str]): \"\"\" sorts", "considering child's self.log.info(self._cache_path + ' entries: ' + str(len(self._cache)) +", "def cache_complex(self, list_of_keys: List[str], value): \"\"\" helper to cache multi", "cache_complex(self, list_of_keys: List[str], value): \"\"\" helper to cache multi (string)key", "sorted(list_of_keys) return \"#\".join(list_of_keys) def _pack(self, value): \"\"\" cache tracks the", "for exceptions set -1 if you want to store \"No", "as ff: self._cache = pickle.load(ff) self.log.debug('KeyValueCache: ' + self._cache_path +", "you want to store \"No distance\" :param key: :param value:", "concatenation, therefore an order is determined. 
\"\"\" self.cache(self._get_id(list_of_keys), value) def", "object): \"\"\" None values are considered as invalid results (ToughRequest)", "self.get(self._get_id(list_of_keys)) def cache_complex(self, list_of_keys: List[str], value): \"\"\" helper to cache", "to cache, must be relative to the root.py file \"\"\"", "around the cached value, if any :param value: :return: \"\"\"", "value is not None: self.log.debug(self._cache_path + ' LOADED: ' +", "the path file self._cache_path = path(cache_path) # ad a meaningful", "def _pack(self, value): \"\"\" cache tracks the age of an", "as invalid results (ToughRequest) is producing none for exceptions set", "with open(self._cache_path, 'rb') as ff: self._cache = pickle.load(ff) self.log.debug('KeyValueCache: '", "value: :return: \"\"\" # there are some old entries without", "invalid results (ToughRequest) is producing none for exceptions set -1", "' restored') self.log_stats() else: self._cache = {} self._lock = threading.Lock()", "None: self._cache[key] = self._pack(value); self.log.debug(self._cache_path + ' CACHED: ' +", "an entry, may be helpful in the future :param value:", "_unpack(self, value): \"\"\" removes the timestamp around the cached value,", "= self._pack(value); self.log.debug(self._cache_path + ' CACHED: ' + str(key) +", "+ str(key) + ': ' + str(value)) result = self._unpack(value)", "removes the timestamp around the cached value, if any :param", "return \"#\".join(list_of_keys) def _pack(self, value): \"\"\" cache tracks the age", "if any :param value: :return: \"\"\" # there are some", "-1 if you want to store \"No distance\" :param key:", "timestamp around the cached value, if any :param value: :return:", "to the path file self._cache_path = path(cache_path) # ad a", "\"\"\" sorts list_of_keys, concatenates with # for readability :param list_of_keys:", "entries without timestamp if isinstance(value, str) or isinstance(value, int): return", "file self._cache_path = path(cache_path) # ad a meaningful 
extension self._cache_path", "result = self._unpack(value) self._lock.release() return result def get_complex(self, list_of_keys: List[str]):", "self.log = logging.getLogger('GiveMe5W') # resolve path relative to the path", "class KeyValueCache(object): def __init__(self, cache_path): \"\"\" :param cache_path: path to", "None values are considered as invalid results (ToughRequest) is producing", "path(cache_path) # ad a meaningful extension self._cache_path = self._cache_path +", "the age of an entry, may be helpful in the", "'rb') as ff: self._cache = pickle.load(ff) self.log.debug('KeyValueCache: ' + self._cache_path", "import bytes_2_human_readable class KeyValueCache(object): def __init__(self, cache_path): \"\"\" :param cache_path:", "sorted before concatenation, therefore an order is determined. \"\"\" self.cache(self._get_id(list_of_keys),", "': ' + str(value)) result = self._unpack(value) self._lock.release() return result", ":return: \"\"\" # there are some old entries without timestamp", "not None: self._cache[key] = self._pack(value); self.log.debug(self._cache_path + ' CACHED: '", "a meaningful extension self._cache_path = self._cache_path + '.prickle' self._cache =", "is not considering child's self.log.info(self._cache_path + ' entries: ' +", "entry, may be helpful in the future :param value: :return:", "= logging.getLogger('GiveMe5W') # resolve path relative to the path file", "with open(self._cache_path, 'wb') as f: pickle.dump(self._cache, f, pickle.HIGHEST_PROTOCOL) def cache(self,", "logging.getLogger('GiveMe5W') # resolve path relative to the path file self._cache_path", "self._cache = {} self._lock = threading.Lock() def log_stats(self): # size", "# size is not considering child's self.log.info(self._cache_path + ' entries:", "path file self._cache_path = path(cache_path) # ad a meaningful extension", "list_of_keys, concatenates with # for readability :param list_of_keys: :return: \"\"\"", "the timestamp around the cached value, if any :param 
value:", "cached value, if any :param value: :return: \"\"\" # there", "cache_path and os.path.isfile(self._cache_path) and os.path.getsize(self._cache_path) > 0: # reload cache", "in the future :param value: :return: \"\"\" return [value, str(time.time())]", "is not None: self.log.debug(self._cache_path + ' LOADED: ' + str(key)", "= self._unpack(value) self._lock.release() return result def get_complex(self, list_of_keys: List[str]): \"\"\"", "cache multi (string)key values. They are sorted before concatenation, therefore", "def persist(self): with open(self._cache_path, 'wb') as f: pickle.dump(self._cache, f, pickle.HIGHEST_PROTOCOL)", "concatenates with # for readability :param list_of_keys: :return: \"\"\" sorted(list_of_keys)", "# ad a meaningful extension self._cache_path = self._cache_path + '.prickle'", "\"#\".join(list_of_keys) def _pack(self, value): \"\"\" cache tracks the age of", "is determined. \"\"\" self.cache(self._get_id(list_of_keys), value) def _get_id(self, list_of_keys: List[str]): \"\"\"", "self._lock.acquire() if value is not None: self._cache[key] = self._pack(value); self.log.debug(self._cache_path", "self._unpack(value) self._lock.release() return result def get_complex(self, list_of_keys: List[str]): \"\"\" Read", "import time from typing import List from Giveme5W1H.extractor.root import path", "\"\"\" Read cache entries :param key: :return: \"\"\" self._lock.acquire() result", "values. 
They are sorted before concatenation, therefore an order is", "resolve path relative to the path file self._cache_path = path(cache_path)", "bytes_2_human_readable( sys.getsizeof(self._cache))) def persist(self): with open(self._cache_path, 'wb') as f: pickle.dump(self._cache,", "self.log_stats() else: self._cache = {} self._lock = threading.Lock() def log_stats(self):", "def cache(self, key: str, value: object): \"\"\" None values are", "import List from Giveme5W1H.extractor.root import path from Giveme5W1H.extractor.tools.util import bytes_2_human_readable", "__init__(self, cache_path): \"\"\" :param cache_path: path to cache, must be", "key: str, value: object): \"\"\" None values are considered as", "List from Giveme5W1H.extractor.root import path from Giveme5W1H.extractor.tools.util import bytes_2_human_readable class", "value: :return: \"\"\" self._lock.acquire() if value is not None: self._cache[key]", "\"No distance\" :param key: :param value: :return: \"\"\" self._lock.acquire() if", "threading.Lock() def log_stats(self): # size is not considering child's self.log.info(self._cache_path", "helper to cache multi (string)key values. 
They are sorted before", "store \"No distance\" :param key: :param value: :return: \"\"\" self._lock.acquire()", "path to cache, must be relative to the root.py file", "import path from Giveme5W1H.extractor.tools.util import bytes_2_human_readable class KeyValueCache(object): def __init__(self,", "from Giveme5W1H.extractor.root import path from Giveme5W1H.extractor.tools.util import bytes_2_human_readable class KeyValueCache(object):", "key: :return: \"\"\" self._lock.acquire() result = None value = self._cache.get(key)", "'wb') as f: pickle.dump(self._cache, f, pickle.HIGHEST_PROTOCOL) def cache(self, key: str,", "self._cache_path = self._cache_path + '.prickle' self._cache = {} if cache_path", "cache tracks the age of an entry, may be helpful", "\"\"\" :param cache_path: path to cache, must be relative to", "list_of_keys: List[str]): \"\"\" Read complex cache entries \"\"\" return self.get(self._get_id(list_of_keys))", "List[str]): \"\"\" Read complex cache entries \"\"\" return self.get(self._get_id(list_of_keys)) def", "None: self.log.debug(self._cache_path + ' LOADED: ' + str(key) + ':", "+ ' entries: ' + str(len(self._cache)) + ' size: '", "cache entries :param key: :return: \"\"\" self._lock.acquire() result = None", "self.log.debug('KeyValueCache: ' + self._cache_path + ' restored') self.log_stats() else: self._cache", "os import pickle import sys import threading import time from", "value is not None: self._cache[key] = self._pack(value); self.log.debug(self._cache_path + '", "+ ': ' + str(value)) result = self._unpack(value) self._lock.release() return", "relative to the path file self._cache_path = path(cache_path) # ad", "the future :param value: :return: \"\"\" return [value, str(time.time())] def", "disc, if any with open(self._cache_path, 'rb') as ff: self._cache =", "results (ToughRequest) is producing none for exceptions set -1 if", "They are sorted before concatenation, therefore an order is determined.", "\"\"\" removes the timestamp around the 
cached value, if any", "str(value)) result = self._unpack(value) self._lock.release() return result def get_complex(self, list_of_keys:", "ff: self._cache = pickle.load(ff) self.log.debug('KeyValueCache: ' + self._cache_path + '", "f: pickle.dump(self._cache, f, pickle.HIGHEST_PROTOCOL) def cache(self, key: str, value: object):", "size is not considering child's self.log.info(self._cache_path + ' entries: '", "+ ' size: ' + bytes_2_human_readable( sys.getsizeof(self._cache))) def persist(self): with", "\"\"\" cache tracks the age of an entry, may be", "' + str(len(self._cache)) + ' size: ' + bytes_2_human_readable( sys.getsizeof(self._cache)))", "key: :param value: :return: \"\"\" self._lock.acquire() if value is not", ":param key: :return: \"\"\" self._lock.acquire() result = None value =", ":param list_of_keys: :return: \"\"\" sorted(list_of_keys) return \"#\".join(list_of_keys) def _pack(self, value):", "old entries without timestamp if isinstance(value, str) or isinstance(value, int):", "List[str], value): \"\"\" helper to cache multi (string)key values. 
They", "be relative to the root.py file \"\"\" self.log = logging.getLogger('GiveMe5W')", "import logging import os import pickle import sys import threading", ":return: \"\"\" self._lock.acquire() result = None value = self._cache.get(key) if", "age of an entry, may be helpful in the future", "self._cache = {} if cache_path and os.path.isfile(self._cache_path) and os.path.getsize(self._cache_path) >", "import sys import threading import time from typing import List", "considered as invalid results (ToughRequest) is producing none for exceptions", "\"\"\" return [value, str(time.time())] def _unpack(self, value): \"\"\" removes the", "to the root.py file \"\"\" self.log = logging.getLogger('GiveMe5W') # resolve", "timestamp if isinstance(value, str) or isinstance(value, int): return value return", "' + str(key) + ': ' + str(value)) result =", "pickle.dump(self._cache, f, pickle.HIGHEST_PROTOCOL) def cache(self, key: str, value: object): \"\"\"", "form disc, if any with open(self._cache_path, 'rb') as ff: self._cache", "multi (string)key values. 
They are sorted before concatenation, therefore an", "+ ' CACHED: ' + str(key) + ': ' +", "cache_path: path to cache, must be relative to the root.py", "list_of_keys: List[str]): \"\"\" sorts list_of_keys, concatenates with # for readability", "with # for readability :param list_of_keys: :return: \"\"\" sorted(list_of_keys) return", "= self._cache.get(key) if value is not None: self.log.debug(self._cache_path + '", "not None: self.log.debug(self._cache_path + ' LOADED: ' + str(key) +", "without timestamp if isinstance(value, str) or isinstance(value, int): return value", "return [value, str(time.time())] def _unpack(self, value): \"\"\" removes the timestamp", "open(self._cache_path, 'wb') as f: pickle.dump(self._cache, f, pickle.HIGHEST_PROTOCOL) def cache(self, key:", "Giveme5W1H.extractor.tools.util import bytes_2_human_readable class KeyValueCache(object): def __init__(self, cache_path): \"\"\" :param", "path relative to the path file self._cache_path = path(cache_path) #", "object form disc, if any with open(self._cache_path, 'rb') as ff:", "sys import threading import time from typing import List from", "self.persist() self._lock.release() def get(self, key): \"\"\" Read cache entries :param", "def __init__(self, cache_path): \"\"\" :param cache_path: path to cache, must", "an order is determined. 
\"\"\" self.cache(self._get_id(list_of_keys), value) def _get_id(self, list_of_keys:", "producing none for exceptions set -1 if you want to", "return result def get_complex(self, list_of_keys: List[str]): \"\"\" Read complex cache", "bytes_2_human_readable class KeyValueCache(object): def __init__(self, cache_path): \"\"\" :param cache_path: path", "f, pickle.HIGHEST_PROTOCOL) def cache(self, key: str, value: object): \"\"\" None", "os.path.isfile(self._cache_path) and os.path.getsize(self._cache_path) > 0: # reload cache object form", "self._lock.release() return result def get_complex(self, list_of_keys: List[str]): \"\"\" Read complex", "pickle import sys import threading import time from typing import", "' + str(value)) result = self._unpack(value) self._lock.release() return result def", "> 0: # reload cache object form disc, if any", "cache object form disc, if any with open(self._cache_path, 'rb') as", "# there are some old entries without timestamp if isinstance(value,", "def _unpack(self, value): \"\"\" removes the timestamp around the cached", "cache, must be relative to the root.py file \"\"\" self.log", "': ' + str(value)) self.persist() self._lock.release() def get(self, key): \"\"\"", "entries \"\"\" return self.get(self._get_id(list_of_keys)) def cache_complex(self, list_of_keys: List[str], value): \"\"\"", "from Giveme5W1H.extractor.tools.util import bytes_2_human_readable class KeyValueCache(object): def __init__(self, cache_path): \"\"\"", "' + str(key) + ': ' + str(value)) self.persist() self._lock.release()", "for readability :param list_of_keys: :return: \"\"\" sorted(list_of_keys) return \"#\".join(list_of_keys) def", ":return: \"\"\" return [value, str(time.time())] def _unpack(self, value): \"\"\" removes", "must be relative to the root.py file \"\"\" self.log =", "def get_complex(self, list_of_keys: List[str]): \"\"\" Read complex cache entries \"\"\"", "str(time.time())] def _unpack(self, value): \"\"\" removes the timestamp around the", 
"self._pack(value); self.log.debug(self._cache_path + ' CACHED: ' + str(key) + ':", "get(self, key): \"\"\" Read cache entries :param key: :return: \"\"\"", "restored') self.log_stats() else: self._cache = {} self._lock = threading.Lock() def", "self._cache_path + '.prickle' self._cache = {} if cache_path and os.path.isfile(self._cache_path)", "(ToughRequest) is producing none for exceptions set -1 if you", "result def get_complex(self, list_of_keys: List[str]): \"\"\" Read complex cache entries", ":param key: :param value: :return: \"\"\" self._lock.acquire() if value is", "meaningful extension self._cache_path = self._cache_path + '.prickle' self._cache = {}", "import pickle import sys import threading import time from typing", "before concatenation, therefore an order is determined. \"\"\" self.cache(self._get_id(list_of_keys), value)", "are sorted before concatenation, therefore an order is determined. \"\"\"", "entries :param key: :return: \"\"\" self._lock.acquire() result = None value", "exceptions set -1 if you want to store \"No distance\"", "= {} self._lock = threading.Lock() def log_stats(self): # size is", "def log_stats(self): # size is not considering child's self.log.info(self._cache_path +", "(string)key values. 
They are sorted before concatenation, therefore an order", "value: :return: \"\"\" return [value, str(time.time())] def _unpack(self, value): \"\"\"", "cache entries \"\"\" return self.get(self._get_id(list_of_keys)) def cache_complex(self, list_of_keys: List[str], value):", "def _get_id(self, list_of_keys: List[str]): \"\"\" sorts list_of_keys, concatenates with #", "helpful in the future :param value: :return: \"\"\" return [value,", "if you want to store \"No distance\" :param key: :param", "if cache_path and os.path.isfile(self._cache_path) and os.path.getsize(self._cache_path) > 0: # reload", "readability :param list_of_keys: :return: \"\"\" sorted(list_of_keys) return \"#\".join(list_of_keys) def _pack(self,", "and os.path.getsize(self._cache_path) > 0: # reload cache object form disc,", "root.py file \"\"\" self.log = logging.getLogger('GiveMe5W') # resolve path relative", "Giveme5W1H.extractor.root import path from Giveme5W1H.extractor.tools.util import bytes_2_human_readable class KeyValueCache(object): def", "pickle.HIGHEST_PROTOCOL) def cache(self, key: str, value: object): \"\"\" None values", "self._lock = threading.Lock() def log_stats(self): # size is not considering", "+ ' LOADED: ' + str(key) + ': ' +", "= pickle.load(ff) self.log.debug('KeyValueCache: ' + self._cache_path + ' restored') self.log_stats()", "_get_id(self, list_of_keys: List[str]): \"\"\" sorts list_of_keys, concatenates with # for", "sys.getsizeof(self._cache))) def persist(self): with open(self._cache_path, 'wb') as f: pickle.dump(self._cache, f,", "[value, str(time.time())] def _unpack(self, value): \"\"\" removes the timestamp around", "is not None: self._cache[key] = self._pack(value); self.log.debug(self._cache_path + ' CACHED:", "list_of_keys: :return: \"\"\" sorted(list_of_keys) return \"#\".join(list_of_keys) def _pack(self, value): \"\"\"", "not considering child's self.log.info(self._cache_path + ' entries: ' + str(len(self._cache))", "if value is not None: 
self.log.debug(self._cache_path + ' LOADED: '", "\"\"\" return self.get(self._get_id(list_of_keys)) def cache_complex(self, list_of_keys: List[str], value): \"\"\" helper", "persist(self): with open(self._cache_path, 'wb') as f: pickle.dump(self._cache, f, pickle.HIGHEST_PROTOCOL) def", "list_of_keys: List[str], value): \"\"\" helper to cache multi (string)key values.", "self.cache(self._get_id(list_of_keys), value) def _get_id(self, list_of_keys: List[str]): \"\"\" sorts list_of_keys, concatenates", "0: # reload cache object form disc, if any with", "therefore an order is determined. \"\"\" self.cache(self._get_id(list_of_keys), value) def _get_id(self,", "future :param value: :return: \"\"\" return [value, str(time.time())] def _unpack(self,", "typing import List from Giveme5W1H.extractor.root import path from Giveme5W1H.extractor.tools.util import", "and os.path.isfile(self._cache_path) and os.path.getsize(self._cache_path) > 0: # reload cache object", "CACHED: ' + str(key) + ': ' + str(value)) self.persist()", "order is determined. 
\"\"\" self.cache(self._get_id(list_of_keys), value) def _get_id(self, list_of_keys: List[str]):", "# resolve path relative to the path file self._cache_path =", "if any with open(self._cache_path, 'rb') as ff: self._cache = pickle.load(ff)", "sorts list_of_keys, concatenates with # for readability :param list_of_keys: :return:", "\"\"\" self.cache(self._get_id(list_of_keys), value) def _get_id(self, list_of_keys: List[str]): \"\"\" sorts list_of_keys,", "\"\"\" # there are some old entries without timestamp if", "' + self._cache_path + ' restored') self.log_stats() else: self._cache =", "LOADED: ' + str(key) + ': ' + str(value)) result", "time from typing import List from Giveme5W1H.extractor.root import path from", "= self._cache_path + '.prickle' self._cache = {} if cache_path and", "tracks the age of an entry, may be helpful in", "str(key) + ': ' + str(value)) result = self._unpack(value) self._lock.release()", "from typing import List from Giveme5W1H.extractor.root import path from Giveme5W1H.extractor.tools.util", "= {} if cache_path and os.path.isfile(self._cache_path) and os.path.getsize(self._cache_path) > 0:", "None value = self._cache.get(key) if value is not None: self.log.debug(self._cache_path", "\"\"\" self.log = logging.getLogger('GiveMe5W') # resolve path relative to the", "open(self._cache_path, 'rb') as ff: self._cache = pickle.load(ff) self.log.debug('KeyValueCache: ' +", "+ ': ' + str(value)) self.persist() self._lock.release() def get(self, key):", "self.log.info(self._cache_path + ' entries: ' + str(len(self._cache)) + ' size:", "self._cache_path + ' restored') self.log_stats() else: self._cache = {} self._lock", "result = None value = self._cache.get(key) if value is not" ]
[ "if __name__ == '__main__': parser = ArgumentParser() parser.add_argument(\"--nsst_file\", default=\"output/nsst_tss20_th4_nSt100_Q0.pkl\", help=\"nsst", "word in src.split(\" \") if len(word)] token_tgt = [nsst.tokenization_tgt[word] if", "[w for w in reg[0].split(' ') if len(w)] pred_str =", "= ArgumentParser() parser.add_argument(\"--nsst_file\", default=\"output/nsst_tss20_th4_nSt100_Q0.pkl\", help=\"nsst file\") parser.add_argument(\"--src_lang\", default=\"output/europarl-v7.de-en.de.clean\") parser.add_argument(\"--tgt_lang\", default=\"output/europarl-v7.de-en.en.clean\")", "src, tgt, _ in tqdm(list(zip(src_file, tgt_file, range(4096))), desc=\"Processing sentences\"): #", "range(4096))), desc=\"Processing sentences\"): # remove line breaks src = src[:-1]", "key=lambda x: x[2], reverse=True)[0] n_res = len(result) q, reg, prob", "False # load NSST nsst = NSST.NSST() nsst.load(args.nsst_file) args.nsst =", "tgt[:-1] # try to translate try: # prepare tokenisations token_src", "args.input = src args.token_src = token_src result = best_transition_sequence(args) #", "= tgt[:-1] # try to translate try: # prepare tokenisations", "for t in token_pred: pred_str += f\"{nsst.tokenization_tgt_lut[int(t)]} \" token_src_str =", "f\"{t} \" token_pred_str = \"\" for t in token_pred: token_pred_str", "nsst_translate import best_transition_sequence if __name__ == '__main__': parser = ArgumentParser()", "if ('Qf' in args.nsst_file or not args.enforce_n_final_reg or len(k[1]) ==", "or not args.enforce_n_final_reg or len(k[1]) == 1) and ('Q0' in", "pred # write to csv if not len(reg): # catch", "over sentences, first 4096 -> test sentences for src, tgt,", "NSST from nsst_translate import best_transition_sequence if __name__ == '__main__': parser", "q, reg, prob = pred # write to csv if", "= open(args.output, 'w') # iterate over sentences, first 4096 ->", "-1) ), key=lambda x: x[2], reverse=True)[0] n_res = len(result) q,", "import NSST from nsst_translate import 
best_transition_sequence if __name__ == '__main__':", "output_file = open(args.output, 'w') # iterate over sentences, first 4096", "tgt, _ in tqdm(list(zip(src_file, tgt_file, range(4096))), desc=\"Processing sentences\"): # remove", "args.token_src = token_src result = best_transition_sequence(args) # get best result", "= sorted((k for k in result if ('Qf' in args.nsst_file", "test sentences for src, tgt, _ in tqdm(list(zip(src_file, tgt_file, range(4096))),", "\" print(f\"{src};{token_src_str[:-1]};\" f\"{tgt};{token_tgt_str[:-1]};\" f\"{pred_str};{token_pred_str[:-1]};\" f\"{prob};{len(reg)};{n_res}\", file=output_file) output_file.flush() except RuntimeError: pass", "[nsst.tokenization_src[word] if word in nsst.tokenization_src else 0 for word in", "desc=\"Processing sentences\"): # remove line breaks src = src[:-1] tgt", "open(args.src_lang, 'r') tgt_file = open(args.tgt_lang, 'r') output_file = open(args.output, 'w')", "print(f\"{src};{token_src_str[:-1]};\" f\"{tgt};{token_tgt_str[:-1]};\" f\"{pred_str};{token_pred_str[:-1]};\" f\"{prob};{len(reg)};{n_res}\", file=output_file) output_file.flush() except RuntimeError: pass #", "src_file = open(args.src_lang, 'r') tgt_file = open(args.tgt_lang, 'r') output_file =", "or len(k[1]) == 1) and ('Q0' in args.nsst_file or k[0]", "write to csv if not len(reg): # catch empty registers", "= open(args.tgt_lang, 'r') output_file = open(args.output, 'w') # iterate over", "= best_transition_sequence(args) # get best result pred = sorted((k for", "file=output_file) output_file.flush() except RuntimeError: pass # close files src_file.close() tgt_file.close()", "parser.add_argument(\"--tgt_lang\", default=\"output/europarl-v7.de-en.en.clean\") parser.add_argument(\"--enforce_n_reg\", default=True) parser.add_argument(\"--output\", default=f\"output/nsst_stat_nreg_100Q0.csv\") args = parser.parse_args() args.enforce_n_final_reg", "try: # prepare tokenisations token_src = [nsst.tokenization_src[word] if word in", "sorted((k for 
k in result if ('Qf' in args.nsst_file or", "else 0 for word in tgt.split(\" \") if len(word)] #", "result = best_transition_sequence(args) # get best result pred = sorted((k", "__name__ == '__main__': parser = ArgumentParser() parser.add_argument(\"--nsst_file\", default=\"output/nsst_tss20_th4_nSt100_Q0.pkl\", help=\"nsst file\")", "first 4096 -> test sentences for src, tgt, _ in", "= [nsst.tokenization_tgt[word] if word in nsst.tokenization_tgt else 0 for word", "= \"\" for t in token_pred: token_pred_str += f\"{t} \"", "tgt = tgt[:-1] # try to translate try: # prepare", "'r') tgt_file = open(args.tgt_lang, 'r') output_file = open(args.output, 'w') #", "\" token_src_str = \"\" for t in token_src: token_src_str +=", "= src args.token_src = token_src result = best_transition_sequence(args) # get", "('Qf' in args.nsst_file or not args.enforce_n_final_reg or len(k[1]) == 1)", "= nsst # open files src_file = open(args.src_lang, 'r') tgt_file", "== -1) ), key=lambda x: x[2], reverse=True)[0] n_res = len(result)", "tgt_file = open(args.tgt_lang, 'r') output_file = open(args.output, 'w') # iterate", "token_src: token_src_str += f\"{t} \" token_tgt_str = \"\" for t", "w in reg[0].split(' ') if len(w)] pred_str = \"\" for", "try to translate try: # prepare tokenisations token_src = [nsst.tokenization_src[word]", "+= f\"{t} \" token_tgt_str = \"\" for t in token_tgt:", "in token_tgt: token_tgt_str += f\"{t} \" token_pred_str = \"\" for", "\"\" for t in token_pred: token_pred_str += f\"{t} \" print(f\"{src};{token_src_str[:-1]};\"", "load NSST nsst = NSST.NSST() nsst.load(args.nsst_file) args.nsst = nsst #", "+= f\"{t} \" print(f\"{src};{token_src_str[:-1]};\" f\"{tgt};{token_tgt_str[:-1]};\" f\"{pred_str};{token_pred_str[:-1]};\" f\"{prob};{len(reg)};{n_res}\", file=output_file) output_file.flush() except", "reg, prob = pred # write to csv if not", "in args.nsst_file or k[0] == -1) ), key=lambda x: x[2],", "open(args.output, 'w') # iterate over sentences, first 4096 -> 
test", "src[:-1] tgt = tgt[:-1] # try to translate try: #", "'__main__': parser = ArgumentParser() parser.add_argument(\"--nsst_file\", default=\"output/nsst_tss20_th4_nSt100_Q0.pkl\", help=\"nsst file\") parser.add_argument(\"--src_lang\", default=\"output/europarl-v7.de-en.de.clean\")", "remove line breaks src = src[:-1] tgt = tgt[:-1] #", "tokenisations token_src = [nsst.tokenization_src[word] if word in nsst.tokenization_src else 0", "= NSST.NSST() nsst.load(args.nsst_file) args.nsst = nsst # open files src_file", "x: x[2], reverse=True)[0] n_res = len(result) q, reg, prob =", "and ('Q0' in args.nsst_file or k[0] == -1) ), key=lambda", "default=\"output/europarl-v7.de-en.de.clean\") parser.add_argument(\"--tgt_lang\", default=\"output/europarl-v7.de-en.en.clean\") parser.add_argument(\"--enforce_n_reg\", default=True) parser.add_argument(\"--output\", default=f\"output/nsst_stat_nreg_100Q0.csv\") args = parser.parse_args()", "tqdm import tqdm import NSST from nsst_translate import best_transition_sequence if", "# get best result pred = sorted((k for k in", "default=\"output/europarl-v7.de-en.en.clean\") parser.add_argument(\"--enforce_n_reg\", default=True) parser.add_argument(\"--output\", default=f\"output/nsst_stat_nreg_100Q0.csv\") args = parser.parse_args() args.enforce_n_final_reg =", "4096 -> test sentences for src, tgt, _ in tqdm(list(zip(src_file,", "= \"\" for t in token_src: token_src_str += f\"{t} \"", "in token_src: token_src_str += f\"{t} \" token_tgt_str = \"\" for", "in nsst.tokenization_src else 0 for word in src.split(\" \") if", "\") if len(word)] # run nsst args.input = src args.token_src", "n_res = len(result) q, reg, prob = pred # write", "csv if not len(reg): # catch empty registers continue token_pred", "translate try: # prepare tokenisations token_src = [nsst.tokenization_src[word] if word", "# open files src_file = open(args.src_lang, 'r') tgt_file = open(args.tgt_lang,", "pred_str = \"\" for t in token_pred: pred_str += 
f\"{nsst.tokenization_tgt_lut[int(t)]}", "f\"{nsst.tokenization_tgt_lut[int(t)]} \" token_src_str = \"\" for t in token_src: token_src_str", "token_src_str = \"\" for t in token_src: token_src_str += f\"{t}", "or k[0] == -1) ), key=lambda x: x[2], reverse=True)[0] n_res", "# load NSST nsst = NSST.NSST() nsst.load(args.nsst_file) args.nsst = nsst", "if word in nsst.tokenization_tgt else 0 for word in tgt.split(\"", "nsst.tokenization_src else 0 for word in src.split(\" \") if len(word)]", "x[2], reverse=True)[0] n_res = len(result) q, reg, prob = pred", "parser.add_argument(\"--output\", default=f\"output/nsst_stat_nreg_100Q0.csv\") args = parser.parse_args() args.enforce_n_final_reg = False # load", "len(word)] token_tgt = [nsst.tokenization_tgt[word] if word in nsst.tokenization_tgt else 0", "catch empty registers continue token_pred = [w for w in", "token_pred: token_pred_str += f\"{t} \" print(f\"{src};{token_src_str[:-1]};\" f\"{tgt};{token_tgt_str[:-1]};\" f\"{pred_str};{token_pred_str[:-1]};\" f\"{prob};{len(reg)};{n_res}\", file=output_file)", "for t in token_tgt: token_tgt_str += f\"{t} \" token_pred_str =", "if len(word)] token_tgt = [nsst.tokenization_tgt[word] if word in nsst.tokenization_tgt else", "reverse=True)[0] n_res = len(result) q, reg, prob = pred #", "= pred # write to csv if not len(reg): #", "best_transition_sequence if __name__ == '__main__': parser = ArgumentParser() parser.add_argument(\"--nsst_file\", default=\"output/nsst_tss20_th4_nSt100_Q0.pkl\",", "= \"\" for t in token_pred: pred_str += f\"{nsst.tokenization_tgt_lut[int(t)]} \"", "t in token_pred: pred_str += f\"{nsst.tokenization_tgt_lut[int(t)]} \" token_src_str = \"\"", "in args.nsst_file or not args.enforce_n_final_reg or len(k[1]) == 1) and", "\") if len(word)] token_tgt = [nsst.tokenization_tgt[word] if word in nsst.tokenization_tgt", "nsst.tokenization_tgt else 0 for word in tgt.split(\" \") if len(word)]", "len(k[1]) == 1) and ('Q0' in args.nsst_file or k[0] ==", 
"src.split(\" \") if len(word)] token_tgt = [nsst.tokenization_tgt[word] if word in", "not len(reg): # catch empty registers continue token_pred = [w", "len(reg): # catch empty registers continue token_pred = [w for", "prepare tokenisations token_src = [nsst.tokenization_src[word] if word in nsst.tokenization_src else", "token_src result = best_transition_sequence(args) # get best result pred =", "from tqdm import tqdm import NSST from nsst_translate import best_transition_sequence", "continue token_pred = [w for w in reg[0].split(' ') if", "in token_pred: pred_str += f\"{nsst.tokenization_tgt_lut[int(t)]} \" token_src_str = \"\" for", "pred_str += f\"{nsst.tokenization_tgt_lut[int(t)]} \" token_src_str = \"\" for t in", "for w in reg[0].split(' ') if len(w)] pred_str = \"\"", "for t in token_pred: token_pred_str += f\"{t} \" print(f\"{src};{token_src_str[:-1]};\" f\"{tgt};{token_tgt_str[:-1]};\"", "_ in tqdm(list(zip(src_file, tgt_file, range(4096))), desc=\"Processing sentences\"): # remove line", "= open(args.src_lang, 'r') tgt_file = open(args.tgt_lang, 'r') output_file = open(args.output,", "if not len(reg): # catch empty registers continue token_pred =", "t in token_tgt: token_tgt_str += f\"{t} \" token_pred_str = \"\"", "token_tgt: token_tgt_str += f\"{t} \" token_pred_str = \"\" for t", "else 0 for word in src.split(\" \") if len(word)] token_tgt", "f\"{tgt};{token_tgt_str[:-1]};\" f\"{pred_str};{token_pred_str[:-1]};\" f\"{prob};{len(reg)};{n_res}\", file=output_file) output_file.flush() except RuntimeError: pass # close", "f\"{pred_str};{token_pred_str[:-1]};\" f\"{prob};{len(reg)};{n_res}\", file=output_file) output_file.flush() except RuntimeError: pass # close files", "for word in tgt.split(\" \") if len(word)] # run nsst", "NSST nsst = NSST.NSST() nsst.load(args.nsst_file) args.nsst = nsst # open", "= False # load NSST nsst = NSST.NSST() nsst.load(args.nsst_file) args.nsst", "args.nsst_file or not args.enforce_n_final_reg or len(k[1]) == 1) and 
('Q0'", "in reg[0].split(' ') if len(w)] pred_str = \"\" for t", "token_pred = [w for w in reg[0].split(' ') if len(w)]", "pred = sorted((k for k in result if ('Qf' in", "NSST.NSST() nsst.load(args.nsst_file) args.nsst = nsst # open files src_file =", "in src.split(\" \") if len(word)] token_tgt = [nsst.tokenization_tgt[word] if word", "k in result if ('Qf' in args.nsst_file or not args.enforce_n_final_reg", "sentences, first 4096 -> test sentences for src, tgt, _", "args.enforce_n_final_reg or len(k[1]) == 1) and ('Q0' in args.nsst_file or", "for t in token_src: token_src_str += f\"{t} \" token_tgt_str =", "# remove line breaks src = src[:-1] tgt = tgt[:-1]", "sentences for src, tgt, _ in tqdm(list(zip(src_file, tgt_file, range(4096))), desc=\"Processing", "argparse import ArgumentParser from tqdm import tqdm import NSST from", "tqdm import NSST from nsst_translate import best_transition_sequence if __name__ ==", "('Q0' in args.nsst_file or k[0] == -1) ), key=lambda x:", "= token_src result = best_transition_sequence(args) # get best result pred", "\" token_tgt_str = \"\" for t in token_tgt: token_tgt_str +=", "open(args.tgt_lang, 'r') output_file = open(args.output, 'w') # iterate over sentences,", "token_tgt = [nsst.tokenization_tgt[word] if word in nsst.tokenization_tgt else 0 for", "to csv if not len(reg): # catch empty registers continue", "reg[0].split(' ') if len(w)] pred_str = \"\" for t in", "nsst args.input = src args.token_src = token_src result = best_transition_sequence(args)", "in tqdm(list(zip(src_file, tgt_file, range(4096))), desc=\"Processing sentences\"): # remove line breaks", "tqdm(list(zip(src_file, tgt_file, range(4096))), desc=\"Processing sentences\"): # remove line breaks src", "ArgumentParser from tqdm import tqdm import NSST from nsst_translate import", "default=True) parser.add_argument(\"--output\", default=f\"output/nsst_stat_nreg_100Q0.csv\") args = parser.parse_args() args.enforce_n_final_reg = False #", "word in 
nsst.tokenization_tgt else 0 for word in tgt.split(\" \")", "tgt.split(\" \") if len(word)] # run nsst args.input = src", "result pred = sorted((k for k in result if ('Qf'", "= [nsst.tokenization_src[word] if word in nsst.tokenization_src else 0 for word", "not args.enforce_n_final_reg or len(k[1]) == 1) and ('Q0' in args.nsst_file", "from argparse import ArgumentParser from tqdm import tqdm import NSST", "import tqdm import NSST from nsst_translate import best_transition_sequence if __name__", "[nsst.tokenization_tgt[word] if word in nsst.tokenization_tgt else 0 for word in", "# iterate over sentences, first 4096 -> test sentences for", "token_src = [nsst.tokenization_src[word] if word in nsst.tokenization_src else 0 for", "import ArgumentParser from tqdm import tqdm import NSST from nsst_translate", "0 for word in src.split(\" \") if len(word)] token_tgt =", "== 1) and ('Q0' in args.nsst_file or k[0] == -1)", "# catch empty registers continue token_pred = [w for w", "len(w)] pred_str = \"\" for t in token_pred: pred_str +=", "empty registers continue token_pred = [w for w in reg[0].split('", "tgt_file, range(4096))), desc=\"Processing sentences\"): # remove line breaks src =", "# prepare tokenisations token_src = [nsst.tokenization_src[word] if word in nsst.tokenization_src", "prob = pred # write to csv if not len(reg):", "word in tgt.split(\" \") if len(word)] # run nsst args.input", "in result if ('Qf' in args.nsst_file or not args.enforce_n_final_reg or", "parser.add_argument(\"--nsst_file\", default=\"output/nsst_tss20_th4_nSt100_Q0.pkl\", help=\"nsst file\") parser.add_argument(\"--src_lang\", default=\"output/europarl-v7.de-en.de.clean\") parser.add_argument(\"--tgt_lang\", default=\"output/europarl-v7.de-en.en.clean\") parser.add_argument(\"--enforce_n_reg\", default=True)", "get best result pred = sorted((k for k in result", "# try to translate try: # prepare tokenisations token_src =", "token_src_str += f\"{t} \" token_tgt_str = \"\" for t in", 
"token_pred_str = \"\" for t in token_pred: token_pred_str += f\"{t}", "from nsst_translate import best_transition_sequence if __name__ == '__main__': parser =", "1) and ('Q0' in args.nsst_file or k[0] == -1) ),", "args.nsst = nsst # open files src_file = open(args.src_lang, 'r')", "\" token_pred_str = \"\" for t in token_pred: token_pred_str +=", "ArgumentParser() parser.add_argument(\"--nsst_file\", default=\"output/nsst_tss20_th4_nSt100_Q0.pkl\", help=\"nsst file\") parser.add_argument(\"--src_lang\", default=\"output/europarl-v7.de-en.de.clean\") parser.add_argument(\"--tgt_lang\", default=\"output/europarl-v7.de-en.en.clean\") parser.add_argument(\"--enforce_n_reg\",", "if len(word)] # run nsst args.input = src args.token_src =", "files src_file = open(args.src_lang, 'r') tgt_file = open(args.tgt_lang, 'r') output_file", "f\"{t} \" print(f\"{src};{token_src_str[:-1]};\" f\"{tgt};{token_tgt_str[:-1]};\" f\"{pred_str};{token_pred_str[:-1]};\" f\"{prob};{len(reg)};{n_res}\", file=output_file) output_file.flush() except RuntimeError:", "args.enforce_n_final_reg = False # load NSST nsst = NSST.NSST() nsst.load(args.nsst_file)", "line breaks src = src[:-1] tgt = tgt[:-1] # try", "run nsst args.input = src args.token_src = token_src result =", "token_pred_str += f\"{t} \" print(f\"{src};{token_src_str[:-1]};\" f\"{tgt};{token_tgt_str[:-1]};\" f\"{pred_str};{token_pred_str[:-1]};\" f\"{prob};{len(reg)};{n_res}\", file=output_file) output_file.flush()", "), key=lambda x: x[2], reverse=True)[0] n_res = len(result) q, reg,", "parser.add_argument(\"--src_lang\", default=\"output/europarl-v7.de-en.de.clean\") parser.add_argument(\"--tgt_lang\", default=\"output/europarl-v7.de-en.en.clean\") parser.add_argument(\"--enforce_n_reg\", default=True) parser.add_argument(\"--output\", default=f\"output/nsst_stat_nreg_100Q0.csv\") args =", "'r') output_file = open(args.output, 'w') # iterate over sentences, first", "for k in result if ('Qf' in args.nsst_file or not", "registers 
continue token_pred = [w for w in reg[0].split(' ')", "src = src[:-1] tgt = tgt[:-1] # try to translate", "') if len(w)] pred_str = \"\" for t in token_pred:", "+= f\"{nsst.tokenization_tgt_lut[int(t)]} \" token_src_str = \"\" for t in token_src:", "if word in nsst.tokenization_src else 0 for word in src.split(\"", "nsst = NSST.NSST() nsst.load(args.nsst_file) args.nsst = nsst # open files", "# write to csv if not len(reg): # catch empty", "default=f\"output/nsst_stat_nreg_100Q0.csv\") args = parser.parse_args() args.enforce_n_final_reg = False # load NSST", "breaks src = src[:-1] tgt = tgt[:-1] # try to", "len(word)] # run nsst args.input = src args.token_src = token_src", "nsst # open files src_file = open(args.src_lang, 'r') tgt_file =", "default=\"output/nsst_tss20_th4_nSt100_Q0.pkl\", help=\"nsst file\") parser.add_argument(\"--src_lang\", default=\"output/europarl-v7.de-en.de.clean\") parser.add_argument(\"--tgt_lang\", default=\"output/europarl-v7.de-en.en.clean\") parser.add_argument(\"--enforce_n_reg\", default=True) parser.add_argument(\"--output\",", "# run nsst args.input = src args.token_src = token_src result", "result if ('Qf' in args.nsst_file or not args.enforce_n_final_reg or len(k[1])", "best_transition_sequence(args) # get best result pred = sorted((k for k", "f\"{prob};{len(reg)};{n_res}\", file=output_file) output_file.flush() except RuntimeError: pass # close files src_file.close()", "to translate try: # prepare tokenisations token_src = [nsst.tokenization_src[word] if", "\"\" for t in token_src: token_src_str += f\"{t} \" token_tgt_str", "= \"\" for t in token_tgt: token_tgt_str += f\"{t} \"", "0 for word in tgt.split(\" \") if len(word)] # run", "output_file.flush() except RuntimeError: pass # close files src_file.close() tgt_file.close() output_file.close()", "token_tgt_str += f\"{t} \" token_pred_str = \"\" for t in", "args = parser.parse_args() args.enforce_n_final_reg = False # load NSST nsst", "k[0] == -1) ), key=lambda x: x[2], 
reverse=True)[0] n_res =", "== '__main__': parser = ArgumentParser() parser.add_argument(\"--nsst_file\", default=\"output/nsst_tss20_th4_nSt100_Q0.pkl\", help=\"nsst file\") parser.add_argument(\"--src_lang\",", "= parser.parse_args() args.enforce_n_final_reg = False # load NSST nsst =", "+= f\"{t} \" token_pred_str = \"\" for t in token_pred:", "help=\"nsst file\") parser.add_argument(\"--src_lang\", default=\"output/europarl-v7.de-en.de.clean\") parser.add_argument(\"--tgt_lang\", default=\"output/europarl-v7.de-en.en.clean\") parser.add_argument(\"--enforce_n_reg\", default=True) parser.add_argument(\"--output\", default=f\"output/nsst_stat_nreg_100Q0.csv\")", "parser.parse_args() args.enforce_n_final_reg = False # load NSST nsst = NSST.NSST()", "args.nsst_file or k[0] == -1) ), key=lambda x: x[2], reverse=True)[0]", "f\"{t} \" token_tgt_str = \"\" for t in token_tgt: token_tgt_str", "in nsst.tokenization_tgt else 0 for word in tgt.split(\" \") if", "if len(w)] pred_str = \"\" for t in token_pred: pred_str", "t in token_src: token_src_str += f\"{t} \" token_tgt_str = \"\"", "'w') # iterate over sentences, first 4096 -> test sentences", "sentences\"): # remove line breaks src = src[:-1] tgt =", "in tgt.split(\" \") if len(word)] # run nsst args.input =", "= len(result) q, reg, prob = pred # write to", "for src, tgt, _ in tqdm(list(zip(src_file, tgt_file, range(4096))), desc=\"Processing sentences\"):", "\"\" for t in token_pred: pred_str += f\"{nsst.tokenization_tgt_lut[int(t)]} \" token_src_str", "iterate over sentences, first 4096 -> test sentences for src,", "= [w for w in reg[0].split(' ') if len(w)] pred_str", "= src[:-1] tgt = tgt[:-1] # try to translate try:", "parser = ArgumentParser() parser.add_argument(\"--nsst_file\", default=\"output/nsst_tss20_th4_nSt100_Q0.pkl\", help=\"nsst file\") parser.add_argument(\"--src_lang\", default=\"output/europarl-v7.de-en.de.clean\") parser.add_argument(\"--tgt_lang\",", "parser.add_argument(\"--enforce_n_reg\", 
default=True) parser.add_argument(\"--output\", default=f\"output/nsst_stat_nreg_100Q0.csv\") args = parser.parse_args() args.enforce_n_final_reg = False", "-> test sentences for src, tgt, _ in tqdm(list(zip(src_file, tgt_file,", "src args.token_src = token_src result = best_transition_sequence(args) # get best", "file\") parser.add_argument(\"--src_lang\", default=\"output/europarl-v7.de-en.de.clean\") parser.add_argument(\"--tgt_lang\", default=\"output/europarl-v7.de-en.en.clean\") parser.add_argument(\"--enforce_n_reg\", default=True) parser.add_argument(\"--output\", default=f\"output/nsst_stat_nreg_100Q0.csv\") args", "for word in src.split(\" \") if len(word)] token_tgt = [nsst.tokenization_tgt[word]", "t in token_pred: token_pred_str += f\"{t} \" print(f\"{src};{token_src_str[:-1]};\" f\"{tgt};{token_tgt_str[:-1]};\" f\"{pred_str};{token_pred_str[:-1]};\"", "import best_transition_sequence if __name__ == '__main__': parser = ArgumentParser() parser.add_argument(\"--nsst_file\",", "open files src_file = open(args.src_lang, 'r') tgt_file = open(args.tgt_lang, 'r')", "word in nsst.tokenization_src else 0 for word in src.split(\" \")", "nsst.load(args.nsst_file) args.nsst = nsst # open files src_file = open(args.src_lang,", "best result pred = sorted((k for k in result if", "len(result) q, reg, prob = pred # write to csv", "token_pred: pred_str += f\"{nsst.tokenization_tgt_lut[int(t)]} \" token_src_str = \"\" for t", "token_tgt_str = \"\" for t in token_tgt: token_tgt_str += f\"{t}", "\"\" for t in token_tgt: token_tgt_str += f\"{t} \" token_pred_str", "in token_pred: token_pred_str += f\"{t} \" print(f\"{src};{token_src_str[:-1]};\" f\"{tgt};{token_tgt_str[:-1]};\" f\"{pred_str};{token_pred_str[:-1]};\" f\"{prob};{len(reg)};{n_res}\"," ]
[ "sum(data) / len(data) def stddev(data, size): sum = 0 for", "functionts def mean(data): return sum(data) / len(data) def stddev(data, size):", "= sum + (data[i] - mean(data)) ** 2 return math.sqrt(sum", "math # Define functionts def mean(data): return sum(data) / len(data)", "Import library import math # Define functionts def mean(data): return", "sum = sum + (data[i] - mean(data)) ** 2 return", "Set data size = int(input()) numbers = list(map(int, input().split())) #", "0 for i in range(size): sum = sum + (data[i]", "(data[i] - mean(data)) ** 2 return math.sqrt(sum / size) #", "library import math # Define functionts def mean(data): return sum(data)", "mean(data): return sum(data) / len(data) def stddev(data, size): sum =", "size = int(input()) numbers = list(map(int, input().split())) # Get standard", "stddev(data, size): sum = 0 for i in range(size): sum", "sum + (data[i] - mean(data)) ** 2 return math.sqrt(sum /", "mean(data)) ** 2 return math.sqrt(sum / size) # Set data", "def mean(data): return sum(data) / len(data) def stddev(data, size): sum", "return math.sqrt(sum / size) # Set data size = int(input())", "import math # Define functionts def mean(data): return sum(data) /", "i in range(size): sum = sum + (data[i] - mean(data))", "/ size) # Set data size = int(input()) numbers =", "math.sqrt(sum / size) # Set data size = int(input()) numbers", "int(input()) numbers = list(map(int, input().split())) # Get standard deviation print(round(stddev(numbers,", "# Define functionts def mean(data): return sum(data) / len(data) def", "/ len(data) def stddev(data, size): sum = 0 for i", "= list(map(int, input().split())) # Get standard deviation print(round(stddev(numbers, size), 1))", "sum = 0 for i in range(size): sum = sum", "Define functionts def mean(data): return sum(data) / len(data) def stddev(data,", "for i in range(size): sum = sum + (data[i] -", "+ (data[i] - mean(data)) ** 2 return math.sqrt(sum / size)", "numbers = list(map(int, input().split())) # Get 
standard deviation print(round(stddev(numbers, size),", "size): sum = 0 for i in range(size): sum =", "# Import library import math # Define functionts def mean(data):", "** 2 return math.sqrt(sum / size) # Set data size", "2 return math.sqrt(sum / size) # Set data size =", "def stddev(data, size): sum = 0 for i in range(size):", "= 0 for i in range(size): sum = sum +", "len(data) def stddev(data, size): sum = 0 for i in", "data size = int(input()) numbers = list(map(int, input().split())) # Get", "return sum(data) / len(data) def stddev(data, size): sum = 0", "range(size): sum = sum + (data[i] - mean(data)) ** 2", "- mean(data)) ** 2 return math.sqrt(sum / size) # Set", "size) # Set data size = int(input()) numbers = list(map(int,", "# Set data size = int(input()) numbers = list(map(int, input().split()))", "= int(input()) numbers = list(map(int, input().split())) # Get standard deviation", "in range(size): sum = sum + (data[i] - mean(data)) **" ]
[ "if len(word1)==1: code+=word1+word2 return code else: code+=word1[0]+word2[0] return decode(word1[1:],word2[1:],code) Alice='Ti", "code+=word1[0]+word2[0] return decode(word1[1:],word2[1:],code) Alice='Ti rga eoe esg o h ore\"ermetsCmuainls'", "else: code+=word1[0]+word2[0] return decode(word1[1:],word2[1:],code) Alice='Ti rga eoe esg o h", "code+=word1+word2 return code else: code+=word1[0]+word2[0] return decode(word1[1:],word2[1:],code) Alice='Ti rga eoe", "return decode(word1[1:],word2[1:],code) Alice='Ti rga eoe esg o h ore\"ermetsCmuainls' Bob='hspormdcdsamsaefrte<NAME>ae\"'", "return code else: code+=word1[0]+word2[0] return decode(word1[1:],word2[1:],code) Alice='Ti rga eoe esg", "def decode(word1,word2,code): if len(word1)==1: code+=word1+word2 return code else: code+=word1[0]+word2[0] return", "decode(word1,word2,code): if len(word1)==1: code+=word1+word2 return code else: code+=word1[0]+word2[0] return decode(word1[1:],word2[1:],code)", "<reponame>jmsevillam/Herramientas-Computacionales-UniAndes def decode(word1,word2,code): if len(word1)==1: code+=word1+word2 return code else: code+=word1[0]+word2[0]", "code else: code+=word1[0]+word2[0] return decode(word1[1:],word2[1:],code) Alice='Ti rga eoe esg o", "decode(word1[1:],word2[1:],code) Alice='Ti rga eoe esg o h ore\"ermetsCmuainls' Bob='hspormdcdsamsaefrte<NAME>ae\"' print(decode(Alice,Bob,''))", "len(word1)==1: code+=word1+word2 return code else: code+=word1[0]+word2[0] return decode(word1[1:],word2[1:],code) Alice='Ti rga" ]
[ "2.0 (the \"License\"); # you may not use this file", "} ) self._plot_factory(g) return g def _plot_factory(self, graph): graph.new_plot( zoom=True,", "= True elif has_bounds: g.clear() self._plot_factory(g) self.warning_dialog(\"{} does not have", "zoom=True, pan=True, padding=[50, 10, 10, 40], xtitle=\"Setpoint (%)\", ytitle=\"Measured Power", "BoundsSelector(HasTraits): graph = Instance(Graph) def traits_view(self): v = View( Item(\"graph\",", "PowerCalibrationAdapter(name=name, kind=\"sqlite\") db.connect() return db def _selector_default(self): return self.db._selector_factory() if", "DelegatesTo, Button, List, Any, Float from traitsui.api import View, Item,", "import View, Item, VGroup, HGroup, Group, spring, TabularEditor # =============", "s in enumerate(self.selected_calibrations): if s.bounds: has_bounds = True elif has_bounds:", "= bounds s.calibration_bounds = ( polyval(s.coefficients, bounds[0]), polyval(s.coefficients, bounds[1]), )", "Instance(PowerCalibrationSelector) append = Button replace = Button load_graph = Button", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "def _dclicked_changed(self): s = self.selected if s is not None:", "HGroup, Group, spring, TabularEditor # ============= standard library imports ========================", "for s in self.selected_calibrations: coeffs.append(s.coefficients) bounds.append(s.calibration_bounds) pc.coefficients = coeffs pc.bounds", "style=\"custom\", show_label=False)) transfer_grp = VGroup( spring, VGroup(Item(\"append\", show_label=False), Item(\"replace\", show_label=False)),", "does not have its bounds set\".format(s.rid)) break s.load_graph(graph=g, new_plot=i ==", "for i, s in enumerate(self.selected_calibrations): if s.bounds: has_bounds = True", "polyval(s.coefficients, bounds[1]), ) def _append_fired(self): s = self.selector.selected if s", "i, s in enumerate(self.selected_calibrations): if s.bounds: has_bounds = True elif", "= VGroup( spring, 
VGroup(Item(\"append\", show_label=False), Item(\"replace\", show_label=False)), spring, ) editor", "_dump_calibration(self): pc = MeterCalibration() coeffs = [] bounds = []", "s.bounds: has_bounds = True elif has_bounds: g.clear() self._plot_factory(g) self.warning_dialog(\"{} does", "= Any parent_name = \"FusionsDiode\" power = Float input =", "Button save = Button selected_calibrations = List selected = Any", "if s is not None: for si in s: trs", "List selected = Any results = DelegatesTo(\"selector\") graph = Instance(Graph)", "not have its bounds set\".format(s.rid)) break s.load_graph(graph=g, new_plot=i == 0)", "use this file except in compliance with the License. #", "traitsui.api import View, Item, VGroup, HGroup, Group, spring, TabularEditor #", "pychron.database.adapters.power_calibration_adapter import PowerCalibrationAdapter from pychron.paths import paths from pychron.graph.graph import", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "style=\"custom\"), buttons=[\"OK\", \"Cancel\"], kind=\"livemodal\", ) return v class CompositeCalibrationManager(Manager): db", "License. 
# You may obtain a copy of the License", "self.selected_calibrations.append(si.clone_traits(traits=trs)) def _replace_fired(self): s = self.selector.selected trs = list(s.traits().keys()).remove(\"graph\") self.selected_calibrations", "pc def _dclicked_changed(self): s = self.selected if s is not", "as f: pickle.dump(pc, f) def _get_calibration_path(self): p = os.path.join( paths.hidden_dir,", "= View( VGroup(data_tab, process_tab), resizable=True, title=\"Composite {} Power Calibration\".format(self.parent_name), )", "kind=\"sqlite\") db.connect() return db def _selector_default(self): return self.db._selector_factory() if __name__", "under the License is distributed on an \"AS IS\" BASIS,", "_db_default(self): if self.parent_name == \"FusionsDiode\": name = paths.diodelaser_db else: name", "License for the specific language governing permissions and # limitations", "return v def _graph_default(self): g = Graph( container_dict={ # 'fill_padding':True,", "s.bounds = bounds s.calibration_bounds = ( polyval(s.coefficients, bounds[0]), polyval(s.coefficients, bounds[1]),", "Item(\"input\", format_str=\" %0.3f \", style=\"readonly\"), spring, Item(\"save\", show_label=False), Item(\"load_graph\", show_label=False),", "def _selector_default(self): return self.db._selector_factory() if __name__ == \"__main__\": ccm =", "s.load_graph() s.graph.add_range_selector() bc = BoundsSelector(graph=s.graph) info = bc.edit_traits() if info.result:", "= list(si.traits().keys()).remove(\"graph\") self.selected_calibrations.append(si.clone_traits(traits=trs)) def _replace_fired(self): s = self.selector.selected trs =", "[] for s in self.selected_calibrations: coeffs.append(s.coefficients) bounds.append(s.calibration_bounds) pc.coefficients = coeffs", "paths.diodelaser_db else: name = paths.co2laser_db db = PowerCalibrationAdapter(name=name, kind=\"sqlite\") db.connect()", "label=\"Data\", ) process_tab = Group( HGroup( Item(\"power\"), Item(\"input\", format_str=\" %0.3f", 
"traits_view(self): selector_grp = Group(Item(\"selector\", style=\"custom\", show_label=False)) transfer_grp = VGroup( spring,", "show_label=False), show_border=True, label=\"Process\", ) v = View( VGroup(data_tab, process_tab), resizable=True,", "try: p = self._get_calibration_path() with open(p, \"rb\") as f: pc", "g.new_plot(zoom=True, pan=True, # padding=[40, 10, 10, 40] # ) has_bounds", "transfer_grp, selected_grp), show_border=True, label=\"Data\", ) process_tab = Group( HGroup( Item(\"power\"),", "is not None: self.input, _ = pc.get_input(self.power) def _load_calibration(self): try:", "Instance, DelegatesTo, Button, List, Any, Float from traitsui.api import View,", "selected_calibrations = List selected = Any results = DelegatesTo(\"selector\") graph", "= self.selector.selected if s is not None: for si in", "= \"FusionsDiode\" power = Float input = Float def _power_changed(self):", "= List selected = Any results = DelegatesTo(\"selector\") graph =", "in self.selected_calibrations: coeffs.append(s.coefficients) bounds.append(s.calibration_bounds) pc.coefficients = coeffs pc.bounds = bounds", "library imports ======================== import pickle import os from numpy import", "== 0) g.redraw() def traits_view(self): selector_grp = Group(Item(\"selector\", style=\"custom\", show_label=False))", "if __name__ == \"__main__\": ccm = CompositeCalibrationManager() ccm.configure_traits() # =============", "= [] bounds = [] for s in self.selected_calibrations: coeffs.append(s.coefficients)", "data_tab = Group( HGroup(selector_grp, transfer_grp, selected_grp), show_border=True, label=\"Data\", ) process_tab", "in compliance with the License. 
# You may obtain a", "\"wb\") as f: pickle.dump(pc, f) def _get_calibration_path(self): p = os.path.join(", "software # distributed under the License is distributed on an", "= Button save = Button selected_calibrations = List selected =", "traits_view(self): v = View( Item(\"graph\", show_label=False, style=\"custom\"), buttons=[\"OK\", \"Cancel\"], kind=\"livemodal\",", "polyval(s.coefficients, bounds[0]), polyval(s.coefficients, bounds[1]), ) def _append_fired(self): s = self.selector.selected", "g.clear() # g.new_plot(zoom=True, pan=True, # padding=[40, 10, 10, 40] #", "power = Float input = Float def _power_changed(self): pc =", "class CompositeCalibrationManager(Manager): db = Instance(PowerCalibrationAdapter) selector = Instance(PowerCalibrationSelector) append =", "under the License. # =============================================================================== # ============= enthought library imports", "= coeffs pc.bounds = bounds p = self._get_calibration_path() self.info(\"saving calibration", "for si in s: trs = list(si.traits().keys()).remove(\"graph\") self.selected_calibrations.append(si.clone_traits(traits=trs)) def _replace_fired(self):", "HasTraits, Instance, DelegatesTo, Button, List, Any, Float from traitsui.api import", "except: return return pc def _dclicked_changed(self): s = self.selected if", "Group( HGroup(selector_grp, transfer_grp, selected_grp), show_border=True, label=\"Data\", ) process_tab = Group(", "%0.3f \", style=\"readonly\"), spring, Item(\"save\", show_label=False), Item(\"load_graph\", show_label=False), ), Item(\"graph\",", "style=\"custom\", show_label=False), show_border=True, label=\"Process\", ) v = View( VGroup(data_tab, process_tab),", "return p def _load_graph_fired(self): g = self.graph g.clear() # g.new_plot(zoom=True,", "= Any results = DelegatesTo(\"selector\") graph = Instance(Graph) dclicked =", "= DelegatesTo(\"selector\") graph = Instance(Graph) dclicked = Any parent_name =", "pychron.managers.manager 
import Manager from pychron.database.selectors.power_calibration_selector import ( PowerCalibrationSelector, ) from", "p = self._get_calibration_path() with open(p, \"rb\") as f: pc =", "self.warning_dialog(\"{} does not have its bounds set\".format(s.rid)) break s.load_graph(graph=g, new_plot=i", "), Item(\"graph\", style=\"custom\", show_label=False), show_border=True, label=\"Process\", ) v = View(", "from traitsui.api import View, Item, VGroup, HGroup, Group, spring, TabularEditor", "a dbselector to select data \"\"\" class BoundsSelector(HasTraits): graph =", "paths from pychron.graph.graph import Graph from pychron.hardware.meter_calibration import MeterCalibration \"\"\"", "OF ANY KIND, either express or implied. # See the", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "import os from numpy import polyval # ============= local library", "ANY KIND, either express or implied. # See the License", "See the License for the specific language governing permissions and", "the License. 
# You may obtain a copy of the", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "for the specific language governing permissions and # limitations under", "pc = MeterCalibration() coeffs = [] bounds = [] for", "to in writing, software # distributed under the License is", "import HasTraits, Instance, DelegatesTo, Button, List, Any, Float from traitsui.api", "editable=False, dclicked=\"object.dclicked\", selected=\"object.selected\", ) selected_grp = Item(\"selected_calibrations\", editor=editor, show_label=False) data_tab", "spring, ) editor = TabularEditor( adapter=self.selector.tabular_adapter(), editable=False, dclicked=\"object.dclicked\", selected=\"object.selected\", )", "# See the License for the specific language governing permissions", "selector_grp = Group(Item(\"selector\", style=\"custom\", show_label=False)) transfer_grp = VGroup( spring, VGroup(Item(\"append\",", "# Copyright 2012 <NAME> # # Licensed under the Apache", "language governing permissions and # limitations under the License. #", "or agreed to in writing, software # distributed under the", "ytitle=\"Measured Power (W)\", ) def _db_default(self): if self.parent_name == \"FusionsDiode\":", "_load_calibration(self): try: p = self._get_calibration_path() with open(p, \"rb\") as f:", "required by applicable law or agreed to in writing, software", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "absolute_import from traits.api import HasTraits, Instance, DelegatesTo, Button, List, Any,", "with the License. 
# You may obtain a copy of", "have its bounds set\".format(s.rid)) break s.load_graph(graph=g, new_plot=i == 0) g.redraw()", "self.selector.selected trs = list(s.traits().keys()).remove(\"graph\") self.selected_calibrations = s.clone_traits(traits=trs) def _save_fired(self): self._dump_calibration()", "s is not None: s.bounds = None s.load_graph() s.graph.add_range_selector() bc", "import ( PowerCalibrationSelector, ) from pychron.database.adapters.power_calibration_adapter import PowerCalibrationAdapter from pychron.paths", "import pickle import os from numpy import polyval # =============", "compliance with the License. # You may obtain a copy", "agreed to in writing, software # distributed under the License", "Item(\"graph\", show_label=False, style=\"custom\"), buttons=[\"OK\", \"Cancel\"], kind=\"livemodal\", ) return v class", "Instance(Graph) dclicked = Any parent_name = \"FusionsDiode\" power = Float", "\"Cancel\"], kind=\"livemodal\", ) return v class CompositeCalibrationManager(Manager): db = Instance(PowerCalibrationAdapter)", "=============================================================================== # ============= enthought library imports ======================= from __future__ import", "= Instance(PowerCalibrationSelector) append = Button replace = Button load_graph =", "return g def _plot_factory(self, graph): graph.new_plot( zoom=True, pan=True, padding=[50, 10,", "self._plot_factory(g) self.warning_dialog(\"{} does not have its bounds set\".format(s.rid)) break s.load_graph(graph=g,", "distributed under the License is distributed on an \"AS IS\"", "MeterCalibration \"\"\" use a dbselector to select data \"\"\" class", "def _get_calibration_path(self): p = os.path.join( paths.hidden_dir, \"{}_power_calibration\".format(self.parent_name) ) return p", "10, 10, 40], xtitle=\"Setpoint (%)\", ytitle=\"Measured Power (W)\", ) def", "permissions and # limitations under the License. 
# =============================================================================== #", "p = self._get_calibration_path() self.info(\"saving calibration to {}\".format(p)) with open(p, \"wb\")", "\"\"\" class BoundsSelector(HasTraits): graph = Instance(Graph) def traits_view(self): v =", "db = Instance(PowerCalibrationAdapter) selector = Instance(PowerCalibrationSelector) append = Button replace", "= self.selector.selected trs = list(s.traits().keys()).remove(\"graph\") self.selected_calibrations = s.clone_traits(traits=trs) def _save_fired(self):", "label=\"Process\", ) v = View( VGroup(data_tab, process_tab), resizable=True, title=\"Composite {}", "Any, Float from traitsui.api import View, Item, VGroup, HGroup, Group,", "trs = list(s.traits().keys()).remove(\"graph\") self.selected_calibrations = s.clone_traits(traits=trs) def _save_fired(self): self._dump_calibration() def", "its bounds set\".format(s.rid)) break s.load_graph(graph=g, new_plot=i == 0) g.redraw() def", "== \"FusionsDiode\": name = paths.diodelaser_db else: name = paths.co2laser_db db", "express or implied. # See the License for the specific", "HGroup(selector_grp, transfer_grp, selected_grp), show_border=True, label=\"Data\", ) process_tab = Group( HGroup(", "except in compliance with the License. 
# You may obtain", "graph = Instance(Graph) def traits_view(self): v = View( Item(\"graph\", show_label=False,", "= Instance(Graph) dclicked = Any parent_name = \"FusionsDiode\" power =", "Licensed under the Apache License, Version 2.0 (the \"License\"); #", "def _power_changed(self): pc = self._load_calibration() pc if pc is not", "not use this file except in compliance with the License.", "pc.bounds = bounds p = self._get_calibration_path() self.info(\"saving calibration to {}\".format(p))", "pickle.load(f) except: return return pc def _dclicked_changed(self): s = self.selected", "p = os.path.join( paths.hidden_dir, \"{}_power_calibration\".format(self.parent_name) ) return p def _load_graph_fired(self):", "10, 40] # ) has_bounds = False for i, s", "writing, software # distributed under the License is distributed on", "graph = Instance(Graph) dclicked = Any parent_name = \"FusionsDiode\" power", "HGroup( Item(\"power\"), Item(\"input\", format_str=\" %0.3f \", style=\"readonly\"), spring, Item(\"save\", show_label=False),", "you may not use this file except in compliance with", "# Licensed under the Apache License, Version 2.0 (the \"License\");", "if self.parent_name == \"FusionsDiode\": name = paths.diodelaser_db else: name =", "kind=\"livemodal\", ) return v class CompositeCalibrationManager(Manager): db = Instance(PowerCalibrationAdapter) selector", "paths.hidden_dir, \"{}_power_calibration\".format(self.parent_name) ) return p def _load_graph_fired(self): g = self.graph", ") v = View( VGroup(data_tab, process_tab), resizable=True, title=\"Composite {} Power", "and # limitations under the License. 
# =============================================================================== # =============", "def _load_graph_fired(self): g = self.graph g.clear() # g.new_plot(zoom=True, pan=True, #", "adapter=self.selector.tabular_adapter(), editable=False, dclicked=\"object.dclicked\", selected=\"object.selected\", ) selected_grp = Item(\"selected_calibrations\", editor=editor, show_label=False)", "CONDITIONS OF ANY KIND, either express or implied. # See", "_plot_factory(self, graph): graph.new_plot( zoom=True, pan=True, padding=[50, 10, 10, 40], xtitle=\"Setpoint", "f) def _get_calibration_path(self): p = os.path.join( paths.hidden_dir, \"{}_power_calibration\".format(self.parent_name) ) return", "======================== import pickle import os from numpy import polyval #", "pickle import os from numpy import polyval # ============= local", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "selected_grp), show_border=True, label=\"Data\", ) process_tab = Group( HGroup( Item(\"power\"), Item(\"input\",", "'bgcolor':'red', \"padding\": 5 } ) self._plot_factory(g) return g def _plot_factory(self,", "s.bounds = None s.load_graph() s.graph.add_range_selector() bc = BoundsSelector(graph=s.graph) info =", "in s: trs = list(si.traits().keys()).remove(\"graph\") self.selected_calibrations.append(si.clone_traits(traits=trs)) def _replace_fired(self): s =", "pychron.hardware.meter_calibration import MeterCalibration \"\"\" use a dbselector to select data", "selected = Any results = DelegatesTo(\"selector\") graph = Instance(Graph) dclicked", "None: self.input, _ = pc.get_input(self.power) def _load_calibration(self): try: p =", "f: pc = pickle.load(f) except: return return pc def _dclicked_changed(self):", "Button, List, Any, Float from traitsui.api import View, Item, VGroup,", "return return pc def _dclicked_changed(self): s = self.selected if s", "s = self.selector.selected if s is not None: for si", "True elif has_bounds: g.clear() self._plot_factory(g) 
self.warning_dialog(\"{} does not have its", "with open(p, \"rb\") as f: pc = pickle.load(f) except: return", "_load_graph_fired(self): g = self.graph g.clear() # g.new_plot(zoom=True, pan=True, # padding=[40,", "= View( Item(\"graph\", show_label=False, style=\"custom\"), buttons=[\"OK\", \"Cancel\"], kind=\"livemodal\", ) return", "return db def _selector_default(self): return self.db._selector_factory() if __name__ == \"__main__\":", "self.info(\"saving calibration to {}\".format(p)) with open(p, \"wb\") as f: pickle.dump(pc,", "_append_fired(self): s = self.selector.selected if s is not None: for", "self.graph g.clear() # g.new_plot(zoom=True, pan=True, # padding=[40, 10, 10, 40]", "= paths.diodelaser_db else: name = paths.co2laser_db db = PowerCalibrationAdapter(name=name, kind=\"sqlite\")", "_get_calibration_path(self): p = os.path.join( paths.hidden_dir, \"{}_power_calibration\".format(self.parent_name) ) return p def", "imports ======================= from __future__ import absolute_import from traits.api import HasTraits,", "not None: s.bounds = None s.load_graph() s.graph.add_range_selector() bc = BoundsSelector(graph=s.graph)", "OR CONDITIONS OF ANY KIND, either express or implied. 
#", "\"FusionsDiode\" power = Float input = Float def _power_changed(self): pc", "info = bc.edit_traits() if info.result: bounds = s.graph.plots[0].default_index.metadata[\"selections\"] s.bounds =", "the License is distributed on an \"AS IS\" BASIS, #", "return pc def _dclicked_changed(self): s = self.selected if s is", "os.path.join( paths.hidden_dir, \"{}_power_calibration\".format(self.parent_name) ) return p def _load_graph_fired(self): g =", ") return v def _graph_default(self): g = Graph( container_dict={ #", "select data \"\"\" class BoundsSelector(HasTraits): graph = Instance(Graph) def traits_view(self):", "imports ========================== from pychron.managers.manager import Manager from pychron.database.selectors.power_calibration_selector import (", "# 'bgcolor':'red', \"padding\": 5 } ) self._plot_factory(g) return g def", "Instance(Graph) def traits_view(self): v = View( Item(\"graph\", show_label=False, style=\"custom\"), buttons=[\"OK\",", "pc if pc is not None: self.input, _ = pc.get_input(self.power)", "load_graph = Button save = Button selected_calibrations = List selected", "= Group( HGroup(selector_grp, transfer_grp, selected_grp), show_border=True, label=\"Data\", ) process_tab =", "_graph_default(self): g = Graph( container_dict={ # 'fill_padding':True, # 'bgcolor':'red', \"padding\":", "Group( HGroup( Item(\"power\"), Item(\"input\", format_str=\" %0.3f \", style=\"readonly\"), spring, Item(\"save\",", "# g.new_plot(zoom=True, pan=True, # padding=[40, 10, 10, 40] # )", "to select data \"\"\" class BoundsSelector(HasTraits): graph = Instance(Graph) def", "= self._get_calibration_path() self.info(\"saving calibration to {}\".format(p)) with open(p, \"wb\") as", "= Button replace = Button load_graph = Button save =", "library imports ========================== from pychron.managers.manager import Manager from pychron.database.selectors.power_calibration_selector import", "bc = BoundsSelector(graph=s.graph) info = bc.edit_traits() if 
info.result: bounds =", "2012 <NAME> # # Licensed under the Apache License, Version", "s.load_graph(graph=g, new_plot=i == 0) g.redraw() def traits_view(self): selector_grp = Group(Item(\"selector\",", "law or agreed to in writing, software # distributed under", "show_border=True, label=\"Process\", ) v = View( VGroup(data_tab, process_tab), resizable=True, title=\"Composite", "def _plot_factory(self, graph): graph.new_plot( zoom=True, pan=True, padding=[50, 10, 10, 40],", "library imports ======================= from __future__ import absolute_import from traits.api import", "Power (W)\", ) def _db_default(self): if self.parent_name == \"FusionsDiode\": name", "save = Button selected_calibrations = List selected = Any results", "= TabularEditor( adapter=self.selector.tabular_adapter(), editable=False, dclicked=\"object.dclicked\", selected=\"object.selected\", ) selected_grp = Item(\"selected_calibrations\",", "Group, spring, TabularEditor # ============= standard library imports ======================== import", "= s.clone_traits(traits=trs) def _save_fired(self): self._dump_calibration() def _dump_calibration(self): pc = MeterCalibration()", "= bc.edit_traits() if info.result: bounds = s.graph.plots[0].default_index.metadata[\"selections\"] s.bounds = bounds", "self.db._selector_factory() if __name__ == \"__main__\": ccm = CompositeCalibrationManager() ccm.configure_traits() #", "local library imports ========================== from pychron.managers.manager import Manager from pychron.database.selectors.power_calibration_selector", "========================== from pychron.managers.manager import Manager from pychron.database.selectors.power_calibration_selector import ( PowerCalibrationSelector,", "TabularEditor # ============= standard library imports ======================== import pickle import", "Item(\"save\", show_label=False), Item(\"load_graph\", show_label=False), ), Item(\"graph\", style=\"custom\", show_label=False), show_border=True, 
label=\"Process\",", "pickle.dump(pc, f) def _get_calibration_path(self): p = os.path.join( paths.hidden_dir, \"{}_power_calibration\".format(self.parent_name) )", "may obtain a copy of the License at # #", "BoundsSelector(graph=s.graph) info = bc.edit_traits() if info.result: bounds = s.graph.plots[0].default_index.metadata[\"selections\"] s.bounds", "Button load_graph = Button save = Button selected_calibrations = List", "40] # ) has_bounds = False for i, s in", "v = View( Item(\"graph\", show_label=False, style=\"custom\"), buttons=[\"OK\", \"Cancel\"], kind=\"livemodal\", )", "show_label=False, style=\"custom\"), buttons=[\"OK\", \"Cancel\"], kind=\"livemodal\", ) return v class CompositeCalibrationManager(Manager):", "Group(Item(\"selector\", style=\"custom\", show_label=False)) transfer_grp = VGroup( spring, VGroup(Item(\"append\", show_label=False), Item(\"replace\",", "# =============================================================================== # ============= enthought library imports ======================= from __future__", "name = paths.diodelaser_db else: name = paths.co2laser_db db = PowerCalibrationAdapter(name=name,", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "use a dbselector to select data \"\"\" class BoundsSelector(HasTraits): graph", "is not None: s.bounds = None s.load_graph() s.graph.add_range_selector() bc =", "bounds set\".format(s.rid)) break s.load_graph(graph=g, new_plot=i == 0) g.redraw() def traits_view(self):", "pc.get_input(self.power) def _load_calibration(self): try: p = self._get_calibration_path() with open(p, \"rb\")", "calibration to {}\".format(p)) with open(p, \"wb\") as f: pickle.dump(pc, f)", "db = PowerCalibrationAdapter(name=name, kind=\"sqlite\") db.connect() return db def _selector_default(self): return", "show_label=False), Item(\"load_graph\", show_label=False), ), Item(\"graph\", style=\"custom\", show_label=False), show_border=True, label=\"Process\", )", "may not use this file except in 
compliance with the", "enumerate(self.selected_calibrations): if s.bounds: has_bounds = True elif has_bounds: g.clear() self._plot_factory(g)", "def traits_view(self): v = View( Item(\"graph\", show_label=False, style=\"custom\"), buttons=[\"OK\", \"Cancel\"],", "governing permissions and # limitations under the License. # ===============================================================================", "selector = Instance(PowerCalibrationSelector) append = Button replace = Button load_graph", "g.clear() self._plot_factory(g) self.warning_dialog(\"{} does not have its bounds set\".format(s.rid)) break", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "View( Item(\"graph\", show_label=False, style=\"custom\"), buttons=[\"OK\", \"Cancel\"], kind=\"livemodal\", ) return v", "40], xtitle=\"Setpoint (%)\", ytitle=\"Measured Power (W)\", ) def _db_default(self): if", "v class CompositeCalibrationManager(Manager): db = Instance(PowerCalibrationAdapter) selector = Instance(PowerCalibrationSelector) append", "this file except in compliance with the License. # You", "self.selected_calibrations: coeffs.append(s.coefficients) bounds.append(s.calibration_bounds) pc.coefficients = coeffs pc.bounds = bounds p", "\"rb\") as f: pc = pickle.load(f) except: return return pc", "License. 
# =============================================================================== # ============= enthought library imports ======================= from", "traits.api import HasTraits, Instance, DelegatesTo, Button, List, Any, Float from", "db.connect() return db def _selector_default(self): return self.db._selector_factory() if __name__ ==", "pc is not None: self.input, _ = pc.get_input(self.power) def _load_calibration(self):", "format_str=\" %0.3f \", style=\"readonly\"), spring, Item(\"save\", show_label=False), Item(\"load_graph\", show_label=False), ),", "<NAME> # # Licensed under the Apache License, Version 2.0", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "pychron.graph.graph import Graph from pychron.hardware.meter_calibration import MeterCalibration \"\"\" use a", "= self._load_calibration() pc if pc is not None: self.input, _", "# # Licensed under the Apache License, Version 2.0 (the", "self.selected if s is not None: s.bounds = None s.load_graph()", "file except in compliance with the License. 
# You may", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "Float input = Float def _power_changed(self): pc = self._load_calibration() pc", "= MeterCalibration() coeffs = [] bounds = [] for s", "View, Item, VGroup, HGroup, Group, spring, TabularEditor # ============= standard", "from pychron.database.adapters.power_calibration_adapter import PowerCalibrationAdapter from pychron.paths import paths from pychron.graph.graph", "Any results = DelegatesTo(\"selector\") graph = Instance(Graph) dclicked = Any", "VGroup(Item(\"append\", show_label=False), Item(\"replace\", show_label=False)), spring, ) editor = TabularEditor( adapter=self.selector.tabular_adapter(),", "def _dump_calibration(self): pc = MeterCalibration() coeffs = [] bounds =", "trs = list(si.traits().keys()).remove(\"graph\") self.selected_calibrations.append(si.clone_traits(traits=trs)) def _replace_fired(self): s = self.selector.selected trs", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "def _append_fired(self): s = self.selector.selected if s is not None:", "as f: pc = pickle.load(f) except: return return pc def", "10, 10, 40] # ) has_bounds = False for i,", "= Float input = Float def _power_changed(self): pc = self._load_calibration()", "return self.db._selector_factory() if __name__ == \"__main__\": ccm = CompositeCalibrationManager() ccm.configure_traits()", "= ( polyval(s.coefficients, bounds[0]), polyval(s.coefficients, bounds[1]), ) def _append_fired(self): s", "( polyval(s.coefficients, bounds[0]), polyval(s.coefficients, bounds[1]), ) def _append_fired(self): s =", "list(s.traits().keys()).remove(\"graph\") self.selected_calibrations = s.clone_traits(traits=trs) def _save_fired(self): self._dump_calibration() def _dump_calibration(self): pc", "== \"__main__\": ccm = CompositeCalibrationManager() ccm.configure_traits() # ============= EOF =============================================", "s.graph.add_range_selector() bc = BoundsSelector(graph=s.graph) info = 
bc.edit_traits() if info.result: bounds", "(%)\", ytitle=\"Measured Power (W)\", ) def _db_default(self): if self.parent_name ==", "\"FusionsDiode\": name = paths.diodelaser_db else: name = paths.co2laser_db db =", "= Instance(Graph) def traits_view(self): v = View( Item(\"graph\", show_label=False, style=\"custom\"),", "Item(\"load_graph\", show_label=False), ), Item(\"graph\", style=\"custom\", show_label=False), show_border=True, label=\"Process\", ) v", "editor = TabularEditor( adapter=self.selector.tabular_adapter(), editable=False, dclicked=\"object.dclicked\", selected=\"object.selected\", ) selected_grp =", "title=\"Composite {} Power Calibration\".format(self.parent_name), ) return v def _graph_default(self): g", "limitations under the License. # =============================================================================== # ============= enthought library", "open(p, \"wb\") as f: pickle.dump(pc, f) def _get_calibration_path(self): p =", "from pychron.hardware.meter_calibration import MeterCalibration \"\"\" use a dbselector to select", "show_label=False), Item(\"replace\", show_label=False)), spring, ) editor = TabularEditor( adapter=self.selector.tabular_adapter(), editable=False,", "si in s: trs = list(si.traits().keys()).remove(\"graph\") self.selected_calibrations.append(si.clone_traits(traits=trs)) def _replace_fired(self): s", "import MeterCalibration \"\"\" use a dbselector to select data \"\"\"", "if pc is not None: self.input, _ = pc.get_input(self.power) def", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "VGroup(data_tab, process_tab), resizable=True, title=\"Composite {} Power Calibration\".format(self.parent_name), ) return v", "import polyval # ============= local library imports ========================== from pychron.managers.manager", "s.clone_traits(traits=trs) def _save_fired(self): self._dump_calibration() def _dump_calibration(self): pc = MeterCalibration() coeffs", "os from numpy 
import polyval # ============= local library imports", "= False for i, s in enumerate(self.selected_calibrations): if s.bounds: has_bounds", "or implied. # See the License for the specific language", "s: trs = list(si.traits().keys()).remove(\"graph\") self.selected_calibrations.append(si.clone_traits(traits=trs)) def _replace_fired(self): s = self.selector.selected", "s = self.selected if s is not None: s.bounds =", "_power_changed(self): pc = self._load_calibration() pc if pc is not None:", "<reponame>ASUPychron/pychron # =============================================================================== # Copyright 2012 <NAME> # # Licensed", "Item, VGroup, HGroup, Group, spring, TabularEditor # ============= standard library", "KIND, either express or implied. # See the License for", "specific language governing permissions and # limitations under the License.", "style=\"readonly\"), spring, Item(\"save\", show_label=False), Item(\"load_graph\", show_label=False), ), Item(\"graph\", style=\"custom\", show_label=False),", ") editor = TabularEditor( adapter=self.selector.tabular_adapter(), editable=False, dclicked=\"object.dclicked\", selected=\"object.selected\", ) selected_grp", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "imports ======================== import pickle import os from numpy import polyval", "g def _plot_factory(self, graph): graph.new_plot( zoom=True, pan=True, padding=[50, 10, 10,", "0) g.redraw() def traits_view(self): selector_grp = Group(Item(\"selector\", style=\"custom\", show_label=False)) transfer_grp", "pan=True, # padding=[40, 10, 10, 40] # ) has_bounds =", "self.parent_name == \"FusionsDiode\": name = paths.diodelaser_db else: name = paths.co2laser_db", ") selected_grp = Item(\"selected_calibrations\", editor=editor, show_label=False) data_tab = Group( HGroup(selector_grp,", "= None s.load_graph() s.graph.add_range_selector() bc = BoundsSelector(graph=s.graph) info = bc.edit_traits()", "return v 
class CompositeCalibrationManager(Manager): db = Instance(PowerCalibrationAdapter) selector = Instance(PowerCalibrationSelector)", "VGroup, HGroup, Group, spring, TabularEditor # ============= standard library imports", "(the \"License\"); # you may not use this file except", ") process_tab = Group( HGroup( Item(\"power\"), Item(\"input\", format_str=\" %0.3f \",", "# you may not use this file except in compliance", "bounds[0]), polyval(s.coefficients, bounds[1]), ) def _append_fired(self): s = self.selector.selected if", "import paths from pychron.graph.graph import Graph from pychron.hardware.meter_calibration import MeterCalibration", "graph.new_plot( zoom=True, pan=True, padding=[50, 10, 10, 40], xtitle=\"Setpoint (%)\", ytitle=\"Measured", "the License. # =============================================================================== # ============= enthought library imports =======================", "is not None: for si in s: trs = list(si.traits().keys()).remove(\"graph\")", "\"{}_power_calibration\".format(self.parent_name) ) return p def _load_graph_fired(self): g = self.graph g.clear()", "\"padding\": 5 } ) self._plot_factory(g) return g def _plot_factory(self, graph):", "from pychron.paths import paths from pychron.graph.graph import Graph from pychron.hardware.meter_calibration", "self._get_calibration_path() with open(p, \"rb\") as f: pc = pickle.load(f) except:", "= self.graph g.clear() # g.new_plot(zoom=True, pan=True, # padding=[40, 10, 10,", "_save_fired(self): self._dump_calibration() def _dump_calibration(self): pc = MeterCalibration() coeffs = []", "# # Unless required by applicable law or agreed to", "container_dict={ # 'fill_padding':True, # 'bgcolor':'red', \"padding\": 5 } ) self._plot_factory(g)", "Calibration\".format(self.parent_name), ) return v def _graph_default(self): g = Graph( container_dict={", "Item(\"power\"), Item(\"input\", format_str=\" %0.3f \", style=\"readonly\"), spring, Item(\"save\", show_label=False), 
Item(\"load_graph\",", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "============= standard library imports ======================== import pickle import os from", "Button replace = Button load_graph = Button save = Button", "parent_name = \"FusionsDiode\" power = Float input = Float def", "= [] for s in self.selected_calibrations: coeffs.append(s.coefficients) bounds.append(s.calibration_bounds) pc.coefficients =", "transfer_grp = VGroup( spring, VGroup(Item(\"append\", show_label=False), Item(\"replace\", show_label=False)), spring, )", "Version 2.0 (the \"License\"); # you may not use this", "False for i, s in enumerate(self.selected_calibrations): if s.bounds: has_bounds =", ") def _db_default(self): if self.parent_name == \"FusionsDiode\": name = paths.diodelaser_db", "enthought library imports ======================= from __future__ import absolute_import from traits.api", "in enumerate(self.selected_calibrations): if s.bounds: has_bounds = True elif has_bounds: g.clear()", "show_label=False) data_tab = Group( HGroup(selector_grp, transfer_grp, selected_grp), show_border=True, label=\"Data\", )", "new_plot=i == 0) g.redraw() def traits_view(self): selector_grp = Group(Item(\"selector\", style=\"custom\",", "with open(p, \"wb\") as f: pickle.dump(pc, f) def _get_calibration_path(self): p", "def traits_view(self): selector_grp = Group(Item(\"selector\", style=\"custom\", show_label=False)) transfer_grp = VGroup(", "s is not None: for si in s: trs =", "= s.graph.plots[0].default_index.metadata[\"selections\"] s.bounds = bounds s.calibration_bounds = ( polyval(s.coefficients, bounds[0]),", "None s.load_graph() s.graph.add_range_selector() bc = BoundsSelector(graph=s.graph) info = bc.edit_traits() if", "def _load_calibration(self): try: p = self._get_calibration_path() with open(p, \"rb\") as", "= list(s.traits().keys()).remove(\"graph\") self.selected_calibrations = s.clone_traits(traits=trs) def _save_fired(self): 
self._dump_calibration() def _dump_calibration(self):", "s.calibration_bounds = ( polyval(s.coefficients, bounds[0]), polyval(s.coefficients, bounds[1]), ) def _append_fired(self):", "implied. # See the License for the specific language governing", "coeffs pc.bounds = bounds p = self._get_calibration_path() self.info(\"saving calibration to", "graph): graph.new_plot( zoom=True, pan=True, padding=[50, 10, 10, 40], xtitle=\"Setpoint (%)\",", "from traits.api import HasTraits, Instance, DelegatesTo, Button, List, Any, Float", "self.input, _ = pc.get_input(self.power) def _load_calibration(self): try: p = self._get_calibration_path()", "under the Apache License, Version 2.0 (the \"License\"); # you", "Power Calibration\".format(self.parent_name), ) return v def _graph_default(self): g = Graph(", "results = DelegatesTo(\"selector\") graph = Instance(Graph) dclicked = Any parent_name", "============= enthought library imports ======================= from __future__ import absolute_import from", "class BoundsSelector(HasTraits): graph = Instance(Graph) def traits_view(self): v = View(", "elif has_bounds: g.clear() self._plot_factory(g) self.warning_dialog(\"{} does not have its bounds", "= pc.get_input(self.power) def _load_calibration(self): try: p = self._get_calibration_path() with open(p,", "MeterCalibration() coeffs = [] bounds = [] for s in", "= Item(\"selected_calibrations\", editor=editor, show_label=False) data_tab = Group( HGroup(selector_grp, transfer_grp, selected_grp),", "# ) has_bounds = False for i, s in enumerate(self.selected_calibrations):", "by applicable law or agreed to in writing, software #", "show_label=False), ), Item(\"graph\", style=\"custom\", show_label=False), show_border=True, label=\"Process\", ) v =", "_replace_fired(self): s = self.selector.selected trs = list(s.traits().keys()).remove(\"graph\") self.selected_calibrations = s.clone_traits(traits=trs)", "from pychron.managers.manager import Manager from 
pychron.database.selectors.power_calibration_selector import ( PowerCalibrationSelector, )", "s in self.selected_calibrations: coeffs.append(s.coefficients) bounds.append(s.calibration_bounds) pc.coefficients = coeffs pc.bounds =", ") self._plot_factory(g) return g def _plot_factory(self, graph): graph.new_plot( zoom=True, pan=True,", "import absolute_import from traits.api import HasTraits, Instance, DelegatesTo, Button, List,", "g = self.graph g.clear() # g.new_plot(zoom=True, pan=True, # padding=[40, 10,", "self.selector.selected if s is not None: for si in s:", "input = Float def _power_changed(self): pc = self._load_calibration() pc if", ") has_bounds = False for i, s in enumerate(self.selected_calibrations): if", "= BoundsSelector(graph=s.graph) info = bc.edit_traits() if info.result: bounds = s.graph.plots[0].default_index.metadata[\"selections\"]", "from __future__ import absolute_import from traits.api import HasTraits, Instance, DelegatesTo,", "= pickle.load(f) except: return return pc def _dclicked_changed(self): s =", "# ============= standard library imports ======================== import pickle import os", "Float from traitsui.api import View, Item, VGroup, HGroup, Group, spring,", "# =============================================================================== # Copyright 2012 <NAME> # # Licensed under", "bounds s.calibration_bounds = ( polyval(s.coefficients, bounds[0]), polyval(s.coefficients, bounds[1]), ) def", "Instance(PowerCalibrationAdapter) selector = Instance(PowerCalibrationSelector) append = Button replace = Button", "= Instance(PowerCalibrationAdapter) selector = Instance(PowerCalibrationSelector) append = Button replace =", "process_tab), resizable=True, title=\"Composite {} Power Calibration\".format(self.parent_name), ) return v def", "list(si.traits().keys()).remove(\"graph\") self.selected_calibrations.append(si.clone_traits(traits=trs)) def _replace_fired(self): s = self.selector.selected trs = 
list(s.traits().keys()).remove(\"graph\")", "Graph from pychron.hardware.meter_calibration import MeterCalibration \"\"\" use a dbselector to", "pc.coefficients = coeffs pc.bounds = bounds p = self._get_calibration_path() self.info(\"saving", "dclicked=\"object.dclicked\", selected=\"object.selected\", ) selected_grp = Item(\"selected_calibrations\", editor=editor, show_label=False) data_tab =", "self._dump_calibration() def _dump_calibration(self): pc = MeterCalibration() coeffs = [] bounds", "polyval # ============= local library imports ========================== from pychron.managers.manager import", "process_tab = Group( HGroup( Item(\"power\"), Item(\"input\", format_str=\" %0.3f \", style=\"readonly\"),", "VGroup( spring, VGroup(Item(\"append\", show_label=False), Item(\"replace\", show_label=False)), spring, ) editor =", "= paths.co2laser_db db = PowerCalibrationAdapter(name=name, kind=\"sqlite\") db.connect() return db def", "bounds p = self._get_calibration_path() self.info(\"saving calibration to {}\".format(p)) with open(p,", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "Unless required by applicable law or agreed to in writing,", "Item(\"graph\", style=\"custom\", show_label=False), show_border=True, label=\"Process\", ) v = View( VGroup(data_tab,", "from numpy import polyval # ============= local library imports ==========================", "= self.selected if s is not None: s.bounds = None", "pan=True, padding=[50, 10, 10, 40], xtitle=\"Setpoint (%)\", ytitle=\"Measured Power (W)\",", "the specific language governing permissions and # limitations under the", "PowerCalibrationSelector, ) from pychron.database.adapters.power_calibration_adapter import PowerCalibrationAdapter from pychron.paths import paths", "show_label=False)) transfer_grp = VGroup( spring, VGroup(Item(\"append\", show_label=False), Item(\"replace\", show_label=False)), spring,", "from pychron.graph.graph import Graph from pychron.hardware.meter_calibration import 
MeterCalibration \"\"\" use", "applicable law or agreed to in writing, software # distributed", "name = paths.co2laser_db db = PowerCalibrationAdapter(name=name, kind=\"sqlite\") db.connect() return db", "from pychron.database.selectors.power_calibration_selector import ( PowerCalibrationSelector, ) from pychron.database.adapters.power_calibration_adapter import PowerCalibrationAdapter", ") return v class CompositeCalibrationManager(Manager): db = Instance(PowerCalibrationAdapter) selector =", "p def _load_graph_fired(self): g = self.graph g.clear() # g.new_plot(zoom=True, pan=True,", "bc.edit_traits() if info.result: bounds = s.graph.plots[0].default_index.metadata[\"selections\"] s.bounds = bounds s.calibration_bounds", "dclicked = Any parent_name = \"FusionsDiode\" power = Float input", "__future__ import absolute_import from traits.api import HasTraits, Instance, DelegatesTo, Button,", "_ = pc.get_input(self.power) def _load_calibration(self): try: p = self._get_calibration_path() with", "bounds[1]), ) def _append_fired(self): s = self.selector.selected if s is", ") def _append_fired(self): s = self.selector.selected if s is not", "to {}\".format(p)) with open(p, \"wb\") as f: pickle.dump(pc, f) def", "xtitle=\"Setpoint (%)\", ytitle=\"Measured Power (W)\", ) def _db_default(self): if self.parent_name", "spring, TabularEditor # ============= standard library imports ======================== import pickle", "= Float def _power_changed(self): pc = self._load_calibration() pc if pc", "TabularEditor( adapter=self.selector.tabular_adapter(), editable=False, dclicked=\"object.dclicked\", selected=\"object.selected\", ) selected_grp = Item(\"selected_calibrations\", editor=editor,", "in writing, software # distributed under the License is distributed", "{} Power Calibration\".format(self.parent_name), ) return v def _graph_default(self): g =", ") from pychron.database.adapters.power_calibration_adapter import PowerCalibrationAdapter from pychron.paths import paths 
from", "Button selected_calibrations = List selected = Any results = DelegatesTo(\"selector\")", "# 'fill_padding':True, # 'bgcolor':'red', \"padding\": 5 } ) self._plot_factory(g) return", "# ============= enthought library imports ======================= from __future__ import absolute_import", "Any parent_name = \"FusionsDiode\" power = Float input = Float", "============= local library imports ========================== from pychron.managers.manager import Manager from", "_dclicked_changed(self): s = self.selected if s is not None: s.bounds", "def _replace_fired(self): s = self.selector.selected trs = list(s.traits().keys()).remove(\"graph\") self.selected_calibrations =", "paths.co2laser_db db = PowerCalibrationAdapter(name=name, kind=\"sqlite\") db.connect() return db def _selector_default(self):", "( PowerCalibrationSelector, ) from pychron.database.adapters.power_calibration_adapter import PowerCalibrationAdapter from pychron.paths import", "= Graph( container_dict={ # 'fill_padding':True, # 'bgcolor':'red', \"padding\": 5 }", "Graph( container_dict={ # 'fill_padding':True, # 'bgcolor':'red', \"padding\": 5 } )", "= self._get_calibration_path() with open(p, \"rb\") as f: pc = pickle.load(f)", "self.selected_calibrations = s.clone_traits(traits=trs) def _save_fired(self): self._dump_calibration() def _dump_calibration(self): pc =", "self._load_calibration() pc if pc is not None: self.input, _ =", "# limitations under the License. 
# =============================================================================== # ============= enthought", "def _graph_default(self): g = Graph( container_dict={ # 'fill_padding':True, # 'bgcolor':'red',", "= PowerCalibrationAdapter(name=name, kind=\"sqlite\") db.connect() return db def _selector_default(self): return self.db._selector_factory()", "License is distributed on an \"AS IS\" BASIS, # WITHOUT", "License, Version 2.0 (the \"License\"); # you may not use", "# You may obtain a copy of the License at", "resizable=True, title=\"Composite {} Power Calibration\".format(self.parent_name), ) return v def _graph_default(self):", "has_bounds: g.clear() self._plot_factory(g) self.warning_dialog(\"{} does not have its bounds set\".format(s.rid))", "v def _graph_default(self): g = Graph( container_dict={ # 'fill_padding':True, #", "PowerCalibrationAdapter from pychron.paths import paths from pychron.graph.graph import Graph from", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "__name__ == \"__main__\": ccm = CompositeCalibrationManager() ccm.configure_traits() # ============= EOF", "bounds = [] for s in self.selected_calibrations: coeffs.append(s.coefficients) bounds.append(s.calibration_bounds) pc.coefficients", "# padding=[40, 10, 10, 40] # ) has_bounds = False", "has_bounds = True elif has_bounds: g.clear() self._plot_factory(g) self.warning_dialog(\"{} does not", "bounds = s.graph.plots[0].default_index.metadata[\"selections\"] s.bounds = bounds s.calibration_bounds = ( polyval(s.coefficients,", "pc = self._load_calibration() pc if pc is not None: self.input,", "View( VGroup(data_tab, process_tab), resizable=True, title=\"Composite {} Power Calibration\".format(self.parent_name), ) return", "= Button selected_calibrations = List selected = Any results =", "show_label=False)), spring, ) editor = TabularEditor( adapter=self.selector.tabular_adapter(), editable=False, dclicked=\"object.dclicked\", selected=\"object.selected\",", 
"coeffs.append(s.coefficients) bounds.append(s.calibration_bounds) pc.coefficients = coeffs pc.bounds = bounds p =", "10, 40], xtitle=\"Setpoint (%)\", ytitle=\"Measured Power (W)\", ) def _db_default(self):", "the License for the specific language governing permissions and #", "replace = Button load_graph = Button save = Button selected_calibrations", "CompositeCalibrationManager(Manager): db = Instance(PowerCalibrationAdapter) selector = Instance(PowerCalibrationSelector) append = Button", "(W)\", ) def _db_default(self): if self.parent_name == \"FusionsDiode\": name =", "DelegatesTo(\"selector\") graph = Instance(Graph) dclicked = Any parent_name = \"FusionsDiode\"", "Apache License, Version 2.0 (the \"License\"); # you may not", "standard library imports ======================== import pickle import os from numpy", "either express or implied. # See the License for the", "selected=\"object.selected\", ) selected_grp = Item(\"selected_calibrations\", editor=editor, show_label=False) data_tab = Group(", "Item(\"selected_calibrations\", editor=editor, show_label=False) data_tab = Group( HGroup(selector_grp, transfer_grp, selected_grp), show_border=True,", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "editor=editor, show_label=False) data_tab = Group( HGroup(selector_grp, transfer_grp, selected_grp), show_border=True, label=\"Data\",", "db def _selector_default(self): return self.db._selector_factory() if __name__ == \"__main__\": ccm", "# ============= local library imports ========================== from pychron.managers.manager import Manager", "coeffs = [] bounds = [] for s in self.selected_calibrations:", "if s.bounds: has_bounds = True elif has_bounds: g.clear() self._plot_factory(g) self.warning_dialog(\"{}", "numpy import polyval # ============= local library imports ========================== from", "if s is not None: s.bounds = None s.load_graph() s.graph.add_range_selector()", "= Group(Item(\"selector\", 
style=\"custom\", show_label=False)) transfer_grp = VGroup( spring, VGroup(Item(\"append\", show_label=False),", "set\".format(s.rid)) break s.load_graph(graph=g, new_plot=i == 0) g.redraw() def traits_view(self): selector_grp", "======================= from __future__ import absolute_import from traits.api import HasTraits, Instance,", "None: s.bounds = None s.load_graph() s.graph.add_range_selector() bc = BoundsSelector(graph=s.graph) info", "= os.path.join( paths.hidden_dir, \"{}_power_calibration\".format(self.parent_name) ) return p def _load_graph_fired(self): g", "g = Graph( container_dict={ # 'fill_padding':True, # 'bgcolor':'red', \"padding\": 5", "s = self.selector.selected trs = list(s.traits().keys()).remove(\"graph\") self.selected_calibrations = s.clone_traits(traits=trs) def", "None: for si in s: trs = list(si.traits().keys()).remove(\"graph\") self.selected_calibrations.append(si.clone_traits(traits=trs)) def", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "import PowerCalibrationAdapter from pychron.paths import paths from pychron.graph.graph import Graph", "show_border=True, label=\"Data\", ) process_tab = Group( HGroup( Item(\"power\"), Item(\"input\", format_str=\"", "g.redraw() def traits_view(self): selector_grp = Group(Item(\"selector\", style=\"custom\", show_label=False)) transfer_grp =", "def _save_fired(self): self._dump_calibration() def _dump_calibration(self): pc = MeterCalibration() coeffs =", "import Manager from pychron.database.selectors.power_calibration_selector import ( PowerCalibrationSelector, ) from pychron.database.adapters.power_calibration_adapter", "self._plot_factory(g) return g def _plot_factory(self, graph): graph.new_plot( zoom=True, pan=True, padding=[50,", "f: pickle.dump(pc, f) def _get_calibration_path(self): p = os.path.join( paths.hidden_dir, \"{}_power_calibration\".format(self.parent_name)", "if info.result: bounds = s.graph.plots[0].default_index.metadata[\"selections\"] 
s.bounds = bounds s.calibration_bounds =", "padding=[50, 10, 10, 40], xtitle=\"Setpoint (%)\", ytitle=\"Measured Power (W)\", )", "self._get_calibration_path() self.info(\"saving calibration to {}\".format(p)) with open(p, \"wb\") as f:", "spring, Item(\"save\", show_label=False), Item(\"load_graph\", show_label=False), ), Item(\"graph\", style=\"custom\", show_label=False), show_border=True,", "Manager from pychron.database.selectors.power_calibration_selector import ( PowerCalibrationSelector, ) from pychron.database.adapters.power_calibration_adapter import", "selected_grp = Item(\"selected_calibrations\", editor=editor, show_label=False) data_tab = Group( HGroup(selector_grp, transfer_grp,", "'fill_padding':True, # 'bgcolor':'red', \"padding\": 5 } ) self._plot_factory(g) return g", "pychron.paths import paths from pychron.graph.graph import Graph from pychron.hardware.meter_calibration import", "append = Button replace = Button load_graph = Button save", "_selector_default(self): return self.db._selector_factory() if __name__ == \"__main__\": ccm = CompositeCalibrationManager()", "\"License\"); # you may not use this file except in", "dbselector to select data \"\"\" class BoundsSelector(HasTraits): graph = Instance(Graph)", "List, Any, Float from traitsui.api import View, Item, VGroup, HGroup,", "s.graph.plots[0].default_index.metadata[\"selections\"] s.bounds = bounds s.calibration_bounds = ( polyval(s.coefficients, bounds[0]), polyval(s.coefficients,", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "5 } ) self._plot_factory(g) return g def _plot_factory(self, graph): graph.new_plot(", "info.result: bounds = s.graph.plots[0].default_index.metadata[\"selections\"] s.bounds = bounds s.calibration_bounds = (", "Float def _power_changed(self): pc = self._load_calibration() pc if pc is", "not None: for si in s: trs = list(si.traits().keys()).remove(\"graph\") self.selected_calibrations.append(si.clone_traits(traits=trs))", "pc = pickle.load(f) 
except: return return pc def _dclicked_changed(self): s", "= bounds p = self._get_calibration_path() self.info(\"saving calibration to {}\".format(p)) with", "\", style=\"readonly\"), spring, Item(\"save\", show_label=False), Item(\"load_graph\", show_label=False), ), Item(\"graph\", style=\"custom\",", "def _db_default(self): if self.parent_name == \"FusionsDiode\": name = paths.diodelaser_db else:", "Copyright 2012 <NAME> # # Licensed under the Apache License,", "# distributed under the License is distributed on an \"AS", "not None: self.input, _ = pc.get_input(self.power) def _load_calibration(self): try: p", "# Unless required by applicable law or agreed to in", "= Button load_graph = Button save = Button selected_calibrations =", "open(p, \"rb\") as f: pc = pickle.load(f) except: return return", "spring, VGroup(Item(\"append\", show_label=False), Item(\"replace\", show_label=False)), spring, ) editor = TabularEditor(", "v = View( VGroup(data_tab, process_tab), resizable=True, title=\"Composite {} Power Calibration\".format(self.parent_name),", "=============================================================================== # Copyright 2012 <NAME> # # Licensed under the", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "\"\"\" use a dbselector to select data \"\"\" class BoundsSelector(HasTraits):", "has_bounds = False for i, s in enumerate(self.selected_calibrations): if s.bounds:", "bounds.append(s.calibration_bounds) pc.coefficients = coeffs pc.bounds = bounds p = self._get_calibration_path()", "buttons=[\"OK\", \"Cancel\"], kind=\"livemodal\", ) return v class CompositeCalibrationManager(Manager): db =", "You may obtain a copy of the License at #", "import Graph from pychron.hardware.meter_calibration import MeterCalibration \"\"\" use a dbselector", "padding=[40, 10, 10, 40] # ) has_bounds = False for", "the Apache License, Version 2.0 (the \"License\"); # you may", "pychron.database.selectors.power_calibration_selector import ( 
PowerCalibrationSelector, ) from pychron.database.adapters.power_calibration_adapter import PowerCalibrationAdapter from", "[] bounds = [] for s in self.selected_calibrations: coeffs.append(s.coefficients) bounds.append(s.calibration_bounds)", ") return p def _load_graph_fired(self): g = self.graph g.clear() #", "{}\".format(p)) with open(p, \"wb\") as f: pickle.dump(pc, f) def _get_calibration_path(self):", "else: name = paths.co2laser_db db = PowerCalibrationAdapter(name=name, kind=\"sqlite\") db.connect() return", "data \"\"\" class BoundsSelector(HasTraits): graph = Instance(Graph) def traits_view(self): v", "break s.load_graph(graph=g, new_plot=i == 0) g.redraw() def traits_view(self): selector_grp =", "Item(\"replace\", show_label=False)), spring, ) editor = TabularEditor( adapter=self.selector.tabular_adapter(), editable=False, dclicked=\"object.dclicked\",", "= Group( HGroup( Item(\"power\"), Item(\"input\", format_str=\" %0.3f \", style=\"readonly\"), spring," ]
[ "detransposed recorded games for the next move recommended_move = maximin(maximin_list)", "these_probs = reoriented_candidates[this_game][\"probs\"][num_moves].copy( ) # tack on the cell #", "in range(len(reoriented_candidates)): these_probs = [] # get the probability element", "tg_list) # for each of the 8 transposed versions of", "build a list of lower and upper bound tuples for", "into 8 different games and store in a list #", "no matching games in the game history #print(\"best_move: random choice...\")", "we need to look at losing and drawing games so", "will be resolved later via backpropagation candidate_games = ttt_base.get_games_list(bounds_list) #print(\"best_move:", "choice...\") # return random_move(this_board) # estimate the optimal next move", "# iterate though the game candidates for this_game in range(len(reoriented_candidates)):", "good move made early that resulted in a draw/loss because", "recommended_move else: # there are no matching games in the", "games in the game history #print(\"best_move: random choice...\") # return", "=\", tg_list) # for each of the 8 transposed versions", "current game state if candidate_games != False: # this is", "of the current game reoriented_candidates = reorient_games(tg_list, candidate_games) #print(\"best_move: number", "later via backpropagation candidate_games = ttt_base.get_games_list(bounds_list) #print(\"best_move: candidate_games =\", candidate_games)", "number of the move about to be made num_moves =", "estimate the optimal next move optimal_move = probs_calc.calc_next_move(this_board) #print(\"This board", "data to the list to be submitted to maximin maximin_list.append(these_probs)", "make_play to simplify # by Russell on 3/5/21 #from ttt_package.libs.move_utils", "source function, to allow the game to be transposed back", "list of candidate games from the game history # we", "ttt_package.libs.compare import get_transposed_games, reorient_games from ttt_package.libs.calc_game_bound import 
calc_game_bound from ttt_package.libs.maxi_min", "# return random_move(this_board) # estimate the optimal next move optimal_move", "optimal next move optimal_move = probs_calc.calc_next_move(this_board) #print(\"This board =\", this_board)", "the transposed game list # de-transpose the candidate games to", "store in a list # the returned value is a", "import maximin # find the best move for this agent,", "tg_list: lower_bound = calc_game_bound(tgame[\"transpose\"], agent, 'L') upper_bound = calc_game_bound(tgame[\"transpose\"], agent,", "\", recommended_move) return recommended_move else: # there are no matching", "different games and store in a list # the returned", "of reoriented_candidates games = \", len(reoriented_candidates)) #print(\"best_move: number of candidate", "bounds_list = [] #print(\"best_move - this_board:\", this_board) # TRANSPOSE the", "for each of the 8 transposed versions of the current", "return recommended_move else: # there are no matching games in", "to allow the game to be transposed back tg_list =", "= \", len(reoriented_candidates)) #print(\"best_move: number of candidate games = \",", "board =\", this_board) #print(\"Calculating optimal move =\", optimal_move) return optimal_move", "probs_calc): candidate_games = [] lower_bound = 0 upper_bound = 0", "len gives the number of the move about to be", "maximin_list = [] # iterate though the game candidates for", "and drawing games so that we can thoroughly explore the", "the probability element for the next move of this game", "tgame in tg_list: lower_bound = calc_game_bound(tgame[\"transpose\"], agent, 'L') upper_bound =", "in the game_history def best_move(this_board, agent, ttt_base, probs_calc): candidate_games =", "look at losing and drawing games so that we can", "that we can thoroughly explore the action space # we", "the current game state if candidate_games != False: # this", "list of the matching detransposition games of the current game", "game submission data to the list to be 
submitted to", "#print(\"This board =\", this_board) #print(\"Calculating optimal move =\", optimal_move) return", "game # and the source function, to allow the game", "#print(\"best_move: move = \", recommended_move) return recommended_move else: # there", "the game history #print(\"best_move: random choice...\") # return random_move(this_board) #", "len(reoriented_candidates)) #print(\"best_move: number of candidate games = \", len(candidate_games)) #print('best_move:", "list # de-transpose the candidate games to get the right", "for the next move recommended_move = maximin(maximin_list) #print(\"best_move: move =", "later bad move - these will be resolved later via", "based on prior games in the game_history def best_move(this_board, agent,", "candidate_games) maximin_list = [] # iterate though the game candidates", "#print(\"best_move: number of candidate games = \", len(candidate_games)) #print('best_move: reoriented_candidates", "the current game reoriented_candidates = reorient_games(tg_list, candidate_games) #print(\"best_move: number of", "agent, ttt_base, probs_calc): candidate_games = [] lower_bound = 0 upper_bound", "calc_game_bound(tgame[\"transpose\"], agent, 'U') bounds_tuple = (lower_bound, upper_bound) bounds_list.append(bounds_tuple) #print(\"best_move: bounds_tuple", "\", len(reoriented_candidates)) #print(\"best_move: number of candidate games = \", len(candidate_games))", "# if there is at least one game that matches", "reoriented_candidates) #print('best_move: candidate_games =', candidate_games) maximin_list = [] # iterate", "# fetch the list of candidate games from the game", "game in question # build a list of lower and", "this_game in range(len(reoriented_candidates)): these_probs = [] # get the probability", "be submitted to maximin maximin_list.append(these_probs) #print(\"maximin_list:\", maximin_list) # send the", "tack on the cell # of the move these_probs.append( reoriented_candidates[this_game][\"game\"][num_moves])", "# append the 
game submission data to the list to", "current game state into 8 different games and store in", "# TRANSPOSE the current game state into 8 different games", "maximin maximin_list.append(these_probs) #print(\"maximin_list:\", maximin_list) # send the list of probabilites", "and store in a list # the returned value is", "num_moves = len(this_board) bounds_list = [] #print(\"best_move - this_board:\", this_board)", "of probabilites of the detransposed recorded games for the next", "dictionaries that contain the transposed game # and the source", "= 0 # note that len gives the number of", "next move of this game candidate these_probs = reoriented_candidates[this_game][\"probs\"][num_moves].copy( )", "state if candidate_games != False: # this is the list", "= \", recommended_move) return recommended_move else: # there are no", "get_transposed_games(this_board) #print(\"best_move: tg_list =\", tg_list) # for each of the", "= 0 upper_bound = 0 # note that len gives", "from make_play to simplify # by Russell on 3/5/21 #from", "matching detransposition games of the current game reoriented_candidates = reorient_games(tg_list,", "bounds_tuple = (lower_bound, upper_bound) bounds_list.append(bounds_tuple) #print(\"best_move: bounds_tuple =\", bounds_tuple) #", "next move recommended_move = maximin(maximin_list) #print(\"best_move: move = \", recommended_move)", "for the tg_list using calc_game_bound for tgame in tg_list: lower_bound", "move for this agent, based on prior games in the", "candidates for this_game in range(len(reoriented_candidates)): these_probs = [] # get", "is the list of games that match the transposed game", "#print(\"best_move: random choice...\") # return random_move(this_board) # estimate the optimal", "- these will be resolved later via backpropagation candidate_games =", "recorded games for the next move recommended_move = maximin(maximin_list) #print(\"best_move:", "each of the 8 transposed versions of the current game", "the 8 transposed versions of 
the current game in question", "# later bad move - these will be resolved later", "note that len gives the number of the move about", "the list of games that match the transposed game list", "= [] # iterate though the game candidates for this_game", "the transposed game # and the source function, to allow", "len(this_board) bounds_list = [] #print(\"best_move - this_board:\", this_board) # TRANSPOSE", "reoriented_candidates games = \", len(reoriented_candidates)) #print(\"best_move: number of candidate games", "of the move these_probs.append( reoriented_candidates[this_game][\"game\"][num_moves]) # append the game submission", "game that matches the current game state if candidate_games !=", "games = \", len(candidate_games)) #print('best_move: reoriented_candidates =', reoriented_candidates) #print('best_move: candidate_games", "the returned value is a list of dictionaries that contain", "#print(\"best_move: number of reoriented_candidates games = \", len(reoriented_candidates)) #print(\"best_move: number", "one game that matches the current game state if candidate_games", "get_open_cells from ttt_package.libs.compare import get_transposed_games, reorient_games from ttt_package.libs.calc_game_bound import calc_game_bound", "= ttt_base.get_games_list(bounds_list) #print(\"best_move: candidate_games =\", candidate_games) # if there is", "drawing games so that we can thoroughly explore the action", "so that we can thoroughly explore the action space #", "of the 8 transposed versions of the current game in", "a list of dictionaries that contain the transposed game #", "game history #print(\"best_move: random choice...\") # return random_move(this_board) # estimate", "best_move(this_board, agent, ttt_base, probs_calc): candidate_games = [] lower_bound = 0", "game_history def best_move(this_board, agent, ttt_base, probs_calc): candidate_games = [] lower_bound", "the list to be submitted to maximin maximin_list.append(these_probs) #print(\"maximin_list:\", 
maximin_list)", "in a list # the returned value is a list", "submission data to the list to be submitted to maximin", "probabilites of the detransposed recorded games for the next move", "in a draw/loss because of a # later bad move", "reoriented_candidates = reorient_games(tg_list, candidate_games) #print(\"best_move: number of reoriented_candidates games =", "of a # later bad move - these will be", "lower_bound = 0 upper_bound = 0 # note that len", "bound tuples for the tg_list using calc_game_bound for tgame in", "game candidates for this_game in range(len(reoriented_candidates)): these_probs = [] #", "[] lower_bound = 0 upper_bound = 0 # note that", "for the next move # get a list of the", "#print('best_move: reoriented_candidates =', reoriented_candidates) #print('best_move: candidate_games =', candidate_games) maximin_list =", "if there is at least one game that matches the", "be resolved later via backpropagation candidate_games = ttt_base.get_games_list(bounds_list) #print(\"best_move: candidate_games", "=\", bounds_tuple) # fetch the list of candidate games from", "tg_list using calc_game_bound for tgame in tg_list: lower_bound = calc_game_bound(tgame[\"transpose\"],", "because of a # later bad move - these will", "# de-transpose the candidate games to get the right cell", "that match the transposed game list # de-transpose the candidate", "3/5/21 #from ttt_package.libs.move_utils import get_open_cells from ttt_package.libs.compare import get_transposed_games, reorient_games", "= [] # get the probability element for the next", "transposed game list # de-transpose the candidate games to get", "games for the next move recommended_move = maximin(maximin_list) #print(\"best_move: move", "next move # get a list of the matching detransposition", "get a list of the matching detransposition games of the", "- this_board:\", this_board) # TRANSPOSE the current game state into", "about to be made num_moves = len(this_board) bounds_list = []", "be made num_moves = 
len(this_board) bounds_list = [] #print(\"best_move -", "game list # de-transpose the candidate games to get the", "list # the returned value is a list of dictionaries", "move recommended_move = maximin(maximin_list) #print(\"best_move: move = \", recommended_move) return", "the next move # get a list of the matching", "# we must avoid overlooking a good move made early", "ttt_package.libs.move_utils import get_open_cells from ttt_package.libs.compare import get_transposed_games, reorient_games from ttt_package.libs.calc_game_bound", "[] # iterate though the game candidates for this_game in", "this is the list of games that match the transposed", "maximin # find the best move for this agent, based", "the optimal next move optimal_move = probs_calc.calc_next_move(this_board) #print(\"This board =\",", "import get_open_cells from ttt_package.libs.compare import get_transposed_games, reorient_games from ttt_package.libs.calc_game_bound import", "this_board) # TRANSPOSE the current game state into 8 different", "early that resulted in a draw/loss because of a #", "candidate games = \", len(candidate_games)) #print('best_move: reoriented_candidates =', reoriented_candidates) #print('best_move:", "for this_game in range(len(reoriented_candidates)): these_probs = [] # get the", "game candidate these_probs = reoriented_candidates[this_game][\"probs\"][num_moves].copy( ) # tack on the", "in question # build a list of lower and upper", "# we need to look at losing and drawing games", "move = \", recommended_move) return recommended_move else: # there are", "#print(\"best_move: candidate_games =\", candidate_games) # if there is at least", "games to get the right cell for the next move", "bounds_list.append(bounds_tuple) #print(\"best_move: bounds_tuple =\", bounds_tuple) # fetch the list of", "move # get a list of the matching detransposition games", "reoriented_candidates =', reoriented_candidates) #print('best_move: candidate_games =', candidate_games) maximin_list = []", 
"tuples for the tg_list using calc_game_bound for tgame in tg_list:", "(lower_bound, upper_bound) bounds_list.append(bounds_tuple) #print(\"best_move: bounds_tuple =\", bounds_tuple) # fetch the", "the current game state into 8 different games and store", "on prior games in the game_history def best_move(this_board, agent, ttt_base,", "maximin_list.append(these_probs) #print(\"maximin_list:\", maximin_list) # send the list of probabilites of", "and the source function, to allow the game to be", "# build a list of lower and upper bound tuples", "move made early that resulted in a draw/loss because of", "[] # get the probability element for the next move", "[] #print(\"best_move - this_board:\", this_board) # TRANSPOSE the current game", "list of games that match the transposed game list #", "ttt_base, probs_calc): candidate_games = [] lower_bound = 0 upper_bound =", "element for the next move of this game candidate these_probs", "append the game submission data to the list to be", "to look at losing and drawing games so that we", "ttt_package.libs.calc_game_bound import calc_game_bound from ttt_package.libs.maxi_min import maximin # find the", "the game_history def best_move(this_board, agent, ttt_base, probs_calc): candidate_games = []", "random_move(this_board) # estimate the optimal next move optimal_move = probs_calc.calc_next_move(this_board)", "the move these_probs.append( reoriented_candidates[this_game][\"game\"][num_moves]) # append the game submission data", "the right cell for the next move # get a", "get the right cell for the next move # get", "TRANSPOSE the current game state into 8 different games and", "need to look at losing and drawing games so that", "to be submitted to maximin maximin_list.append(these_probs) #print(\"maximin_list:\", maximin_list) # send", "these_probs = [] # get the probability element for the", "the detransposed recorded games for the next move recommended_move =", "candidate_games = ttt_base.get_games_list(bounds_list) 
#print(\"best_move: candidate_games =\", candidate_games) # if there", "game reoriented_candidates = reorient_games(tg_list, candidate_games) #print(\"best_move: number of reoriented_candidates games", "reorient_games from ttt_package.libs.calc_game_bound import calc_game_bound from ttt_package.libs.maxi_min import maximin #", "prior games in the game_history def best_move(this_board, agent, ttt_base, probs_calc):", "a draw/loss because of a # later bad move -", "candidate_games = [] lower_bound = 0 upper_bound = 0 #", "made early that resulted in a draw/loss because of a", "resulted in a draw/loss because of a # later bad", "right cell for the next move # get a list", "move about to be made num_moves = len(this_board) bounds_list =", "upper bound tuples for the tg_list using calc_game_bound for tgame", "get_transposed_games, reorient_games from ttt_package.libs.calc_game_bound import calc_game_bound from ttt_package.libs.maxi_min import maximin", "games = \", len(reoriented_candidates)) #print(\"best_move: number of candidate games =", "reoriented_candidates[this_game][\"game\"][num_moves]) # append the game submission data to the list", "current game in question # build a list of lower", "at least one game that matches the current game state", "the next move of this game candidate these_probs = reoriented_candidates[this_game][\"probs\"][num_moves].copy(", ") # tack on the cell # of the move", "the cell # of the move these_probs.append( reoriented_candidates[this_game][\"game\"][num_moves]) # append", "games from the game history # we need to look", "history # we need to look at losing and drawing", "probability element for the next move of this game candidate", "candidate these_probs = reoriented_candidates[this_game][\"probs\"][num_moves].copy( ) # tack on the cell", "move these_probs.append( reoriented_candidates[this_game][\"game\"][num_moves]) # append the game submission data to", "games so that we can thoroughly explore the action space", "= [] lower_bound 
= 0 upper_bound = 0 # note", "#print(\"best_move: tg_list =\", tg_list) # for each of the 8", "maximin(maximin_list) #print(\"best_move: move = \", recommended_move) return recommended_move else: #", "list of lower and upper bound tuples for the tg_list", "range(len(reoriented_candidates)): these_probs = [] # get the probability element for", "matches the current game state if candidate_games != False: #", "# note that len gives the number of the move", "move of this game candidate these_probs = reoriented_candidates[this_game][\"probs\"][num_moves].copy( ) #", "matching games in the game history #print(\"best_move: random choice...\") #", "from ttt_package.libs.calc_game_bound import calc_game_bound from ttt_package.libs.maxi_min import maximin # find", "next move optimal_move = probs_calc.calc_next_move(this_board) #print(\"This board =\", this_board) #print(\"Calculating", "of the move about to be made num_moves = len(this_board)", "list of probabilites of the detransposed recorded games for the", "recommended_move) return recommended_move else: # there are no matching games", "via backpropagation candidate_games = ttt_base.get_games_list(bounds_list) #print(\"best_move: candidate_games =\", candidate_games) #", "these_probs.append( reoriented_candidates[this_game][\"game\"][num_moves]) # append the game submission data to the", "submitted to maximin maximin_list.append(these_probs) #print(\"maximin_list:\", maximin_list) # send the list", "is a list of dictionaries that contain the transposed game", "can thoroughly explore the action space # we must avoid", "<gh_stars>0 # refactored from make_play to simplify # by Russell", "action space # we must avoid overlooking a good move", "#print(\"best_move: bounds_tuple =\", bounds_tuple) # fetch the list of candidate", "space # we must avoid overlooking a good move made", "detransposition games of the current game reoriented_candidates = reorient_games(tg_list, candidate_games)", "calc_game_bound from 
ttt_package.libs.maxi_min import maximin # find the best move", "= calc_game_bound(tgame[\"transpose\"], agent, 'L') upper_bound = calc_game_bound(tgame[\"transpose\"], agent, 'U') bounds_tuple", "recommended_move = maximin(maximin_list) #print(\"best_move: move = \", recommended_move) return recommended_move", "the best move for this agent, based on prior games", "agent, 'L') upper_bound = calc_game_bound(tgame[\"transpose\"], agent, 'U') bounds_tuple = (lower_bound,", "a list # the returned value is a list of", "Russell on 3/5/21 #from ttt_package.libs.move_utils import get_open_cells from ttt_package.libs.compare import", "we must avoid overlooking a good move made early that", "candidate games from the game history # we need to", "on 3/5/21 #from ttt_package.libs.move_utils import get_open_cells from ttt_package.libs.compare import get_transposed_games,", "move - these will be resolved later via backpropagation candidate_games", "\", len(candidate_games)) #print('best_move: reoriented_candidates =', reoriented_candidates) #print('best_move: candidate_games =', candidate_games)", "this_board:\", this_board) # TRANSPOSE the current game state into 8", "send the list of probabilites of the detransposed recorded games", "to simplify # by Russell on 3/5/21 #from ttt_package.libs.move_utils import", "and upper bound tuples for the tg_list using calc_game_bound for", "of this game candidate these_probs = reoriented_candidates[this_game][\"probs\"][num_moves].copy( ) # tack", "of candidate games = \", len(candidate_games)) #print('best_move: reoriented_candidates =', reoriented_candidates)", "of lower and upper bound tuples for the tg_list using", "question # build a list of lower and upper bound", "ttt_base.get_games_list(bounds_list) #print(\"best_move: candidate_games =\", candidate_games) # if there is at", "the next move recommended_move = maximin(maximin_list) #print(\"best_move: move = \",", "the matching detransposition games of the current game 
reoriented_candidates =", "bounds_tuple =\", bounds_tuple) # fetch the list of candidate games", "8 transposed versions of the current game in question #", "# and the source function, to allow the game to", "contain the transposed game # and the source function, to", "= get_transposed_games(this_board) #print(\"best_move: tg_list =\", tg_list) # for each of", "cell # of the move these_probs.append( reoriented_candidates[this_game][\"game\"][num_moves]) # append the", "of the detransposed recorded games for the next move recommended_move", "#from ttt_package.libs.move_utils import get_open_cells from ttt_package.libs.compare import get_transposed_games, reorient_games from", "# for each of the 8 transposed versions of the", "games that match the transposed game list # de-transpose the", "this agent, based on prior games in the game_history def", "# there are no matching games in the game history", "the game to be transposed back tg_list = get_transposed_games(this_board) #print(\"best_move:", "of the matching detransposition games of the current game reoriented_candidates", "value is a list of dictionaries that contain the transposed", "bad move - these will be resolved later via backpropagation", "match the transposed game list # de-transpose the candidate games", "avoid overlooking a good move made early that resulted in", "reoriented_candidates[this_game][\"probs\"][num_moves].copy( ) # tack on the cell # of the", "there are no matching games in the game history #print(\"best_move:", "agent, based on prior games in the game_history def best_move(this_board,", "using calc_game_bound for tgame in tg_list: lower_bound = calc_game_bound(tgame[\"transpose\"], agent,", "losing and drawing games so that we can thoroughly explore", "of the current game in question # build a list", "thoroughly explore the action space # we must avoid overlooking", "upper_bound = calc_game_bound(tgame[\"transpose\"], agent, 'U') bounds_tuple = (lower_bound, upper_bound) 
bounds_list.append(bounds_tuple)", "candidate_games =', candidate_games) maximin_list = [] # iterate though the", "return random_move(this_board) # estimate the optimal next move optimal_move =", "upper_bound) bounds_list.append(bounds_tuple) #print(\"best_move: bounds_tuple =\", bounds_tuple) # fetch the list", "transposed back tg_list = get_transposed_games(this_board) #print(\"best_move: tg_list =\", tg_list) #", "the list of probabilites of the detransposed recorded games for", "made num_moves = len(this_board) bounds_list = [] #print(\"best_move - this_board:\",", "else: # there are no matching games in the game", "len(candidate_games)) #print('best_move: reoriented_candidates =', reoriented_candidates) #print('best_move: candidate_games =', candidate_games) maximin_list", "0 upper_bound = 0 # note that len gives the", "# tack on the cell # of the move these_probs.append(", "history #print(\"best_move: random choice...\") # return random_move(this_board) # estimate the", "to be transposed back tg_list = get_transposed_games(this_board) #print(\"best_move: tg_list =\",", "#print('best_move: candidate_games =', candidate_games) maximin_list = [] # iterate though", "the tg_list using calc_game_bound for tgame in tg_list: lower_bound =", "there is at least one game that matches the current", "draw/loss because of a # later bad move - these", "resolved later via backpropagation candidate_games = ttt_base.get_games_list(bounds_list) #print(\"best_move: candidate_games =\",", "=', reoriented_candidates) #print('best_move: candidate_games =', candidate_games) maximin_list = [] #", "calc_game_bound for tgame in tg_list: lower_bound = calc_game_bound(tgame[\"transpose\"], agent, 'L')", "state into 8 different games and store in a list", "current game reoriented_candidates = reorient_games(tg_list, candidate_games) #print(\"best_move: number of reoriented_candidates", "= reorient_games(tg_list, candidate_games) #print(\"best_move: number of reoriented_candidates games = 
\",", "fetch the list of candidate games from the game history", "reorient_games(tg_list, candidate_games) #print(\"best_move: number of reoriented_candidates games = \", len(reoriented_candidates))", "transposed versions of the current game in question # build", "optimal_move = probs_calc.calc_next_move(this_board) #print(\"This board =\", this_board) #print(\"Calculating optimal move", "= len(this_board) bounds_list = [] #print(\"best_move - this_board:\", this_board) #", "simplify # by Russell on 3/5/21 #from ttt_package.libs.move_utils import get_open_cells", "the move about to be made num_moves = len(this_board) bounds_list", "# by Russell on 3/5/21 #from ttt_package.libs.move_utils import get_open_cells from", "games in the game_history def best_move(this_board, agent, ttt_base, probs_calc): candidate_games", "a list of lower and upper bound tuples for the", "of games that match the transposed game list # de-transpose", "tg_list =\", tg_list) # for each of the 8 transposed", "a # later bad move - these will be resolved", "def best_move(this_board, agent, ttt_base, probs_calc): candidate_games = [] lower_bound =", "of candidate games from the game history # we need", "candidate_games != False: # this is the list of games", "'L') upper_bound = calc_game_bound(tgame[\"transpose\"], agent, 'U') bounds_tuple = (lower_bound, upper_bound)", "find the best move for this agent, based on prior", "least one game that matches the current game state if", "that resulted in a draw/loss because of a # later", "# refactored from make_play to simplify # by Russell on", "allow the game to be transposed back tg_list = get_transposed_games(this_board)", "overlooking a good move made early that resulted in a", "be transposed back tg_list = get_transposed_games(this_board) #print(\"best_move: tg_list =\", tg_list)", "game history # we need to look at losing and", "best move for this agent, based on prior games in", "the action space # we must avoid overlooking a good", "iterate 
though the game candidates for this_game in range(len(reoriented_candidates)): these_probs", "this game candidate these_probs = reoriented_candidates[this_game][\"probs\"][num_moves].copy( ) # tack on", "is at least one game that matches the current game", "the game candidates for this_game in range(len(reoriented_candidates)): these_probs = []", "# find the best move for this agent, based on", "games and store in a list # the returned value", "for tgame in tg_list: lower_bound = calc_game_bound(tgame[\"transpose\"], agent, 'L') upper_bound", "#print(\"best_move - this_board:\", this_board) # TRANSPOSE the current game state", "maximin_list) # send the list of probabilites of the detransposed", "candidate games to get the right cell for the next", "by Russell on 3/5/21 #from ttt_package.libs.move_utils import get_open_cells from ttt_package.libs.compare", "to be made num_moves = len(this_board) bounds_list = [] #print(\"best_move", "tg_list = get_transposed_games(this_board) #print(\"best_move: tg_list =\", tg_list) # for each", "from the game history # we need to look at", "agent, 'U') bounds_tuple = (lower_bound, upper_bound) bounds_list.append(bounds_tuple) #print(\"best_move: bounds_tuple =\",", "candidate_games =\", candidate_games) # if there is at least one", "the number of the move about to be made num_moves", "in tg_list: lower_bound = calc_game_bound(tgame[\"transpose\"], agent, 'L') upper_bound = calc_game_bound(tgame[\"transpose\"],", "explore the action space # we must avoid overlooking a", "the game history # we need to look at losing", "of dictionaries that contain the transposed game # and the", "get the probability element for the next move of this", "number of candidate games = \", len(candidate_games)) #print('best_move: reoriented_candidates =',", "= maximin(maximin_list) #print(\"best_move: move = \", recommended_move) return recommended_move else:", "# get a list of the matching detransposition games of", "'U') bounds_tuple = (lower_bound, 
upper_bound) bounds_list.append(bounds_tuple) #print(\"best_move: bounds_tuple =\", bounds_tuple)", "= [] #print(\"best_move - this_board:\", this_board) # TRANSPOSE the current", "= probs_calc.calc_next_move(this_board) #print(\"This board =\", this_board) #print(\"Calculating optimal move =\",", "backpropagation candidate_games = ttt_base.get_games_list(bounds_list) #print(\"best_move: candidate_games =\", candidate_games) # if", "!= False: # this is the list of games that", "gives the number of the move about to be made", "import calc_game_bound from ttt_package.libs.maxi_min import maximin # find the best", "list of dictionaries that contain the transposed game # and", "# of the move these_probs.append( reoriented_candidates[this_game][\"game\"][num_moves]) # append the game", "candidate_games) #print(\"best_move: number of reoriented_candidates games = \", len(reoriented_candidates)) #print(\"best_move:", "for the next move of this game candidate these_probs =", "must avoid overlooking a good move made early that resulted", "= calc_game_bound(tgame[\"transpose\"], agent, 'U') bounds_tuple = (lower_bound, upper_bound) bounds_list.append(bounds_tuple) #print(\"best_move:", "to get the right cell for the next move #", "that contain the transposed game # and the source function,", "though the game candidates for this_game in range(len(reoriented_candidates)): these_probs =", "move optimal_move = probs_calc.calc_next_move(this_board) #print(\"This board =\", this_board) #print(\"Calculating optimal", "a list of the matching detransposition games of the current", "from ttt_package.libs.compare import get_transposed_games, reorient_games from ttt_package.libs.calc_game_bound import calc_game_bound from", "upper_bound = 0 # note that len gives the number", "# the returned value is a list of dictionaries that", "8 different games and store in a list # the", "list to be submitted to maximin maximin_list.append(these_probs) #print(\"maximin_list:\", maximin_list) #", "to 
the list to be submitted to maximin maximin_list.append(these_probs) #print(\"maximin_list:\",", "transposed game # and the source function, to allow the", "game state if candidate_games != False: # this is the", "versions of the current game in question # build a", "candidate_games) # if there is at least one game that", "we can thoroughly explore the action space # we must", "at losing and drawing games so that we can thoroughly", "calc_game_bound(tgame[\"transpose\"], agent, 'L') upper_bound = calc_game_bound(tgame[\"transpose\"], agent, 'U') bounds_tuple =", "the game submission data to the list to be submitted", "number of reoriented_candidates games = \", len(reoriented_candidates)) #print(\"best_move: number of", "= (lower_bound, upper_bound) bounds_list.append(bounds_tuple) #print(\"best_move: bounds_tuple =\", bounds_tuple) # fetch", "False: # this is the list of games that match", "from ttt_package.libs.maxi_min import maximin # find the best move for", "are no matching games in the game history #print(\"best_move: random", "import get_transposed_games, reorient_games from ttt_package.libs.calc_game_bound import calc_game_bound from ttt_package.libs.maxi_min import", "probs_calc.calc_next_move(this_board) #print(\"This board =\", this_board) #print(\"Calculating optimal move =\", optimal_move)", "in the game history #print(\"best_move: random choice...\") # return random_move(this_board)", "# get the probability element for the next move of", "these will be resolved later via backpropagation candidate_games = ttt_base.get_games_list(bounds_list)", "returned value is a list of dictionaries that contain the", "0 # note that len gives the number of the", "the candidate games to get the right cell for the", "the source function, to allow the game to be transposed", "de-transpose the candidate games to get the right cell for", "= \", len(candidate_games)) #print('best_move: reoriented_candidates =', reoriented_candidates) #print('best_move: candidate_games 
=',", "lower and upper bound tuples for the tg_list using calc_game_bound", "game state into 8 different games and store in a", "the current game in question # build a list of", "cell for the next move # get a list of", "that matches the current game state if candidate_games != False:", "game to be transposed back tg_list = get_transposed_games(this_board) #print(\"best_move: tg_list", "=', candidate_games) maximin_list = [] # iterate though the game", "on the cell # of the move these_probs.append( reoriented_candidates[this_game][\"game\"][num_moves]) #", "the list of candidate games from the game history #", "a good move made early that resulted in a draw/loss", "for this agent, based on prior games in the game_history", "if candidate_games != False: # this is the list of", "function, to allow the game to be transposed back tg_list", "random choice...\") # return random_move(this_board) # estimate the optimal next", "back tg_list = get_transposed_games(this_board) #print(\"best_move: tg_list =\", tg_list) # for", "bounds_tuple) # fetch the list of candidate games from the", "ttt_package.libs.maxi_min import maximin # find the best move for this", "# this is the list of games that match the", "to maximin maximin_list.append(these_probs) #print(\"maximin_list:\", maximin_list) # send the list of", "=\", candidate_games) # if there is at least one game", "#print(\"maximin_list:\", maximin_list) # send the list of probabilites of the", "# estimate the optimal next move optimal_move = probs_calc.calc_next_move(this_board) #print(\"This", "that len gives the number of the move about to", "lower_bound = calc_game_bound(tgame[\"transpose\"], agent, 'L') upper_bound = calc_game_bound(tgame[\"transpose\"], agent, 'U')", "= reoriented_candidates[this_game][\"probs\"][num_moves].copy( ) # tack on the cell # of", "refactored from make_play to simplify # by Russell on 3/5/21", "# send the list of probabilites of the detransposed recorded", "games of the current game 
reoriented_candidates = reorient_games(tg_list, candidate_games) #print(\"best_move:" ]
[ "# python 线程测试 import _thread import time from yvhai.demo.base import", "in range(times): time.sleep(interval) print(\" -- %s: %s\" % (thread_name, time.ctime(time.time())))", "print_time(thread_name, interval, times): for cnt in range(times): time.sleep(interval) print(\" --", "6)) except: print(\"Error: 无法启动线程\") # 主线程无限等待 while 1: pass @staticmethod", "time.ctime(time.time()))) class RawThreadDemo(YHDemo): def __init__(self): super(RawThreadDemo, self).__init__('_thread') @staticmethod def main():", "YHDemo def print_time(thread_name, interval, times): for cnt in range(times): time.sleep(interval)", "#!/usr/bin/env python3 # python 线程测试 import _thread import time from", "# 主线程无限等待 while 1: pass @staticmethod def demo(args=[]): RawThreadDemo.main() if", "线程测试 import _thread import time from yvhai.demo.base import YHDemo def", "def print_time(thread_name, interval, times): for cnt in range(times): time.sleep(interval) print(\"", "_thread.start_new_thread(print_time, (\"Thread-02\", 2, 6)) except: print(\"Error: 无法启动线程\") # 主线程无限等待 while", "while 1: pass @staticmethod def demo(args=[]): RawThreadDemo.main() if __name__ ==", "RawThreadDemo(YHDemo): def __init__(self): super(RawThreadDemo, self).__init__('_thread') @staticmethod def main(): try: _thread.start_new_thread(print_time,", "super(RawThreadDemo, self).__init__('_thread') @staticmethod def main(): try: _thread.start_new_thread(print_time, (\"Thread-01\", 1, 10))", "python3 # python 线程测试 import _thread import time from yvhai.demo.base", "import YHDemo def print_time(thread_name, interval, times): for cnt in range(times):", "times): for cnt in range(times): time.sleep(interval) print(\" -- %s: %s\"", "@staticmethod def main(): try: _thread.start_new_thread(print_time, (\"Thread-01\", 1, 10)) _thread.start_new_thread(print_time, (\"Thread-02\",", "(\"Thread-02\", 2, 6)) except: print(\"Error: 无法启动线程\") # 主线程无限等待 while 1:", "print(\"Error: 无法启动线程\") # 主线程无限等待 while 1: pass @staticmethod def demo(args=[]):", 
"from yvhai.demo.base import YHDemo def print_time(thread_name, interval, times): for cnt", "cnt in range(times): time.sleep(interval) print(\" -- %s: %s\" % (thread_name,", "python 线程测试 import _thread import time from yvhai.demo.base import YHDemo", "print(\" -- %s: %s\" % (thread_name, time.ctime(time.time()))) class RawThreadDemo(YHDemo): def", "% (thread_name, time.ctime(time.time()))) class RawThreadDemo(YHDemo): def __init__(self): super(RawThreadDemo, self).__init__('_thread') @staticmethod", "class RawThreadDemo(YHDemo): def __init__(self): super(RawThreadDemo, self).__init__('_thread') @staticmethod def main(): try:", "def main(): try: _thread.start_new_thread(print_time, (\"Thread-01\", 1, 10)) _thread.start_new_thread(print_time, (\"Thread-02\", 2,", "_thread.start_new_thread(print_time, (\"Thread-01\", 1, 10)) _thread.start_new_thread(print_time, (\"Thread-02\", 2, 6)) except: print(\"Error:", "_thread import time from yvhai.demo.base import YHDemo def print_time(thread_name, interval,", "yvhai.demo.base import YHDemo def print_time(thread_name, interval, times): for cnt in", "1: pass @staticmethod def demo(args=[]): RawThreadDemo.main() if __name__ == '__main__':", "(thread_name, time.ctime(time.time()))) class RawThreadDemo(YHDemo): def __init__(self): super(RawThreadDemo, self).__init__('_thread') @staticmethod def", "-- %s: %s\" % (thread_name, time.ctime(time.time()))) class RawThreadDemo(YHDemo): def __init__(self):", "self).__init__('_thread') @staticmethod def main(): try: _thread.start_new_thread(print_time, (\"Thread-01\", 1, 10)) _thread.start_new_thread(print_time,", "range(times): time.sleep(interval) print(\" -- %s: %s\" % (thread_name, time.ctime(time.time()))) class", "2, 6)) except: print(\"Error: 无法启动线程\") # 主线程无限等待 while 1: pass", "1, 10)) _thread.start_new_thread(print_time, (\"Thread-02\", 2, 6)) except: print(\"Error: 无法启动线程\") #", "pass @staticmethod def demo(args=[]): RawThreadDemo.main() if __name__ == '__main__': 
RawThreadDemo.demo()", "(\"Thread-01\", 1, 10)) _thread.start_new_thread(print_time, (\"Thread-02\", 2, 6)) except: print(\"Error: 无法启动线程\")", "try: _thread.start_new_thread(print_time, (\"Thread-01\", 1, 10)) _thread.start_new_thread(print_time, (\"Thread-02\", 2, 6)) except:", "无法启动线程\") # 主线程无限等待 while 1: pass @staticmethod def demo(args=[]): RawThreadDemo.main()", "for cnt in range(times): time.sleep(interval) print(\" -- %s: %s\" %", "import time from yvhai.demo.base import YHDemo def print_time(thread_name, interval, times):", "%s: %s\" % (thread_name, time.ctime(time.time()))) class RawThreadDemo(YHDemo): def __init__(self): super(RawThreadDemo,", "%s\" % (thread_name, time.ctime(time.time()))) class RawThreadDemo(YHDemo): def __init__(self): super(RawThreadDemo, self).__init__('_thread')", "except: print(\"Error: 无法启动线程\") # 主线程无限等待 while 1: pass @staticmethod def", "time from yvhai.demo.base import YHDemo def print_time(thread_name, interval, times): for", "interval, times): for cnt in range(times): time.sleep(interval) print(\" -- %s:", "__init__(self): super(RawThreadDemo, self).__init__('_thread') @staticmethod def main(): try: _thread.start_new_thread(print_time, (\"Thread-01\", 1,", "main(): try: _thread.start_new_thread(print_time, (\"Thread-01\", 1, 10)) _thread.start_new_thread(print_time, (\"Thread-02\", 2, 6))", "def __init__(self): super(RawThreadDemo, self).__init__('_thread') @staticmethod def main(): try: _thread.start_new_thread(print_time, (\"Thread-01\",", "10)) _thread.start_new_thread(print_time, (\"Thread-02\", 2, 6)) except: print(\"Error: 无法启动线程\") # 主线程无限等待", "主线程无限等待 while 1: pass @staticmethod def demo(args=[]): RawThreadDemo.main() if __name__", "import _thread import time from yvhai.demo.base import YHDemo def print_time(thread_name,", "time.sleep(interval) print(\" -- %s: %s\" % (thread_name, time.ctime(time.time()))) class RawThreadDemo(YHDemo):" ]
[ "\"similarity_type\" LOSS_TYPE = \"loss_type\" NUM_NEG = \"number_of_negative_examples\" MAX_POS_SIM = \"maximum_positive_similarity\"", "constants for configuration parameters of our tensorflow models LABEL =", "by tf.one_hot. LABEL_PAD_ID = -1 HIDDEN_LAYERS_SIZES = \"hidden_layers_sizes\" SHARE_HIDDEN_LAYERS =", "\"number_of_negative_examples\" MAX_POS_SIM = \"maximum_positive_similarity\" MAX_NEG_SIM = \"maximum_negative_similarity\" USE_MAX_NEG_SIM = \"use_maximum_negative_similarity\"", "DROP_RATE_DIALOGUE = \"drop_rate_dialogue\" DROP_RATE_LABEL = \"drop_rate_label\" CONSTRAIN_SIMILARITIES = \"constrain_similarities\" WEIGHT_SPARSITY", "= \"auto\" INNER = \"inner\" LINEAR_NORM = \"linear_norm\" COSINE =", "\"use_sparse_input_dropout\" DENSE_INPUT_DROPOUT = \"use_dense_input_dropout\" RANKING_LENGTH = \"ranking_length\" MODEL_CONFIDENCE = \"model_confidence\"", "= \"sentence_features\" FEATURIZERS = \"featurizers\" CHECKPOINT_MODEL = \"checkpoint_model\" MASK =", "\"connection_density\" EVAL_NUM_EPOCHS = \"evaluate_every_number_of_epochs\" EVAL_NUM_EXAMPLES = \"evaluate_on_number_of_examples\" INTENT_CLASSIFICATION = \"intent_classification\"", "FEATURIZERS = \"featurizers\" CHECKPOINT_MODEL = \"checkpoint_model\" MASK = \"mask\" IGNORE_INTENTS_LIST", "LABEL_PAD_ID = -1 HIDDEN_LAYERS_SIZES = \"hidden_layers_sizes\" SHARE_HIDDEN_LAYERS = \"share_hidden_layers\" TRANSFORMER_SIZE", "= \"batch_strategy\" EPOCHS = \"epochs\" RANDOM_SEED = \"random_seed\" LEARNING_RATE =", "of our tensorflow models LABEL = \"label\" IDS = \"ids\"", "REGULARIZATION_CONSTANT = \"regularization_constant\" NEGATIVE_MARGIN_SCALE = \"negative_margin_scale\" DROP_RATE = \"drop_rate\" DROP_RATE_ATTENTION", "\"score\" THRESHOLD_KEY = \"threshold\" SEVERITY_KEY = \"severity\" NAME = \"name\"", "\"use_value_relative_attention\" MAX_RELATIVE_POSITION = \"max_relative_position\" BATCH_SIZES = \"batch_size\" BATCH_STRATEGY = \"batch_strategy\"", "SEQUENCE_LENGTH = f\"{SEQUENCE}_lengths\" 
SENTENCE = \"sentence\" POOLING = \"pooling\" MAX_POOLING", "\"evaluate_on_number_of_examples\" INTENT_CLASSIFICATION = \"intent_classification\" ENTITY_RECOGNITION = \"entity_recognition\" MASKED_LM = \"use_masked_language_model\"", "\"drop_rate_dialogue\" DROP_RATE_LABEL = \"drop_rate_label\" CONSTRAIN_SIMILARITIES = \"constrain_similarities\" WEIGHT_SPARSITY = \"weight_sparsity\"", "\"linear_norm\" COSINE = \"cosine\" CROSS_ENTROPY = \"cross_entropy\" BALANCED = \"balanced\"", "POSITIVE_SCORES_KEY = \"positive_scores\" NEGATIVE_SCORES_KEY = \"negative_scores\" RANKING_KEY = \"label_ranking\" QUERY_INTENT_KEY", "= \"use_key_relative_attention\" VALUE_RELATIVE_ATTENTION = \"use_value_relative_attention\" MAX_RELATIVE_POSITION = \"max_relative_position\" BATCH_SIZES =", "= \"retrieval_intent\" USE_TEXT_AS_LABEL = \"use_text_as_label\" SOFTMAX = \"softmax\" MARGIN =", "= \"linear_norm\" COSINE = \"cosine\" CROSS_ENTROPY = \"cross_entropy\" BALANCED =", "\"learning_rate\" DENSE_DIMENSION = \"dense_dimension\" CONCAT_DIMENSION = \"concat_dimension\" EMBEDDING_DIMENSION = \"embedding_dimension\"", "COSINE = \"cosine\" CROSS_ENTROPY = \"cross_entropy\" BALANCED = \"balanced\" SEQUENCE", "= \"encoding_dimension\" SIMILARITY_TYPE = \"similarity_type\" LOSS_TYPE = \"loss_type\" NUM_NEG =", "= \"use_masked_language_model\" SPARSE_INPUT_DROPOUT = \"use_sparse_input_dropout\" DENSE_INPUT_DROPOUT = \"use_dense_input_dropout\" RANKING_LENGTH =", "= \"sequence\" SEQUENCE_LENGTH = f\"{SEQUENCE}_lengths\" SENTENCE = \"sentence\" POOLING =", "by CONNECTION_DENSITY CONNECTION_DENSITY = \"connection_density\" EVAL_NUM_EPOCHS = \"evaluate_every_number_of_epochs\" EVAL_NUM_EXAMPLES =", "BILOU_FLAG = \"BILOU_flag\" RETRIEVAL_INTENT = \"retrieval_intent\" USE_TEXT_AS_LABEL = \"use_text_as_label\" SOFTMAX", "\"evaluate_every_number_of_epochs\" EVAL_NUM_EXAMPLES = \"evaluate_on_number_of_examples\" INTENT_CLASSIFICATION = \"intent_classification\" ENTITY_RECOGNITION = 
\"entity_recognition\"", "DROP_RATE_LABEL = \"drop_rate_label\" CONSTRAIN_SIMILARITIES = \"constrain_similarities\" WEIGHT_SPARSITY = \"weight_sparsity\" #", "WEIGHT_SPARSITY = \"weight_sparsity\" # Deprecated and superseeded by CONNECTION_DENSITY CONNECTION_DENSITY", "= \"checkpoint_model\" MASK = \"mask\" IGNORE_INTENTS_LIST = \"ignore_intents_list\" TOLERANCE =", "\"negative_margin_scale\" DROP_RATE = \"drop_rate\" DROP_RATE_ATTENTION = \"drop_rate_attention\" DROP_RATE_DIALOGUE = \"drop_rate_dialogue\"", "= \"positive_scores\" NEGATIVE_SCORES_KEY = \"negative_scores\" RANKING_KEY = \"label_ranking\" QUERY_INTENT_KEY =", "\"sequence\" SEQUENCE_LENGTH = f\"{SEQUENCE}_lengths\" SENTENCE = \"sentence\" POOLING = \"pooling\"", "\"regularization_constant\" NEGATIVE_MARGIN_SCALE = \"negative_margin_scale\" DROP_RATE = \"drop_rate\" DROP_RATE_ATTENTION = \"drop_rate_attention\"", "NEGATIVE_MARGIN_SCALE = \"negative_margin_scale\" DROP_RATE = \"drop_rate\" DROP_RATE_ATTENTION = \"drop_rate_attention\" DROP_RATE_DIALOGUE", "\"featurizers\" CHECKPOINT_MODEL = \"checkpoint_model\" MASK = \"mask\" IGNORE_INTENTS_LIST = \"ignore_intents_list\"", "= \"number_of_attention_heads\" UNIDIRECTIONAL_ENCODER = \"unidirectional_encoder\" KEY_RELATIVE_ATTENTION = \"use_key_relative_attention\" VALUE_RELATIVE_ATTENTION =", "= \"max_relative_position\" BATCH_SIZES = \"batch_size\" BATCH_STRATEGY = \"batch_strategy\" EPOCHS =", "= \"tolerance\" POSITIVE_SCORES_KEY = \"positive_scores\" NEGATIVE_SCORES_KEY = \"negative_scores\" RANKING_KEY =", "= \"softmax\" MARGIN = \"margin\" AUTO = \"auto\" INNER =", "= \"drop_rate\" DROP_RATE_ATTENTION = \"drop_rate_attention\" DROP_RATE_DIALOGUE = \"drop_rate_dialogue\" DROP_RATE_LABEL =", "\"softmax\" MARGIN = \"margin\" AUTO = \"auto\" INNER = \"inner\"", "SENTENCE_FEATURES = \"sentence_features\" FEATURIZERS = \"featurizers\" CHECKPOINT_MODEL = \"checkpoint_model\" MASK", "= \"BILOU_flag\" RETRIEVAL_INTENT = \"retrieval_intent\" USE_TEXT_AS_LABEL 
= \"use_text_as_label\" SOFTMAX =", "= \"connection_density\" EVAL_NUM_EPOCHS = \"evaluate_every_number_of_epochs\" EVAL_NUM_EXAMPLES = \"evaluate_on_number_of_examples\" INTENT_CLASSIFICATION =", "RANKING_KEY = \"label_ranking\" QUERY_INTENT_KEY = \"query_intent\" SCORE_KEY = \"score\" THRESHOLD_KEY", "= \"evaluate_on_number_of_examples\" INTENT_CLASSIFICATION = \"intent_classification\" ENTITY_RECOGNITION = \"entity_recognition\" MASKED_LM =", "= \"model_confidence\" BILOU_FLAG = \"BILOU_flag\" RETRIEVAL_INTENT = \"retrieval_intent\" USE_TEXT_AS_LABEL =", "\"share_hidden_layers\" TRANSFORMER_SIZE = \"transformer_size\" NUM_TRANSFORMER_LAYERS = \"number_of_transformer_layers\" NUM_HEADS = \"number_of_attention_heads\"", "= \"label_ranking\" QUERY_INTENT_KEY = \"query_intent\" SCORE_KEY = \"score\" THRESHOLD_KEY =", "= \"threshold\" SEVERITY_KEY = \"severity\" NAME = \"name\" EPOCH_OVERRIDE =", "\"epochs\" RANDOM_SEED = \"random_seed\" LEARNING_RATE = \"learning_rate\" DENSE_DIMENSION = \"dense_dimension\"", "= \"number_of_negative_examples\" MAX_POS_SIM = \"maximum_positive_similarity\" MAX_NEG_SIM = \"maximum_negative_similarity\" USE_MAX_NEG_SIM =", "= \"mean\" TENSORBOARD_LOG_DIR = \"tensorboard_log_directory\" TENSORBOARD_LOG_LEVEL = \"tensorboard_log_level\" SEQUENCE_FEATURES =", "= \"hidden_layers_sizes\" SHARE_HIDDEN_LAYERS = \"share_hidden_layers\" TRANSFORMER_SIZE = \"transformer_size\" NUM_TRANSFORMER_LAYERS =", "TENSORBOARD_LOG_LEVEL = \"tensorboard_log_level\" SEQUENCE_FEATURES = \"sequence_features\" SENTENCE_FEATURES = \"sentence_features\" FEATURIZERS", "# Deprecated and superseeded by CONNECTION_DENSITY CONNECTION_DENSITY = \"connection_density\" EVAL_NUM_EPOCHS", "be < 0 to avoid index out of bounds errors", "\"hidden_layers_sizes\" SHARE_HIDDEN_LAYERS = \"share_hidden_layers\" TRANSFORMER_SIZE = \"transformer_size\" NUM_TRANSFORMER_LAYERS = \"number_of_transformer_layers\"", "= f\"{SEQUENCE}_lengths\" SENTENCE = \"sentence\" POOLING = \"pooling\" 
MAX_POOLING =", "DROP_RATE = \"drop_rate\" DROP_RATE_ATTENTION = \"drop_rate_attention\" DROP_RATE_DIALOGUE = \"drop_rate_dialogue\" DROP_RATE_LABEL", "\"pooling\" MAX_POOLING = \"max\" MEAN_POOLING = \"mean\" TENSORBOARD_LOG_DIR = \"tensorboard_log_directory\"", "configuration parameters of our tensorflow models LABEL = \"label\" IDS", "models LABEL = \"label\" IDS = \"ids\" # LABEL_PAD_ID is", "< 0 to avoid index out of bounds errors by", "index out of bounds errors by tf.one_hot. LABEL_PAD_ID = -1", "\"sentence\" POOLING = \"pooling\" MAX_POOLING = \"max\" MEAN_POOLING = \"mean\"", "= \"negative_scores\" RANKING_KEY = \"label_ranking\" QUERY_INTENT_KEY = \"query_intent\" SCORE_KEY =", "\"model_confidence\" BILOU_FLAG = \"BILOU_flag\" RETRIEVAL_INTENT = \"retrieval_intent\" USE_TEXT_AS_LABEL = \"use_text_as_label\"", "\"mask\" IGNORE_INTENTS_LIST = \"ignore_intents_list\" TOLERANCE = \"tolerance\" POSITIVE_SCORES_KEY = \"positive_scores\"", "CHECKPOINT_MODEL = \"checkpoint_model\" MASK = \"mask\" IGNORE_INTENTS_LIST = \"ignore_intents_list\" TOLERANCE", "= \"balanced\" SEQUENCE = \"sequence\" SEQUENCE_LENGTH = f\"{SEQUENCE}_lengths\" SENTENCE =", "= \"epochs\" RANDOM_SEED = \"random_seed\" LEARNING_RATE = \"learning_rate\" DENSE_DIMENSION =", "f\"{SEQUENCE}_lengths\" SENTENCE = \"sentence\" POOLING = \"pooling\" MAX_POOLING = \"max\"", "\"entity_recognition\" MASKED_LM = \"use_masked_language_model\" SPARSE_INPUT_DROPOUT = \"use_sparse_input_dropout\" DENSE_INPUT_DROPOUT = \"use_dense_input_dropout\"", "examples. 
# It should be < 0 to avoid index", "\"retrieval_intent\" USE_TEXT_AS_LABEL = \"use_text_as_label\" SOFTMAX = \"softmax\" MARGIN = \"margin\"", "MASK = \"mask\" IGNORE_INTENTS_LIST = \"ignore_intents_list\" TOLERANCE = \"tolerance\" POSITIVE_SCORES_KEY", "SPARSE_INPUT_DROPOUT = \"use_sparse_input_dropout\" DENSE_INPUT_DROPOUT = \"use_dense_input_dropout\" RANKING_LENGTH = \"ranking_length\" MODEL_CONFIDENCE", "SHARE_HIDDEN_LAYERS = \"share_hidden_layers\" TRANSFORMER_SIZE = \"transformer_size\" NUM_TRANSFORMER_LAYERS = \"number_of_transformer_layers\" NUM_HEADS", "\"maximum_negative_similarity\" USE_MAX_NEG_SIM = \"use_maximum_negative_similarity\" SCALE_LOSS = \"scale_loss\" REGULARIZATION_CONSTANT = \"regularization_constant\"", "= \"random_seed\" LEARNING_RATE = \"learning_rate\" DENSE_DIMENSION = \"dense_dimension\" CONCAT_DIMENSION =", "SEQUENCE = \"sequence\" SEQUENCE_LENGTH = f\"{SEQUENCE}_lengths\" SENTENCE = \"sentence\" POOLING", "TRANSFORMER_SIZE = \"transformer_size\" NUM_TRANSFORMER_LAYERS = \"number_of_transformer_layers\" NUM_HEADS = \"number_of_attention_heads\" UNIDIRECTIONAL_ENCODER", "= \"use_sparse_input_dropout\" DENSE_INPUT_DROPOUT = \"use_dense_input_dropout\" RANKING_LENGTH = \"ranking_length\" MODEL_CONFIDENCE =", "is used to pad multi-label training examples. 
# It should", "DENSE_DIMENSION = \"dense_dimension\" CONCAT_DIMENSION = \"concat_dimension\" EMBEDDING_DIMENSION = \"embedding_dimension\" ENCODING_DIMENSION", "\"embedding_dimension\" ENCODING_DIMENSION = \"encoding_dimension\" SIMILARITY_TYPE = \"similarity_type\" LOSS_TYPE = \"loss_type\"", "EPOCHS = \"epochs\" RANDOM_SEED = \"random_seed\" LEARNING_RATE = \"learning_rate\" DENSE_DIMENSION", "\"use_dense_input_dropout\" RANKING_LENGTH = \"ranking_length\" MODEL_CONFIDENCE = \"model_confidence\" BILOU_FLAG = \"BILOU_flag\"", "\"use_text_as_label\" SOFTMAX = \"softmax\" MARGIN = \"margin\" AUTO = \"auto\"", "MAX_POS_SIM = \"maximum_positive_similarity\" MAX_NEG_SIM = \"maximum_negative_similarity\" USE_MAX_NEG_SIM = \"use_maximum_negative_similarity\" SCALE_LOSS", "= \"cross_entropy\" BALANCED = \"balanced\" SEQUENCE = \"sequence\" SEQUENCE_LENGTH =", "= \"tensorboard_log_level\" SEQUENCE_FEATURES = \"sequence_features\" SENTENCE_FEATURES = \"sentence_features\" FEATURIZERS =", "\"use_masked_language_model\" SPARSE_INPUT_DROPOUT = \"use_sparse_input_dropout\" DENSE_INPUT_DROPOUT = \"use_dense_input_dropout\" RANKING_LENGTH = \"ranking_length\"", "MODEL_CONFIDENCE = \"model_confidence\" BILOU_FLAG = \"BILOU_flag\" RETRIEVAL_INTENT = \"retrieval_intent\" USE_TEXT_AS_LABEL", "= \"entity_recognition\" MASKED_LM = \"use_masked_language_model\" SPARSE_INPUT_DROPOUT = \"use_sparse_input_dropout\" DENSE_INPUT_DROPOUT =", "QUERY_INTENT_KEY = \"query_intent\" SCORE_KEY = \"score\" THRESHOLD_KEY = \"threshold\" SEVERITY_KEY", "\"checkpoint_model\" MASK = \"mask\" IGNORE_INTENTS_LIST = \"ignore_intents_list\" TOLERANCE = \"tolerance\"", "= \"negative_margin_scale\" DROP_RATE = \"drop_rate\" DROP_RATE_ATTENTION = \"drop_rate_attention\" DROP_RATE_DIALOGUE =", "\"cosine\" CROSS_ENTROPY = \"cross_entropy\" BALANCED = \"balanced\" SEQUENCE = \"sequence\"", "\"threshold\" SEVERITY_KEY = \"severity\" NAME = \"name\" EPOCH_OVERRIDE = \"epoch_override\"", "\"max_relative_position\" 
BATCH_SIZES = \"batch_size\" BATCH_STRATEGY = \"batch_strategy\" EPOCHS = \"epochs\"", "MAX_POOLING = \"max\" MEAN_POOLING = \"mean\" TENSORBOARD_LOG_DIR = \"tensorboard_log_directory\" TENSORBOARD_LOG_LEVEL", "\"use_key_relative_attention\" VALUE_RELATIVE_ATTENTION = \"use_value_relative_attention\" MAX_RELATIVE_POSITION = \"max_relative_position\" BATCH_SIZES = \"batch_size\"", "training examples. # It should be < 0 to avoid", "MEAN_POOLING = \"mean\" TENSORBOARD_LOG_DIR = \"tensorboard_log_directory\" TENSORBOARD_LOG_LEVEL = \"tensorboard_log_level\" SEQUENCE_FEATURES", "= \"ignore_intents_list\" TOLERANCE = \"tolerance\" POSITIVE_SCORES_KEY = \"positive_scores\" NEGATIVE_SCORES_KEY =", "IDS = \"ids\" # LABEL_PAD_ID is used to pad multi-label", "TENSORBOARD_LOG_DIR = \"tensorboard_log_directory\" TENSORBOARD_LOG_LEVEL = \"tensorboard_log_level\" SEQUENCE_FEATURES = \"sequence_features\" SENTENCE_FEATURES", "\"max\" MEAN_POOLING = \"mean\" TENSORBOARD_LOG_DIR = \"tensorboard_log_directory\" TENSORBOARD_LOG_LEVEL = \"tensorboard_log_level\"", "SOFTMAX = \"softmax\" MARGIN = \"margin\" AUTO = \"auto\" INNER", "Deprecated and superseeded by CONNECTION_DENSITY CONNECTION_DENSITY = \"connection_density\" EVAL_NUM_EPOCHS =", "# It should be < 0 to avoid index out", "USE_TEXT_AS_LABEL = \"use_text_as_label\" SOFTMAX = \"softmax\" MARGIN = \"margin\" AUTO", "BATCH_STRATEGY = \"batch_strategy\" EPOCHS = \"epochs\" RANDOM_SEED = \"random_seed\" LEARNING_RATE", "# LABEL_PAD_ID is used to pad multi-label training examples. 
#", "MASKED_LM = \"use_masked_language_model\" SPARSE_INPUT_DROPOUT = \"use_sparse_input_dropout\" DENSE_INPUT_DROPOUT = \"use_dense_input_dropout\" RANKING_LENGTH", "\"tensorboard_log_directory\" TENSORBOARD_LOG_LEVEL = \"tensorboard_log_level\" SEQUENCE_FEATURES = \"sequence_features\" SENTENCE_FEATURES = \"sentence_features\"", "\"dense_dimension\" CONCAT_DIMENSION = \"concat_dimension\" EMBEDDING_DIMENSION = \"embedding_dimension\" ENCODING_DIMENSION = \"encoding_dimension\"", "= \"regularization_constant\" NEGATIVE_MARGIN_SCALE = \"negative_margin_scale\" DROP_RATE = \"drop_rate\" DROP_RATE_ATTENTION =", "\"tolerance\" POSITIVE_SCORES_KEY = \"positive_scores\" NEGATIVE_SCORES_KEY = \"negative_scores\" RANKING_KEY = \"label_ranking\"", "\"mean\" TENSORBOARD_LOG_DIR = \"tensorboard_log_directory\" TENSORBOARD_LOG_LEVEL = \"tensorboard_log_level\" SEQUENCE_FEATURES = \"sequence_features\"", "\"margin\" AUTO = \"auto\" INNER = \"inner\" LINEAR_NORM = \"linear_norm\"", "\"tensorboard_log_level\" SEQUENCE_FEATURES = \"sequence_features\" SENTENCE_FEATURES = \"sentence_features\" FEATURIZERS = \"featurizers\"", "avoid index out of bounds errors by tf.one_hot. 
LABEL_PAD_ID =", "It should be < 0 to avoid index out of", "\"inner\" LINEAR_NORM = \"linear_norm\" COSINE = \"cosine\" CROSS_ENTROPY = \"cross_entropy\"", "= \"score\" THRESHOLD_KEY = \"threshold\" SEVERITY_KEY = \"severity\" NAME =", "= \"label\" IDS = \"ids\" # LABEL_PAD_ID is used to", "SCORE_KEY = \"score\" THRESHOLD_KEY = \"threshold\" SEVERITY_KEY = \"severity\" NAME", "\"loss_type\" NUM_NEG = \"number_of_negative_examples\" MAX_POS_SIM = \"maximum_positive_similarity\" MAX_NEG_SIM = \"maximum_negative_similarity\"", "= \"evaluate_every_number_of_epochs\" EVAL_NUM_EXAMPLES = \"evaluate_on_number_of_examples\" INTENT_CLASSIFICATION = \"intent_classification\" ENTITY_RECOGNITION =", "= \"use_value_relative_attention\" MAX_RELATIVE_POSITION = \"max_relative_position\" BATCH_SIZES = \"batch_size\" BATCH_STRATEGY =", "= \"number_of_transformer_layers\" NUM_HEADS = \"number_of_attention_heads\" UNIDIRECTIONAL_ENCODER = \"unidirectional_encoder\" KEY_RELATIVE_ATTENTION =", "= \"concat_dimension\" EMBEDDING_DIMENSION = \"embedding_dimension\" ENCODING_DIMENSION = \"encoding_dimension\" SIMILARITY_TYPE =", "= \"use_dense_input_dropout\" RANKING_LENGTH = \"ranking_length\" MODEL_CONFIDENCE = \"model_confidence\" BILOU_FLAG =", "and superseeded by CONNECTION_DENSITY CONNECTION_DENSITY = \"connection_density\" EVAL_NUM_EPOCHS = \"evaluate_every_number_of_epochs\"", "= \"transformer_size\" NUM_TRANSFORMER_LAYERS = \"number_of_transformer_layers\" NUM_HEADS = \"number_of_attention_heads\" UNIDIRECTIONAL_ENCODER =", "= \"intent_classification\" ENTITY_RECOGNITION = \"entity_recognition\" MASKED_LM = \"use_masked_language_model\" SPARSE_INPUT_DROPOUT =", "\"weight_sparsity\" # Deprecated and superseeded by CONNECTION_DENSITY CONNECTION_DENSITY = \"connection_density\"", "\"negative_scores\" RANKING_KEY = \"label_ranking\" QUERY_INTENT_KEY = \"query_intent\" SCORE_KEY = \"score\"", "BATCH_SIZES = \"batch_size\" BATCH_STRATEGY = \"batch_strategy\" EPOCHS = \"epochs\" 
RANDOM_SEED", "tf.one_hot. LABEL_PAD_ID = -1 HIDDEN_LAYERS_SIZES = \"hidden_layers_sizes\" SHARE_HIDDEN_LAYERS = \"share_hidden_layers\"", "VALUE_RELATIVE_ATTENTION = \"use_value_relative_attention\" MAX_RELATIVE_POSITION = \"max_relative_position\" BATCH_SIZES = \"batch_size\" BATCH_STRATEGY", "HIDDEN_LAYERS_SIZES = \"hidden_layers_sizes\" SHARE_HIDDEN_LAYERS = \"share_hidden_layers\" TRANSFORMER_SIZE = \"transformer_size\" NUM_TRANSFORMER_LAYERS", "# constants for configuration parameters of our tensorflow models LABEL", "CROSS_ENTROPY = \"cross_entropy\" BALANCED = \"balanced\" SEQUENCE = \"sequence\" SEQUENCE_LENGTH", "\"number_of_transformer_layers\" NUM_HEADS = \"number_of_attention_heads\" UNIDIRECTIONAL_ENCODER = \"unidirectional_encoder\" KEY_RELATIVE_ATTENTION = \"use_key_relative_attention\"", "= \"scale_loss\" REGULARIZATION_CONSTANT = \"regularization_constant\" NEGATIVE_MARGIN_SCALE = \"negative_margin_scale\" DROP_RATE =", "= \"use_text_as_label\" SOFTMAX = \"softmax\" MARGIN = \"margin\" AUTO =", "NUM_TRANSFORMER_LAYERS = \"number_of_transformer_layers\" NUM_HEADS = \"number_of_attention_heads\" UNIDIRECTIONAL_ENCODER = \"unidirectional_encoder\" KEY_RELATIVE_ATTENTION", "RETRIEVAL_INTENT = \"retrieval_intent\" USE_TEXT_AS_LABEL = \"use_text_as_label\" SOFTMAX = \"softmax\" MARGIN", "MARGIN = \"margin\" AUTO = \"auto\" INNER = \"inner\" LINEAR_NORM", "= \"margin\" AUTO = \"auto\" INNER = \"inner\" LINEAR_NORM =", "out of bounds errors by tf.one_hot. 
LABEL_PAD_ID = -1 HIDDEN_LAYERS_SIZES", "DROP_RATE_ATTENTION = \"drop_rate_attention\" DROP_RATE_DIALOGUE = \"drop_rate_dialogue\" DROP_RATE_LABEL = \"drop_rate_label\" CONSTRAIN_SIMILARITIES", "\"transformer_size\" NUM_TRANSFORMER_LAYERS = \"number_of_transformer_layers\" NUM_HEADS = \"number_of_attention_heads\" UNIDIRECTIONAL_ENCODER = \"unidirectional_encoder\"", "\"encoding_dimension\" SIMILARITY_TYPE = \"similarity_type\" LOSS_TYPE = \"loss_type\" NUM_NEG = \"number_of_negative_examples\"", "to pad multi-label training examples. # It should be <", "THRESHOLD_KEY = \"threshold\" SEVERITY_KEY = \"severity\" NAME = \"name\" EPOCH_OVERRIDE", "= \"ids\" # LABEL_PAD_ID is used to pad multi-label training", "\"use_maximum_negative_similarity\" SCALE_LOSS = \"scale_loss\" REGULARIZATION_CONSTANT = \"regularization_constant\" NEGATIVE_MARGIN_SCALE = \"negative_margin_scale\"", "= \"batch_size\" BATCH_STRATEGY = \"batch_strategy\" EPOCHS = \"epochs\" RANDOM_SEED =", "RANDOM_SEED = \"random_seed\" LEARNING_RATE = \"learning_rate\" DENSE_DIMENSION = \"dense_dimension\" CONCAT_DIMENSION", "= \"maximum_positive_similarity\" MAX_NEG_SIM = \"maximum_negative_similarity\" USE_MAX_NEG_SIM = \"use_maximum_negative_similarity\" SCALE_LOSS =", "\"ignore_intents_list\" TOLERANCE = \"tolerance\" POSITIVE_SCORES_KEY = \"positive_scores\" NEGATIVE_SCORES_KEY = \"negative_scores\"", "SENTENCE = \"sentence\" POOLING = \"pooling\" MAX_POOLING = \"max\" MEAN_POOLING", "LOSS_TYPE = \"loss_type\" NUM_NEG = \"number_of_negative_examples\" MAX_POS_SIM = \"maximum_positive_similarity\" MAX_NEG_SIM", "superseeded by CONNECTION_DENSITY CONNECTION_DENSITY = \"connection_density\" EVAL_NUM_EPOCHS = \"evaluate_every_number_of_epochs\" EVAL_NUM_EXAMPLES", "\"query_intent\" SCORE_KEY = \"score\" THRESHOLD_KEY = \"threshold\" SEVERITY_KEY = \"severity\"", "EVAL_NUM_EPOCHS = \"evaluate_every_number_of_epochs\" EVAL_NUM_EXAMPLES = \"evaluate_on_number_of_examples\" INTENT_CLASSIFICATION = 
\"intent_classification\" ENTITY_RECOGNITION", "LABEL = \"label\" IDS = \"ids\" # LABEL_PAD_ID is used", "= -1 HIDDEN_LAYERS_SIZES = \"hidden_layers_sizes\" SHARE_HIDDEN_LAYERS = \"share_hidden_layers\" TRANSFORMER_SIZE =", "\"scale_loss\" REGULARIZATION_CONSTANT = \"regularization_constant\" NEGATIVE_MARGIN_SCALE = \"negative_margin_scale\" DROP_RATE = \"drop_rate\"", "bounds errors by tf.one_hot. LABEL_PAD_ID = -1 HIDDEN_LAYERS_SIZES = \"hidden_layers_sizes\"", "= \"max\" MEAN_POOLING = \"mean\" TENSORBOARD_LOG_DIR = \"tensorboard_log_directory\" TENSORBOARD_LOG_LEVEL =", "MAX_NEG_SIM = \"maximum_negative_similarity\" USE_MAX_NEG_SIM = \"use_maximum_negative_similarity\" SCALE_LOSS = \"scale_loss\" REGULARIZATION_CONSTANT", "\"intent_classification\" ENTITY_RECOGNITION = \"entity_recognition\" MASKED_LM = \"use_masked_language_model\" SPARSE_INPUT_DROPOUT = \"use_sparse_input_dropout\"", "should be < 0 to avoid index out of bounds", "LABEL_PAD_ID is used to pad multi-label training examples. # It", "\"drop_rate_label\" CONSTRAIN_SIMILARITIES = \"constrain_similarities\" WEIGHT_SPARSITY = \"weight_sparsity\" # Deprecated and", "-1 HIDDEN_LAYERS_SIZES = \"hidden_layers_sizes\" SHARE_HIDDEN_LAYERS = \"share_hidden_layers\" TRANSFORMER_SIZE = \"transformer_size\"", "used to pad multi-label training examples. # It should be", "USE_MAX_NEG_SIM = \"use_maximum_negative_similarity\" SCALE_LOSS = \"scale_loss\" REGULARIZATION_CONSTANT = \"regularization_constant\" NEGATIVE_MARGIN_SCALE", "our tensorflow models LABEL = \"label\" IDS = \"ids\" #", "\"cross_entropy\" BALANCED = \"balanced\" SEQUENCE = \"sequence\" SEQUENCE_LENGTH = f\"{SEQUENCE}_lengths\"", "\"label_ranking\" QUERY_INTENT_KEY = \"query_intent\" SCORE_KEY = \"score\" THRESHOLD_KEY = \"threshold\"", "to avoid index out of bounds errors by tf.one_hot. 
LABEL_PAD_ID", "ENCODING_DIMENSION = \"encoding_dimension\" SIMILARITY_TYPE = \"similarity_type\" LOSS_TYPE = \"loss_type\" NUM_NEG", "UNIDIRECTIONAL_ENCODER = \"unidirectional_encoder\" KEY_RELATIVE_ATTENTION = \"use_key_relative_attention\" VALUE_RELATIVE_ATTENTION = \"use_value_relative_attention\" MAX_RELATIVE_POSITION", "ENTITY_RECOGNITION = \"entity_recognition\" MASKED_LM = \"use_masked_language_model\" SPARSE_INPUT_DROPOUT = \"use_sparse_input_dropout\" DENSE_INPUT_DROPOUT", "= \"dense_dimension\" CONCAT_DIMENSION = \"concat_dimension\" EMBEDDING_DIMENSION = \"embedding_dimension\" ENCODING_DIMENSION =", "= \"drop_rate_label\" CONSTRAIN_SIMILARITIES = \"constrain_similarities\" WEIGHT_SPARSITY = \"weight_sparsity\" # Deprecated", "pad multi-label training examples. # It should be < 0", "KEY_RELATIVE_ATTENTION = \"use_key_relative_attention\" VALUE_RELATIVE_ATTENTION = \"use_value_relative_attention\" MAX_RELATIVE_POSITION = \"max_relative_position\" BATCH_SIZES", "AUTO = \"auto\" INNER = \"inner\" LINEAR_NORM = \"linear_norm\" COSINE", "= \"constrain_similarities\" WEIGHT_SPARSITY = \"weight_sparsity\" # Deprecated and superseeded by", "\"sentence_features\" FEATURIZERS = \"featurizers\" CHECKPOINT_MODEL = \"checkpoint_model\" MASK = \"mask\"", "\"positive_scores\" NEGATIVE_SCORES_KEY = \"negative_scores\" RANKING_KEY = \"label_ranking\" QUERY_INTENT_KEY = \"query_intent\"", "\"ids\" # LABEL_PAD_ID is used to pad multi-label training examples.", "SEQUENCE_FEATURES = \"sequence_features\" SENTENCE_FEATURES = \"sentence_features\" FEATURIZERS = \"featurizers\" CHECKPOINT_MODEL", "\"unidirectional_encoder\" KEY_RELATIVE_ATTENTION = \"use_key_relative_attention\" VALUE_RELATIVE_ATTENTION = \"use_value_relative_attention\" MAX_RELATIVE_POSITION = \"max_relative_position\"", "\"ranking_length\" MODEL_CONFIDENCE = \"model_confidence\" BILOU_FLAG = \"BILOU_flag\" RETRIEVAL_INTENT = \"retrieval_intent\"", "= \"query_intent\" SCORE_KEY = \"score\" THRESHOLD_KEY = 
\"threshold\" SEVERITY_KEY =", "EMBEDDING_DIMENSION = \"embedding_dimension\" ENCODING_DIMENSION = \"encoding_dimension\" SIMILARITY_TYPE = \"similarity_type\" LOSS_TYPE", "CONSTRAIN_SIMILARITIES = \"constrain_similarities\" WEIGHT_SPARSITY = \"weight_sparsity\" # Deprecated and superseeded", "of bounds errors by tf.one_hot. LABEL_PAD_ID = -1 HIDDEN_LAYERS_SIZES =", "= \"tensorboard_log_directory\" TENSORBOARD_LOG_LEVEL = \"tensorboard_log_level\" SEQUENCE_FEATURES = \"sequence_features\" SENTENCE_FEATURES =", "= \"sequence_features\" SENTENCE_FEATURES = \"sentence_features\" FEATURIZERS = \"featurizers\" CHECKPOINT_MODEL =", "= \"featurizers\" CHECKPOINT_MODEL = \"checkpoint_model\" MASK = \"mask\" IGNORE_INTENTS_LIST =", "= \"unidirectional_encoder\" KEY_RELATIVE_ATTENTION = \"use_key_relative_attention\" VALUE_RELATIVE_ATTENTION = \"use_value_relative_attention\" MAX_RELATIVE_POSITION =", "\"concat_dimension\" EMBEDDING_DIMENSION = \"embedding_dimension\" ENCODING_DIMENSION = \"encoding_dimension\" SIMILARITY_TYPE = \"similarity_type\"", "= \"share_hidden_layers\" TRANSFORMER_SIZE = \"transformer_size\" NUM_TRANSFORMER_LAYERS = \"number_of_transformer_layers\" NUM_HEADS =", "= \"learning_rate\" DENSE_DIMENSION = \"dense_dimension\" CONCAT_DIMENSION = \"concat_dimension\" EMBEDDING_DIMENSION =", "= \"similarity_type\" LOSS_TYPE = \"loss_type\" NUM_NEG = \"number_of_negative_examples\" MAX_POS_SIM =", "= \"pooling\" MAX_POOLING = \"max\" MEAN_POOLING = \"mean\" TENSORBOARD_LOG_DIR =", "\"random_seed\" LEARNING_RATE = \"learning_rate\" DENSE_DIMENSION = \"dense_dimension\" CONCAT_DIMENSION = \"concat_dimension\"", "SCALE_LOSS = \"scale_loss\" REGULARIZATION_CONSTANT = \"regularization_constant\" NEGATIVE_MARGIN_SCALE = \"negative_margin_scale\" DROP_RATE", "for configuration parameters of our tensorflow models LABEL = \"label\"", "CONCAT_DIMENSION = \"concat_dimension\" EMBEDDING_DIMENSION = \"embedding_dimension\" ENCODING_DIMENSION = \"encoding_dimension\" 
SIMILARITY_TYPE", "= \"inner\" LINEAR_NORM = \"linear_norm\" COSINE = \"cosine\" CROSS_ENTROPY =", "= \"cosine\" CROSS_ENTROPY = \"cross_entropy\" BALANCED = \"balanced\" SEQUENCE =", "= \"use_maximum_negative_similarity\" SCALE_LOSS = \"scale_loss\" REGULARIZATION_CONSTANT = \"regularization_constant\" NEGATIVE_MARGIN_SCALE =", "INNER = \"inner\" LINEAR_NORM = \"linear_norm\" COSINE = \"cosine\" CROSS_ENTROPY", "= \"weight_sparsity\" # Deprecated and superseeded by CONNECTION_DENSITY CONNECTION_DENSITY =", "= \"maximum_negative_similarity\" USE_MAX_NEG_SIM = \"use_maximum_negative_similarity\" SCALE_LOSS = \"scale_loss\" REGULARIZATION_CONSTANT =", "tensorflow models LABEL = \"label\" IDS = \"ids\" # LABEL_PAD_ID", "= \"mask\" IGNORE_INTENTS_LIST = \"ignore_intents_list\" TOLERANCE = \"tolerance\" POSITIVE_SCORES_KEY =", "INTENT_CLASSIFICATION = \"intent_classification\" ENTITY_RECOGNITION = \"entity_recognition\" MASKED_LM = \"use_masked_language_model\" SPARSE_INPUT_DROPOUT", "LEARNING_RATE = \"learning_rate\" DENSE_DIMENSION = \"dense_dimension\" CONCAT_DIMENSION = \"concat_dimension\" EMBEDDING_DIMENSION", "IGNORE_INTENTS_LIST = \"ignore_intents_list\" TOLERANCE = \"tolerance\" POSITIVE_SCORES_KEY = \"positive_scores\" NEGATIVE_SCORES_KEY", "parameters of our tensorflow models LABEL = \"label\" IDS =", "LINEAR_NORM = \"linear_norm\" COSINE = \"cosine\" CROSS_ENTROPY = \"cross_entropy\" BALANCED", "\"constrain_similarities\" WEIGHT_SPARSITY = \"weight_sparsity\" # Deprecated and superseeded by CONNECTION_DENSITY", "= \"sentence\" POOLING = \"pooling\" MAX_POOLING = \"max\" MEAN_POOLING =", "= \"drop_rate_dialogue\" DROP_RATE_LABEL = \"drop_rate_label\" CONSTRAIN_SIMILARITIES = \"constrain_similarities\" WEIGHT_SPARSITY =", "\"auto\" INNER = \"inner\" LINEAR_NORM = \"linear_norm\" COSINE = \"cosine\"", "= \"embedding_dimension\" ENCODING_DIMENSION = \"encoding_dimension\" SIMILARITY_TYPE = \"similarity_type\" LOSS_TYPE =", "NEGATIVE_SCORES_KEY = 
\"negative_scores\" RANKING_KEY = \"label_ranking\" QUERY_INTENT_KEY = \"query_intent\" SCORE_KEY", "\"batch_strategy\" EPOCHS = \"epochs\" RANDOM_SEED = \"random_seed\" LEARNING_RATE = \"learning_rate\"", "\"drop_rate\" DROP_RATE_ATTENTION = \"drop_rate_attention\" DROP_RATE_DIALOGUE = \"drop_rate_dialogue\" DROP_RATE_LABEL = \"drop_rate_label\"", "NUM_HEADS = \"number_of_attention_heads\" UNIDIRECTIONAL_ENCODER = \"unidirectional_encoder\" KEY_RELATIVE_ATTENTION = \"use_key_relative_attention\" VALUE_RELATIVE_ATTENTION", "\"drop_rate_attention\" DROP_RATE_DIALOGUE = \"drop_rate_dialogue\" DROP_RATE_LABEL = \"drop_rate_label\" CONSTRAIN_SIMILARITIES = \"constrain_similarities\"", "\"number_of_attention_heads\" UNIDIRECTIONAL_ENCODER = \"unidirectional_encoder\" KEY_RELATIVE_ATTENTION = \"use_key_relative_attention\" VALUE_RELATIVE_ATTENTION = \"use_value_relative_attention\"", "POOLING = \"pooling\" MAX_POOLING = \"max\" MEAN_POOLING = \"mean\" TENSORBOARD_LOG_DIR", "TOLERANCE = \"tolerance\" POSITIVE_SCORES_KEY = \"positive_scores\" NEGATIVE_SCORES_KEY = \"negative_scores\" RANKING_KEY", "= \"ranking_length\" MODEL_CONFIDENCE = \"model_confidence\" BILOU_FLAG = \"BILOU_flag\" RETRIEVAL_INTENT =", "DENSE_INPUT_DROPOUT = \"use_dense_input_dropout\" RANKING_LENGTH = \"ranking_length\" MODEL_CONFIDENCE = \"model_confidence\" BILOU_FLAG", "RANKING_LENGTH = \"ranking_length\" MODEL_CONFIDENCE = \"model_confidence\" BILOU_FLAG = \"BILOU_flag\" RETRIEVAL_INTENT", "MAX_RELATIVE_POSITION = \"max_relative_position\" BATCH_SIZES = \"batch_size\" BATCH_STRATEGY = \"batch_strategy\" EPOCHS", "= \"drop_rate_attention\" DROP_RATE_DIALOGUE = \"drop_rate_dialogue\" DROP_RATE_LABEL = \"drop_rate_label\" CONSTRAIN_SIMILARITIES =", "CONNECTION_DENSITY CONNECTION_DENSITY = \"connection_density\" EVAL_NUM_EPOCHS = \"evaluate_every_number_of_epochs\" EVAL_NUM_EXAMPLES = \"evaluate_on_number_of_examples\"", "\"BILOU_flag\" RETRIEVAL_INTENT = \"retrieval_intent\" USE_TEXT_AS_LABEL = 
\"use_text_as_label\" SOFTMAX = \"softmax\"", "\"maximum_positive_similarity\" MAX_NEG_SIM = \"maximum_negative_similarity\" USE_MAX_NEG_SIM = \"use_maximum_negative_similarity\" SCALE_LOSS = \"scale_loss\"", "\"label\" IDS = \"ids\" # LABEL_PAD_ID is used to pad", "SIMILARITY_TYPE = \"similarity_type\" LOSS_TYPE = \"loss_type\" NUM_NEG = \"number_of_negative_examples\" MAX_POS_SIM", "= \"loss_type\" NUM_NEG = \"number_of_negative_examples\" MAX_POS_SIM = \"maximum_positive_similarity\" MAX_NEG_SIM =", "CONNECTION_DENSITY = \"connection_density\" EVAL_NUM_EPOCHS = \"evaluate_every_number_of_epochs\" EVAL_NUM_EXAMPLES = \"evaluate_on_number_of_examples\" INTENT_CLASSIFICATION", "\"balanced\" SEQUENCE = \"sequence\" SEQUENCE_LENGTH = f\"{SEQUENCE}_lengths\" SENTENCE = \"sentence\"", "BALANCED = \"balanced\" SEQUENCE = \"sequence\" SEQUENCE_LENGTH = f\"{SEQUENCE}_lengths\" SENTENCE", "0 to avoid index out of bounds errors by tf.one_hot.", "NUM_NEG = \"number_of_negative_examples\" MAX_POS_SIM = \"maximum_positive_similarity\" MAX_NEG_SIM = \"maximum_negative_similarity\" USE_MAX_NEG_SIM", "errors by tf.one_hot. LABEL_PAD_ID = -1 HIDDEN_LAYERS_SIZES = \"hidden_layers_sizes\" SHARE_HIDDEN_LAYERS", "\"batch_size\" BATCH_STRATEGY = \"batch_strategy\" EPOCHS = \"epochs\" RANDOM_SEED = \"random_seed\"", "\"sequence_features\" SENTENCE_FEATURES = \"sentence_features\" FEATURIZERS = \"featurizers\" CHECKPOINT_MODEL = \"checkpoint_model\"", "EVAL_NUM_EXAMPLES = \"evaluate_on_number_of_examples\" INTENT_CLASSIFICATION = \"intent_classification\" ENTITY_RECOGNITION = \"entity_recognition\" MASKED_LM", "multi-label training examples. # It should be < 0 to" ]
[ "= normaiseAngle(self.start_heading + 80 * -(1 if self.left_or_right == self.RIGHT", "= 1 * math.pi / 180 steer_speed = speed *", "self.ds_index = distance_sensor_index self.wall_point_kind = wall_point_kind self.left_mid_point_index = left_mid_point_index self.left_point_index", "== MazeAttitude.LEFT_WALL: wall = \"left wall\" elif point == MazeAttitude.RIGHT_WALL:", "wall = \"back wall\" else: wall = \"no wall\" print(\"{:3d}", "315: SQRT2 * 10} radar_last_values = {0: 10, 45: SQRT2", "= speed self.start_heading = 0 self.last_heading = 0 self.requested_heading =", "lmline.angle * MazeAttitude.ANGLE_TOLLERANCE >= lline.angle >= lmline.angle / MazeAttitude.ANGLE_TOLLERANCE: points[self.mid_point_index]", "self.Wall(90, 2, self.RIGHT_WALL, 0, 45, 90, 135) self.left_wall = self.Wall(270,", "rfd={: 4d}\".format(int(diagonal_distance))) self.left_or_right = self.RIGHT elif self.left_or_right == self.RIGHT and", "\"Front distance not correct: d={:4d} s={:2d} delta={:4d}\".format(front_distance, state.radar.status[0], state.radar.radar_deltas[0])) else:", "self.rover.getRoverState() self.start_heading = state.heading.heading self.requested_heading = normaiseAngle(self.start_heading + 80 *", "= 150 elif -150 < distance < 0: distance =", "distance_error > distance_speed: angle = -math.pi / 4 if front_distance", "= factor self.adjust = adjust self.angle = None def calcAngle(self,", "state.radar.radar[270] = SQRT2 * 13 # state.radar.radar[225] = SQRT2 *", "angle={: 3d} heading={: 3d} odo={:7.2f}\".format( float(time.time()), int(front_distance), int(wall_angle * 180", "getActionName(self): return \"Turn-Around-Corner\" class DriverForwardForTimeAction(Action): def __init__(self, agent, time, speed,", "PID SQRT2 = math.sqrt(2) PIhalf = math.pi / 2 class", "* MazeAttitude.ANGLE_TOLLERANCE >= lline.angle >= lmline.angle / MazeAttitude.ANGLE_TOLLERANCE: points[self.mid_point_index] =", "= distances[self.left_point_index] * abs(math.sin(mline.angle) / SQRT2) 
self.setAngleAndDistance(angle, distance) elif lline.angle", "= -1 RIGHT = 1 def __init__(self, agent): super(MazeAction, self).__init__(agent)", "== MazeAttitude.UNKNOWN: points[self.left_point_index] = self.wall_point_kind if pmid == MazeAttitude.UNKNOWN: points[self.mid_point_index]", "== MazeAttitude.UNKNOWN: points[self.right_point_index] = self.wall_point_kind self.setAngle(mline.angle, distances) else: if dlong1", "last_distance) < 100: return distance return None def updateUndefinedWall(wall, preferable_wall,", "import normaiseAngle, angleDiference from challenge_utils import Action, PID SQRT2 =", "state): def getPointDistance(state, angle): distance = state.radar.radar[angle] status = state.radar.status[angle]", "self.required_odo[wheel] = WheelOdos.normaliseOdo(state.wheel_odos[wheel] + self.required_odo[wheel]) log(LOG_LEVEL_DEBUG, \"Reset odo to \"", "self.left_or_right == self.RIGHT else -1)) self.pid = PID(1, 0.0, 0.05,", "self.distance = distance self.speed = speed self.next_action = next_action if", "== MazeAttitude.UNKNOWN: points[self.left_point_index] = self.wall_point_kind points[self.mid_point_index] = self.wall_point_kind self.setAngle(lline.angle, distances)", "< 0: distance = -150 distance = -distance distance_from_wall =", "<reponame>GamesCreatorsClub/GCC-Rover # # Copyright 2016-2019 Games Creators Club # #", "left_mid_point_index self.left_point_index = left_point_index self.mid_point_index = mid_point_index self.right_point_index = right_point_index", "if abs(distance_error) < 10: angle = 0 elif distance_error >", "self.front_wall, self.back_wall.ds_angle: self.back_wall} self.points = {0: 0, 45: 0, 90:", "distance)) else: self.distance = abs(int(math.cos(angle) * distance)) def setAngleAndDistance(self, angle,", "start(self): super(ChicaneAction, self).start() def end(self): super(ChicaneAction, self).end() def next(self): if", "+ str(self.required_odo) + \"; starting...\") self.rover.command(pyroslib.publish, 300, 120) # 
pyroslib.publish(\"move/steer\",", "+ math.pi return a class Line: def __init__(self, line_index, long_point_index,", "-PIhalf, self.back_wall) # TODO calc gaps class MoveForwardOnOdo(Action): def __init__(self,", "MazeAttitude.ANGLE_TOLLERANCE: points[self.mid_point_index] = points[lline.long_point_index] angle = MazeAttitude.normAngle(mline.angle - PIhalf) distance", "-math.pi / 4 if front_distance < 450: angle -= math.pi", "45, 90, -1, PIhalf), self.L90_135: self.Line(self.L90_135, 135, 90, 1, -PIhalf),", "* (450 - front_distance) / 1800 # divide with 10", "< 0: expected_diagonal_distance = front_distance * 2 * math.cos(math.pi /", "\" + (\"L\" if self.left_or_right == self.LEFT else \"R\") class", "except BaseException as ex: log(LOG_LEVEL_ALWAYS, \"Domain error wa={: 3d} dw={:", "int(left_distances), int(right_distances))) return self.left_corner_action else: log(LOG_LEVEL_INFO, \"Found corner 2 -", "else -1) self.speed = speed self.start_heading = 0 self.last_heading =", "int(state.left_front_distance_of_wall), int(state.right_wall_angle * 180 / math.pi), int(state.right_front_distance_of_wall), int(steer_speed), int(distance_speed), int(distance),", "radar_status, Radar(0, radar_last_values, radar_status)) state = RoverState(None, None, None, radar,", "def start(self): super(ChicaneAction, self).start() def end(self): super(ChicaneAction, self).end() def next(self):", "d={: 4d} s={: 3d}\".format(int(0), int(distance_from_wall), int(distance_error), int(distance), int(speed))) distance =", "self.L0_45: self.Line(self.L0_45, 45, 0, 1, -math.pi), self.L45_90: self.Line(self.L45_90, 45, 90,", "= right_point_index self.is_front_or_back = self.ds_angle == 0 or self.ds_angle ==", "\"Going forward for \" + str(self.time) + \" ticks.\") return", "elif lline.angle is not None and mline.angle is not None:", "[self.walls[w_ds_angle] for w_ds_angle in self.WALLS if self.distances[w_ds_angle] is not None]", "- PIhalf) distance = distances[self.right_point_index] * 
abs(math.sin(mline.angle) / SQRT2) self.setAngleAndDistance(angle,", "for wheel_name in WHEEL_NAMES: self.required_odo[wheel_name] = distance def start(self): super(MoveForwardOnOdo,", "ticks.\") return self return self.next_action if __name__ == \"__main__\": from", "wall)) def printWall(w): if w.angle is None: print(\"Wall {:3d} ->", "self.LEFT, int(self.distance * 1), self.speed, self) self.right_corner_action = MazeTurnAroundCornerAction(self, self.RIGHT,", "is not None and mline.angle is not None: if lline.angle", "return self def execute(self): state = self.rover.getRoverState() left_diagonal_distance = state.radar.radar[315]", "updateUndefinedWall(self.right_wall, self.front_wall, PIhalf, self.back_wall) updateUndefinedWall(self.left_wall, self.front_wall, -PIhalf, self.back_wall) # TODO", "self.required_odo = {'fl': 0, 'fr': 0, 'bl': 0, 'br': 0}", "rfd={: 4d} fd={: 4d} \".format(int(state.right_front_distance_of_wall), int(front_distance))) return self.right_corner_action return self", "int(0), int(steer_speed), int(distance_speed), int(distance), int(angle), int(state.heading.heading), float(state.wheel_orientations.orientations['fl']) )) def getActionName(self):", "math.pi / 4 else: try: angle = -math.asin(distance_error / distance_speed)", "getActionName(self): return \"Chicane \" + (\"L\" if self.left_or_right == self.LEFT", "self).__init__(agent) self.stop_action = stop_action self.required_odo = {'fl': 0, 'fr': 0,", "else: log(LOG_LEVEL_INFO, \"Found corner - turning, lfd={: 4d} fd={: 4d}", "225: SQRT2 * 10, 270: 10, 315: SQRT2 * 10}", "0: distance = -150 distance_from_wall = state.left_wall_distance distance_error = distance_from_wall", "state = self.rover.getRoverState() heading = state.heading.heading last_heading = self.last_heading self.last_heading", "= distances[self.mid_point_index] if distance < 1: self.distance = 0 else:", "self.left_corner_action if front_distance < 550 and state.radar.radar_deltas[0] < 0: left_distances", "elif 
self.left_or_right == self.RIGHT and diagonal_distance > 800: log(LOG_LEVEL_INFO, \"Found", "speed * speed_steer_fudge_factor distance_speed = speed * speed_distance_fudge_factor if self.left_or_right", "self.left_corner_action else: log(LOG_LEVEL_INFO, \"Found corner 2 - turning left, fd={:", "self.factor + self.adjust) else: self.angle = None class Wall: def", "def __init__(self, line_index, long_point_index, short_point_index, factor, adjust): self.line_index = line_index", "calcAngle(self, distances): long_distance = distances[self.long_point_index] short_distance = distances[self.short_point_index] if long_distance", "state.radar.radar[0] gain = 60 offset = 150 # Values that", "p in self.POINTS} for line in self.lines: self.lines[line].calcAngle(self.distances) wls =", "2016-2019 Games Creators Club # # MIT License # import", "315: SQRT2 * 10} radar_status = {0: 0, 45: 0,", "MazeAttitude.UNKNOWN: points[self.left_point_index] = self.wall_point_kind if pmid == MazeAttitude.UNKNOWN: points[self.mid_point_index] =", "!= 0 and abs(state.radar.radar_deltas[0]) > 100: log(LOG_LEVEL_INFO, \"Front distance not", "float(time.time()), int(front_distance), int(0 * 180 / math.pi), int(distance_from_wall), int(distance_error), int(0", "elif lline.angle is not None and plong1 == MazeAttitude.UNKNOWN and", "dlong1 >= dlong2 and plong2 == MazeAttitude.UNKNOWN and pmid ==", "import pyroslib import pyroslib.logging import time from pyroslib.logging import log,", "+ str(self.speed)) def end(self): super(MazeTurnAroundCornerAction, self).end() def next(self): heading =", "= long_distance / SQRT2 self.angle = MazeAttitude.normAngle(math.atan2(lsqrt2, lsqrt2 - short_distance)", "int(expected_diagonal_distance))) self.been_in_chicane = True return ChicaneAction(self, self.LEFT, self.distance, self.speed, next_action=self)", "p) for p in self.POINTS} for line in self.lines: self.lines[line].calcAngle(self.distances)", "printWalls(): for p in attitude.points: printWallLines(p) for w 
in attitude.walls:", "distance): for wheel_name in WHEEL_NAMES: self.required_odo[wheel_name] = distance def start(self):", "self.setAngleAndDistance(angle, distance) elif dlong1 >= dlong2 and plong2 != MazeAttitude.UNKNOWN", "+ \" \" + str(angle)) wheel_orientations = state.wheel_odos.odos log(LOG_LEVEL_INFO, \"{:16.3f}:", "== self.LEFT: diagonal_distance = state.radar.radar[45] else: diagonal_distance = state.radar.radar[315] if", "wall = \"left wall\" elif point == MazeAttitude.RIGHT_WALL: wall =", "= 270 self.a3 = 225 self.left_corner_action = MazeTurnAroundCornerAction(self, self.LEFT, self.distance,", ">= mline.angle / MazeAttitude.ANGLE_TOLLERANCE: points[self.mid_point_index] = points[rline.long_point_index] angle = MazeAttitude.normAngle(mline.angle", "= long_point_index self.factor = factor self.adjust = adjust self.angle =", "180: 0, 225: 0, 270: 0, 315: 0} attitude =", "dist=4 # self.speed = 150 # 150 speed = 50", "distance = 1000000000 distance_from_wall = state.radar.radar[270] distance_error = distance_from_wall -", "13 # state.radar.radar[225] = SQRT2 * 12 # attitude.calculate(state) #", "super(MoveForwardOnOdo, self).start() state = self.rover.getRoverState() for wheel in self.required_odo: self.required_odo[wheel]", "0 or self.ds_angle == 180 self.selected_line = None self.angle =", "1.2: log(LOG_LEVEL_INFO, \"Found chicane... 
lfd={: 4d} fd={: 4d} dd={: 4d}", "= angle self.distance = distance def tryFindingWall(self, distances, lines, points):", "self.a3 = 225 self.left_corner_action = MazeTurnAroundCornerAction(self, self.LEFT, self.distance, self.speed, self)", "distance = 1000000000 else: distance = steer_speed / state.left_wall_angle if", "* speed_distance_fudge_factor if self.left_or_right == self.RIGHT: distance = -1000000000 distance_from_wall", "self.angle = angle self.distance = distance def tryFindingWall(self, distances, lines,", "else: try: angle = -math.asin(distance_error / distance_speed) except BaseException as", "def __init__(self, agent): super(MazeAction, self).__init__(agent) def check_next_action_conditions(self): return self class", "/ 4 + state.left_wall_angle) else: expected_diagonal_distance = front_distance * math.cos(state.left_wall_angle)", "0 <= distance < 150: distance = 150 elif -150", "< state.left_wall_angle < min_angle: distance = 1000000000 else: distance =", "+ wall_adjust), self.distances[wall.mid_point_index]) else: wall.setAngleAndDistance(self.normAngle(second_wall.angle - wall_adjust), self.distances[wall.mid_point_index]) self.points[wall.ds_angle] =", "distance = steer_speed / state.left_wall_angle if 0 <= distance <", "-> 450/10 - 45deg else: try: angle = math.asin(distance_error /", "Games Creators Club # # MIT License # import math", "point is None: print(\"{:3d} -> line at {:3d} angle\".format(a, angle))", "None: print(\"Wall {:3d} -> is too far - not calculated\".format(w.ds_angle))", "* 10, 90: 10, 135: SQRT2 * 10, 180: 10,", "a = a + math.pi return a class Line: def", "= None self.angle = None self.distance = None def setAngle(self,", "distance = distances[self.right_point_index] * abs(math.sin(mline.angle) / SQRT2) self.setAngleAndDistance(angle, distance) elif", "lmline.angle / MazeAttitude.ANGLE_TOLLERANCE: points[self.mid_point_index] = points[lline.long_point_index] angle = MazeAttitude.normAngle(mline.angle -", "L270_315 = 270 
L315_0 = 315 LINES = [L0_45, L45_90,", "45: 0, 90: 0, 135: 0, 180: 0, 225: 0,", "second part of chicane, rfd={: 4d}\".format(int(diagonal_distance))) self.left_or_right = self.RIGHT elif", "self.a2 = 270 self.a3 = 225 self.left_corner_action = MazeTurnAroundCornerAction(self, self.LEFT,", "= 270 self.a3 = 225 self.left_corner_action = MazeTurnAroundCornerAction(self, self.LEFT, int(self.distance", "= self.wall_point_kind self.setAngle(mline.angle, distances) elif plong1 == MazeAttitude.UNKNOWN and pmid", "< 700: log(LOG_LEVEL_INFO, \"Found final corner - turning to finish,", "str(self.speed)) def end(self): super(MazeTurnAroundCornerAction, self).end() def next(self): heading = state.heading.heading", "0} def calculate(self, state): def getPointDistance(state, angle): distance = state.radar.radar[angle]", "distance not correct: d={:4d} s={:2d} delta={:4d}\".format(front_distance, state.radar.status[0], state.radar.radar_deltas[0])) else: if", "math.pi), self.L0_45: self.Line(self.L0_45, 45, 0, 1, -math.pi), self.L45_90: self.Line(self.L45_90, 45,", "too far - distance not calculated\".format(w.ds_angle, int(w.angle * 180 /", "self.last_heading = 0 self.requested_heading = 0 self.pid = None self.next_action", "int(state.left_wall_angle * 180 / math.pi), int(state.left_front_distance_of_wall), int(state.right_wall_angle * 180 /", "\"left wall\" elif point == MazeAttitude.RIGHT_WALL: wall = \"right wall\"", "def start(self): self.rover.command(pyroslib.publish, self.speed, 0) # pyroslib.publish(\"move/drive\", \"0 \" +", "agent): super(MazeAction, self).__init__(agent) def check_next_action_conditions(self): return self class ChicaneAction(MazeAction): def", "distance_speed) except BaseException as ex: log(LOG_LEVEL_ALWAYS, \"Domain error wa={: 3d}", "state.wheel_odos.odos log(LOG_LEVEL_INFO, \"{:16.3f}: dist_f={: 4d} wa={: 3d} dist_w={: 4d} dist_err={:", "corner at distance {:04d} at speed {:04d}, start heading {:07.3f},", "next_action=None): 
super(MazeTurnAroundCornerAction, self).__init__(agent) self.left_or_right = left_or_right self.distance = distance *", "angle = -math.pi / 4 if front_distance < 450: angle", "# state.radar.radar[225] = SQRT2 * 12 # attitude.calculate(state) # printWalls()", "!= MazeAttitude.UNKNOWN and mline.angle * MazeAttitude.ANGLE_TOLLERANCE >= rline.angle >= mline.angle", "!= MazeAttitude.UNKNOWN: points[self.left_point_index] = self.wall_point_kind points[self.mid_point_index] = self.wall_point_kind self.setAngle(lline.angle, distances)", "225: 0, 270: 0, 315: 0} self.distances = {0: 0,", "= 1 def __init__(self, agent): super(MazeAction, self).__init__(agent) def check_next_action_conditions(self): return", "distance = -150 distance_from_wall = state.left_wall_distance distance_error = distance_from_wall -", "def setRequiredOdo(self, distance): for wheel_name in WHEEL_NAMES: self.required_odo[wheel_name] = distance", "> distance_speed: angle = math.pi / 4 elif distance_error <", "next action \" + self.next_action.getActionName()) else: log(LOG_LEVEL_INFO, \"Finishing turning -", "-> has angle {:3d} but is too far - distance", "-> 450/10 - 45deg else: try: angle = -math.asin(distance_error /", "self.points[wall.ds_angle] = wall.wall_point_kind self.distances = {p: getPointDistance(state, p) for p", "120) # pyroslib.publish(\"move/steer\", \"300 120\") def end(self): super(MoveForwardOnOdo, self).end() def", "self.wall_point_kind self.setAngle(lline.angle, distances) elif plong1 != MazeAttitude.UNKNOWN and pmid ==", "Copyright 2016-2019 Games Creators Club # # MIT License #", "300, 120) # pyroslib.publish(\"move/steer\", \"300 120\") def end(self): super(MoveForwardOnOdo, self).end()", "end(self): super(MoveForwardOnOdo, self).end() def next(self): state = self.rover.getRoverState() do_stop =", "1800 # divide with 10 and by 180 -> 450/10", "int(state.heading.heading), float(state.wheel_orientations.orientations['fl']) )) def getActionName(self): return \"Corridor\" class 
MazeTurnAroundCornerAction(MazeAction): def", "radar = Radar(0, radar_values, radar_status, Radar(0, radar_last_values, radar_status)) state =", "dlong1 = distances[lline.long_point_index] dmid = distances[mline.short_point_index] dlong2 = distances[mline.long_point_index] plong1", "fd={: 4d} \".format(int(state.right_front_distance_of_wall), int(front_distance))) return self.right_corner_action return self def execute(self):", "self.back_wall} self.points = {0: 0, 45: 0, 90: 0, 135:", "self.mid_point_index = mid_point_index self.right_point_index = right_point_index self.is_front_or_back = self.ds_angle ==", "= self.wall_point_kind points[self.mid_point_index] = self.wall_point_kind self.setAngle(lline.angle, distances) elif dlong1 >=", "printWall(w): if w.angle is None: print(\"Wall {:3d} -> is too", "next_action): super(DriverForwardForTimeAction, self).__init__(agent) self.time = time self.speed = speed self.next_action", "= SQRT2 * 17 # state.radar.radar[270] = SQRT2 * 13", "pyroslib.publish(\"move/steer\", str(distance) + \" \" + str(self.speed) + \" \"", "abs(math.sin(mline.angle) / SQRT2) self.setAngleAndDistance(angle, distance) elif lline.angle is not None", "log(LOG_LEVEL_ALWAYS, \"Domain error wa={: 3d} dw={: 4d} de={: 4d} d={:", "/ math.pi) self.rover.command(pyroslib.publish, self.speed, angle, distance) # pyroslib.publish(\"move/steer\", str(distance) +", "heading {:07.3f}\".format(self.distance, self.speed, self.start_heading, self.requested_heading)) self.rover.command(pyroslib.publish, self.speed, 0, self.distance) #", "else: distance = 1000000000 distance_from_wall = state.radar.radar[270] distance_error = distance_from_wall", "= 2 FRONT_WALL = 4 BACK_WALL = 8 NO_GAP =", "speed * speed_distance_fudge_factor if self.left_or_right == self.RIGHT: distance = -1000000000", "< 1.0: do_stop = True if do_stop: return self.stop_action else:", "and plong1 != MazeAttitude.UNKNOWN and lmline.angle * MazeAttitude.ANGLE_TOLLERANCE >= lline.angle", "/ 
state.right_wall_angle if 0 <= distance < 150: distance =", "speed_steer_fudge_factor distance_speed = speed * speed_distance_fudge_factor if self.left_or_right == self.RIGHT:", "last_heading, angleDiference(heading, last_heading), self.requested_heading, self.error)) def getActionName(self): return \"Turn-Around-Corner\" class", "front_distance = state.radar.radar[0] gain = 60 offset = 150 #", "distance = 1000000000 else: distance = steer_speed / state.right_wall_angle if", "points[self.left_point_index] = self.wall_point_kind points[self.mid_point_index] = self.wall_point_kind self.setAngle(lline.angle, distances) elif mline.angle", "- turning to finish, rfd={: 4d} fd={: 4d} \".format(int(state.right_front_distance_of_wall), int(front_distance)))", "status = state.radar.status[angle] if status == 0: return distance last_distance", "- 45deg else: try: angle = math.asin(distance_error / distance_speed) except", "< 1.0 or state.radar.radar[315] < 1.0 or state.radar.radar[45] < 1.0:", "= {'fl': 0, 'fr': 0, 'bl': 0, 'br': 0} def", "front_distance) / 1800 # divide with 10 and by 180", "100 and front_distance < 550: expected_diagonal_distance = 0 if state.left_wall_angle", "plong2 == MazeAttitude.UNKNOWN and pmid == MazeAttitude.UNKNOWN: points[self.mid_point_index] = self.wall_point_kind", "self.Line(self.L315_0, 315, 0, -1, math.pi), self.L0_45: self.Line(self.L0_45, 45, 0, 1,", "log(LOG_LEVEL_INFO, \"{:16.3f}: dist_f={: 4d} wa={: 3d} dist_w={: 4d} dist_err={: 3d}", "-> is too far - not calculated\".format(w.ds_angle)) else: if w.distance", "180 / math.pi) self.rover.command(pyroslib.publish, self.speed, angle, distance) # pyroslib.publish(\"move/steer\", str(distance)", "self.WALLS if self.distances[w_ds_angle] is not None] wall_processing_order = sorted(wls, key=lambda", "state.radar.radar[45] else: diagonal_distance = state.radar.radar[315] if self.left_or_right == self.LEFT and", "if self.left_or_right == self.LEFT else \"R\") class MazeCorridorAction(MazeAction): 
def __init__(self,", "= state.radar.radar[0] gain = 60 offset = 150 # Values", "second_wall): if wall.angle is None and self.distances[wall.ds_angle] is not None:", "self.left_or_right == self.LEFT and diagonal_distance > 800: log(LOG_LEVEL_INFO, \"Found second", "10, 270: 10, 315: SQRT2 * 10} radar_status = {0:", "left_point_index, mid_point_index, right_point_index): self.ds_angle = distance_sensor_angle self.ds_index = distance_sensor_index self.wall_point_kind", "= MazeAttitude.normAngle(mline.angle - PIhalf) distance = distances[self.right_point_index] * abs(math.sin(mline.angle) /", "MazeAction(Action): LEFT = -1 RIGHT = 1 def __init__(self, agent):", "def start(self): super(MazeTurnAroundCornerAction, self).start() state = self.rover.getRoverState() self.start_heading = state.heading.heading", "315, 270, 1, -PIhalf)} self.right_wall = self.Wall(90, 2, self.RIGHT_WALL, 0,", "state.right_wall_angle if 0 <= distance < 150: distance = 150", "math.pi), int(0), int(steer_speed), int(distance_speed), int(distance), int(angle), int(state.heading.heading), float(state.wheel_orientations.orientations['fl']) )) def", "mid_point_index self.right_point_index = right_point_index self.is_front_or_back = self.ds_angle == 0 or", "self.distance angle = 0 if abs(distance_error) < 10: angle =", "= {self.right_wall.ds_angle: self.right_wall, self.left_wall.ds_angle: self.left_wall, self.front_wall.ds_angle: self.front_wall, self.back_wall.ds_angle: self.back_wall} self.points", "s={: 3d}\".format(int(wall_angle), int(distance_from_wall), int(distance_error), int(distance), int(speed))) else: wall_angle = state.left_wall_angle", "math.pi) point = attitude.points[a] if point is None: print(\"{:3d} ->", "315] WALLS = [90, 270, 0, 180] L0_45 = 0", ">= mline.angle >= lline.angle / MazeAttitude.ANGLE_TOLLERANCE: if plong1 == MazeAttitude.UNKNOWN:", "= 0 elif distance_error > 0 and distance_error > distance_speed:", "int(distance_from_wall), int(distance_error), int(0 * 180 / math.pi), 
int(0), int(0 *", "4d}\".format(int(front_distance), int(left_distances), int(right_distances))) return self.left_corner_action else: log(LOG_LEVEL_INFO, \"Found corner 2", "SQRT2 * 10} radar_status = {0: 0, 45: 0, 90:", "def __init__(self, agent, left_or_right, distance, speed, next_action=None): super(MazeTurnAroundCornerAction, self).__init__(agent) self.left_or_right", "not None: log(LOG_LEVEL_INFO, \"Finished turning around the corner - invoking", "dlong1 < dlong2 and plong1 != MazeAttitude.UNKNOWN and lmline.angle *", "speed={:04d} h={:07.3f} lh={:07.3f} dh={:07.3f} rh={:07.3f} e={:07.3f}\" .format(self.speed, heading, last_heading, angleDiference(heading,", "= points[rline.long_point_index] angle = MazeAttitude.normAngle(mline.angle + PIhalf) distance = distances[self.left_point_index]", "SQRT2 * 10, 270: 10, 315: SQRT2 * 10} radar_last_values", "min_angle = 1 * math.pi / 180 steer_speed = speed", "4 if front_distance < 450: angle -= math.pi * (450", "4 if front_distance < 450: angle += math.pi * (450", "str(distance) + \" \" + str(self.speed) + \" \" +", "speed, next_action=None): super(MazeCorridorAction, self).__init__(agent) self.left_or_right = left_or_right self.distance = distance", "self.next_action is not None: log(LOG_LEVEL_INFO, \"Finished turning around the corner", "L45_90 = 45 L90_135 = 90 L135_180 = 135 L180_225", "self.right_wall, self.left_wall.ds_angle: self.left_wall, self.front_wall.ds_angle: self.front_wall, self.back_wall.ds_angle: self.back_wall} self.points = {0:", "* 17 # state.radar.radar[270] = SQRT2 * 13 # state.radar.radar[225]", "wall.wall_point_kind self.distances = {p: getPointDistance(state, p) for p in self.POINTS}", "self.distance) # pyroslib.publish(\"move/steer\", str(self.distance) + \" \" + str(self.speed)) def", "preferable_wall, wall_adjust, second_wall): if wall.angle is None and self.distances[wall.ds_angle] is", "turn around corner at distance {:04d} at speed {:04d}, start", "speed {:04d}, start heading {:07.3f}, 
requested heading {:07.3f}\".format(self.distance, self.speed, self.start_heading,", "math.pi / 4 elif distance_error < 0 and distance_error <", "3d} ra={: 3d} rd={: 3d} s_spd={: 3d} dist_spd={: 3d} dist={:", "math import pyroslib import pyroslib.logging import time from pyroslib.logging import", "state.left_wall_angle if -min_angle < state.left_wall_angle < min_angle: distance = 1000000000", "return distance return None def updateUndefinedWall(wall, preferable_wall, wall_adjust, second_wall): if", "int(left_distances), int(right_distances))) return self.right_corner_action if state.right_front_distance_of_wall > 100 and state.left_front_distance_of_wall", "# printWalls() state.radar.radar[180] = 50 state.radar.radar[315] = 30 attitude.calculate(state) printWalls()", "self.right_corner_action if state.right_front_distance_of_wall > 100 and state.left_front_distance_of_wall > 100 and", "# Copyright 2016-2019 Games Creators Club # # MIT License", "not calculated\".format(a)) else: angle = int(attitude.lines[a].angle * 180 / math.pi)", "math.pi), int(0), int(0 * 180 / math.pi), int(0), int(steer_speed), int(distance_speed),", "self.wall_point_kind self.setAngle(lline.angle, distances) elif mline.angle is not None and pmid", "* distance)) else: self.distance = abs(int(math.cos(angle) * distance)) def setAngleAndDistance(self,", "450/10 - 45deg elif distance_error < 0 and distance_error <", "point == MazeAttitude.BACK_WALL: wall = \"back wall\" else: wall =", "distance = int(distance) angle = int(angle * 180 / math.pi)", "elif -150 < distance < 0: distance = -150 distance_from_wall", "distance = -150 distance = -distance distance_from_wall = state.right_wall_distance distance_error", "steer=5-7, dist=4 # self.speed = 150 # 150 speed =", "def printWalls(): for p in attitude.points: printWallLines(p) for w in", "log(LOG_LEVEL_INFO, \"Found final corner - turning to finish, rfd={: 4d}", "\"Found final corner - turning to finish, rfd={: 4d} fd={:", "SQRT2 * 10} 
radar_last_values = {0: 10, 45: SQRT2 *", "mline.angle / MazeAttitude.ANGLE_TOLLERANCE: points[self.mid_point_index] = points[rline.long_point_index] angle = MazeAttitude.normAngle(mline.angle +", "<= -PIhalf: a = a + math.pi return a class", "self return self.next_action if __name__ == \"__main__\": from rover import", "attitude.points[a] if point is None: print(\"{:3d} -> line at {:3d}", "lline.angle / MazeAttitude.ANGLE_TOLLERANCE: if plong1 == MazeAttitude.UNKNOWN: points[self.left_point_index] = self.wall_point_kind", "from rover import Radar, RoverState radar_values = {0: 10, 45:", "right_point_index): self.ds_angle = distance_sensor_angle self.ds_index = distance_sensor_index self.wall_point_kind = wall_point_kind", "self.left_or_right == self.RIGHT: wall_angle = state.right_wall_angle if -min_angle < state.right_wall_angle", "state.radar.radar[90] + state.radar.radar[45] if left_distances > right_distances: log(LOG_LEVEL_INFO, \"Found corner", "0, 90: 0, 135: 0, 180: 0, 225: 0, 270:", "RoverState radar_values = {0: 10, 45: SQRT2 * 10, 90:", "is not None: if lline.angle * MazeAttitude.ANGLE_TOLLERANCE >= mline.angle >=", "points[self.mid_point_index] = self.wall_point_kind self.setAngle(lline.angle, distances) elif plong1 != MazeAttitude.UNKNOWN and", "state.radar.radar[90] distance_error = distance_from_wall - self.distance angle = 0 if", "from rover import normaiseAngle, angleDiference from challenge_utils import Action, PID", "0, 270: 0, 315: 0} def calculate(self, state): def getPointDistance(state,", "line_index self.short_point_index = short_point_index self.long_point_index = long_point_index self.factor = factor", "== 180 self.selected_line = None self.angle = None self.distance =", "def end(self): super(MazeTurnAroundCornerAction, self).end() def next(self): heading = state.heading.heading self.error", "is None: print(\"Wall {:3d} -> has angle {:3d} but is", "updateUndefinedWall(self.back_wall, self.right_wall, PIhalf, self.left_wall) 
updateUndefinedWall(self.right_wall, self.front_wall, PIhalf, self.back_wall) updateUndefinedWall(self.left_wall, self.front_wall,", "MazeAttitude.UNKNOWN and plong2 == MazeAttitude.UNKNOWN: points[self.mid_point_index] = self.wall_point_kind points[self.right_point_index] =", "= \"back wall\" else: wall = \"no wall\" print(\"{:3d} ->", "10, 135: SQRT2 * 10, 180: 10, 225: SQRT2 *", "int(attitude.lines[a].angle * 180 / math.pi) point = attitude.points[a] if point", "10 and by 180 -> 450/10 - 45deg elif distance_error", "self.Line(self.L45_90, 45, 90, -1, PIhalf), self.L90_135: self.Line(self.L90_135, 135, 90, 1,", "45, 90, 135) self.left_wall = self.Wall(270, 6, self.LEFT_WALL, 180, 225,", "and not self.been_in_chicane and front_distance > 300 and left_diagonal_distance >", "self.been_in_chicane = True return ChicaneAction(self, self.LEFT, self.distance, self.speed, next_action=self) else:", "diagonal_distance = state.radar.radar[45] else: diagonal_distance = state.radar.radar[315] if self.left_or_right ==", ">= dlong2 and plong2 == MazeAttitude.UNKNOWN and pmid == MazeAttitude.UNKNOWN:", "calc gaps class MoveForwardOnOdo(Action): def __init__(self, agent, stop_action=None): super(MoveForwardOnOdo, self).__init__(agent)", "0, 225: 0, 270: 0, 315: 0} self.distances = {0:", "int(state.heading.heading), float(state.wheel_orientations.orientations['fl']) )) def getActionName(self): return \"Chicane \" + (\"L\"", "4d} d={: 4d} s={: 3d}\".format(int(wall_angle), int(distance_from_wall), int(distance_error), int(distance), int(speed))) else:", "None: lsqrt2 = long_distance / SQRT2 self.angle = MazeAttitude.normAngle(math.atan2(lsqrt2, lsqrt2", "lline.angle >= lmline.angle / MazeAttitude.ANGLE_TOLLERANCE: points[self.mid_point_index] = points[lline.long_point_index] angle =", "ChicaneAction(self, self.LEFT, self.distance, self.speed, next_action=self) else: log(LOG_LEVEL_INFO, \"Found corner -", "100: log(LOG_LEVEL_INFO, \"Front distance not correct: d={:4d} s={:2d} 
delta={:4d}\".format(front_distance, state.radar.status[0],", "class MazeAttitude: UNKNOWN = 0 LEFT_WALL = 1 RIGHT_WALL =", "lline.angle is not None and plong1 == MazeAttitude.UNKNOWN and pmid", "90 L135_180 = 135 L180_225 = 180 L225_270 = 225", "self.left_or_right == self.RIGHT: distance = -1000000000 distance_from_wall = state.radar.radar[90] distance_error", "= MazeAttitude.normAngle(math.atan2(lsqrt2, lsqrt2 - short_distance) * self.factor + self.adjust) else:", "speed_distance_fudge_factor = 4 # 4 min_angle = 1 * math.pi", "dist_spd={: 3d} dist={: 4d} angle={: 3d} heading={: 3d} odo={:7.2f}\".format( float(time.time()),", "plong2 != MazeAttitude.UNKNOWN and mline.angle * MazeAttitude.ANGLE_TOLLERANCE >= rline.angle >=", "PIhalf), self.L270_315: self.Line(self.L270_315, 315, 270, 1, -PIhalf)} self.right_wall = self.Wall(90,", "printWallLines(p) for w in attitude.walls: printWall(w) print(\"----------------------------------------------------------\") # attitude.calculate(state) #", "# divide with 10 and by 180 -> 450/10 -", "/ distance_speed) except BaseException as ex: log(LOG_LEVEL_ALWAYS, \"Domain error wa={:", "100 and state.left_front_distance_of_wall > 100 and front_distance < 700: log(LOG_LEVEL_INFO,", "-> has angle {:3d} and is at {:3d}\".format(w.ds_angle, int(w.angle *", "self.rover.getRoverState() left_diagonal_distance = state.radar.radar[315] front_distance = state.radar.radar[0] gain = 60", "self).end() def next(self): if self.left_or_right == self.LEFT: diagonal_distance = state.radar.radar[45]", "wall\" elif point == MazeAttitude.FRONT_WALL: wall = \"front wall\" elif", "and self.error < 0: return self else: if self.next_action is", "self.rover.getRoverState() front_distance = state.radar.radar[0] gain = 60 offset = 150", "= MazeTurnAroundCornerAction(self, self.RIGHT, int(self.distance * 1), self.speed, self) # self.right_corner_action", "state.heading.heading self.error = self.pid.process(self.requested_heading, heading) if self.left_or_right 
== self.LEFT and", "180, 1, -math.pi), self.L225_270: self.Line(self.L225_270, 225, 270, -1, PIhalf), self.L270_315:", "def __init__(self, agent, stop_action=None): super(MoveForwardOnOdo, self).__init__(agent) self.stop_action = stop_action self.required_odo", "self.left_wall) updateUndefinedWall(self.back_wall, self.right_wall, PIhalf, self.left_wall) updateUndefinedWall(self.right_wall, self.front_wall, PIhalf, self.back_wall) updateUndefinedWall(self.left_wall,", "180 -> 450/10 - 45deg else: try: angle = -math.asin(distance_error", "\"0 \" + str(self.speed)) log(LOG_LEVEL_INFO, \"Going forward for \" +", "self.a3 = 135 else: self.a1 = 315 self.a2 = 270", "= True if do_stop: return self.stop_action else: return self def", "elif a <= -PIhalf: a = a + math.pi return", "int(distance), int(speed))) distance = int(distance) angle = int(angle * 180", "getPointDistance(state, p) for p in self.POINTS} for line in self.lines:", "\" + str(angle)) wheel_orientations = state.wheel_odos.odos # log(LOG_LEVEL_INFO, \"{:16.3f}: dist_f={:", "= 4 BACK_WALL = 8 NO_GAP = 0 FORWARD_GAP =", "correct value! 
speed_steer_fudge_factor = 5 # 5-7 speed_distance_fudge_factor = 4", "super(ChicaneAction, self).start() def end(self): super(ChicaneAction, self).end() def next(self): if self.left_or_right", "dlong2 and plong1 == MazeAttitude.UNKNOWN and pmid == MazeAttitude.UNKNOWN: points[self.left_point_index]", "return \"Forward ODO\" class MazeAction(Action): LEFT = -1 RIGHT =", "True return ChicaneAction(self, self.LEFT, self.distance, self.speed, next_action=self) else: log(LOG_LEVEL_INFO, \"Found", "radar, None, None) def printWallLines(a): if attitude.lines[a].angle is None: print(\"{:3d}", "= distances[self.right_point_index] * abs(math.sin(mline.angle) / SQRT2) self.setAngleAndDistance(angle, distance) elif dlong1", "4d} ld={: 4d} rd={: 4d}\".format(int(front_distance), int(left_distances), int(right_distances))) return self.right_corner_action if", "270, 315] WALLS = [90, 270, 0, 180] L0_45 =", "< 0 and distance_error < -distance_speed: angle = math.pi /", "points[self.right_point_index] = self.wall_point_kind self.setAngle(mline.angle, distances) else: if dlong1 < dlong2", "expected_diagonal_distance = front_distance * math.cos(state.left_wall_angle) * SQRT2 if False and", ">= lmline.angle / MazeAttitude.ANGLE_TOLLERANCE: points[self.mid_point_index] = points[lline.long_point_index] angle = MazeAttitude.normAngle(mline.angle", "0, self.FRONT_WALL, 270, 315, 0, 45) self.back_wall = self.Wall(180, 4,", "distance_speed = speed * speed_distance_fudge_factor if self.left_or_right == self.RIGHT: distance", "next action spectified.\") return self.next_action def execute(self): state = self.rover.getRoverState()", "gain = 60 offset = 150 # Values that worked", "3d} heading={: 3d} odo={:7.2f}\".format( float(time.time()), int(front_distance), int(0 * 180 /", "= points[self.left_point_index] pmid = points[self.mid_point_index] plong2 = points[self.right_point_index] if dlong1", "is too far - not calculated\".format(w.ds_angle)) else: if w.distance is", "45deg elif 
distance_error < 0 and distance_error < -distance_speed: angle", "self.RIGHT: distance = -1000000000 distance_from_wall = state.radar.radar[90] distance_error = distance_from_wall", "False def start(self): super(MazeCorridorAction, self).start() self.been_in_chicane = False def end(self):", "if w.distance is None: print(\"Wall {:3d} -> has angle {:3d}", "5 # 5-7 speed_distance_fudge_factor = 4 # 4 min_angle =", "3d}\".format(int(wall_angle), int(distance_from_wall), int(distance_error), int(distance), int(speed))) else: wall_angle = state.left_wall_angle if", "def getActionName(self): return \"Corridor\" class MazeTurnAroundCornerAction(MazeAction): def __init__(self, agent, left_or_right,", "return self.next_action def execute(self): state = self.rover.getRoverState() heading = state.heading.heading", "3d} heading={: 3d} odo={:7.2f}\".format( float(time.time()), int(front_distance), int(wall_angle * 180 /", "sorted(wls, key=lambda wall: self.distances[wall.ds_angle]) for wall in wall_processing_order: wall.tryFindingWall(self.distances, self.lines,", "is not None: log(LOG_LEVEL_INFO, \"Finished turning around the corner -", "int(distance_error), int(distance), int(speed))) else: distance = 1000000000 distance_from_wall = state.radar.radar[270]", "- 45deg else: try: angle = -math.asin(distance_error / distance_speed) except", "10, 225: SQRT2 * 10, 270: 10, 315: SQRT2 *", "MazeAttitude.UNKNOWN and pmid == MazeAttitude.UNKNOWN and plong2 == MazeAttitude.UNKNOWN: points[self.mid_point_index]", "0: return self else: if self.next_action is not None: log(LOG_LEVEL_INFO,", "def __init__(self, agent, left_or_right, distance, speed, next_action=None): super(MazeCorridorAction, self).__init__(agent) self.left_or_right", "270, 1, -PIhalf)} self.right_wall = self.Wall(90, 2, self.RIGHT_WALL, 0, 45,", "def end(self): super(MoveForwardOnOdo, self).end() def next(self): state = self.rover.getRoverState() do_stop", "def normAngle(a): if a > PIhalf: a = a -", "0, 'fr': 0, 'bl': 0, 
'br': 0} def setRequiredOdo(self, distance):", "min_angle: distance = 1000000000 else: distance = steer_speed / state.right_wall_angle", "4d} ld={: 4d} rd={: 4d}\".format(int(front_distance), int(left_distances), int(right_distances))) return self.left_corner_action else:", "plong1 = points[self.left_point_index] pmid = points[self.mid_point_index] plong2 = points[self.right_point_index] if", "self.time > 0: self.time -= 1 log(LOG_LEVEL_INFO, \"Going forward for", "wall_angle = state.right_wall_angle if -min_angle < state.right_wall_angle < min_angle: distance", "for wheel in self.required_odo: self.required_odo[wheel] = WheelOdos.normaliseOdo(state.wheel_odos[wheel] + self.required_odo[wheel]) log(LOG_LEVEL_DEBUG,", "turning left, fd={: 4d} ld={: 4d} rd={: 4d}\".format(int(front_distance), int(left_distances), int(right_distances)))", "la={: 3d} ld={: 3d} ra={: 3d} rd={: 3d} s_spd={: 3d}", "de={: 4d} d={: 4d} s={: 3d}\".format(int(wall_angle), int(distance_from_wall), int(distance_error), int(distance), int(speed)))", "lsqrt2 - short_distance) * self.factor + self.adjust) else: self.angle =", "1000000000 else: distance = steer_speed / state.left_wall_angle if 0 <=", "+ state.radar.radar[45] if left_distances > right_distances: log(LOG_LEVEL_INFO, \"Found corner 2", "self.speed, self) # self.right_corner_action = MazeTurnAroundCornerAction(self.odo, self.radar, self.heading, self.RIGHT, self.distance,", "= state.heading.heading last_heading = self.last_heading self.last_heading = heading log(LOG_LEVEL_INFO, \"Turning", "state.left_wall_distance distance_error = distance_from_wall - self.distance angle = 0 if", "int(distance_error), int(0 * 180 / math.pi), int(0), int(0 * 180", "__init__(self, agent, left_or_right, distance, speed, next_action=None): super(MazeCorridorAction, self).__init__(agent) self.left_or_right =", "self.distance = distance * (1 if left_or_right == self.RIGHT else", "and lmline.angle * MazeAttitude.ANGLE_TOLLERANCE >= lline.angle >= 
lmline.angle / MazeAttitude.ANGLE_TOLLERANCE:", "0, 'bl': 0, 'br': 0} def setRequiredOdo(self, distance): for wheel_name", "MazeAttitude.UNKNOWN: points[self.mid_point_index] = self.wall_point_kind if plong2 == MazeAttitude.UNKNOWN: points[self.right_point_index] =", "-1, PIhalf), self.L90_135: self.Line(self.L90_135, 135, 90, 1, -PIhalf), self.L135_180: self.Line(self.L135_180,", "self.back_wall) updateUndefinedWall(self.left_wall, self.front_wall, -PIhalf, self.back_wall) # TODO calc gaps class", "if self.left_or_right == MazeAction.RIGHT: self.a1 = 45 self.a2 = 90", "state = self.rover.getRoverState() for wheel in self.required_odo: self.required_odo[wheel] = WheelOdos.normaliseOdo(state.wheel_odos[wheel]", "= SQRT2 * 13 # state.radar.radar[225] = SQRT2 * 12", "= {self.L315_0: self.Line(self.L315_0, 315, 0, -1, math.pi), self.L0_45: self.Line(self.L0_45, 45,", "-= 1 log(LOG_LEVEL_INFO, \"Going forward for \" + str(self.time) +", "if -min_angle < state.right_wall_angle < min_angle: distance = 1000000000 else:", "self.lines[line].calcAngle(self.distances) wls = [self.walls[w_ds_angle] for w_ds_angle in self.WALLS if self.distances[w_ds_angle]", "= state.radar.status[angle] if status == 0: return distance last_distance =", "MazeCorridorAction(MazeAction): def __init__(self, agent, left_or_right, distance, speed, next_action=None): super(MazeCorridorAction, self).__init__(agent)", "lines, points): lmline = lines[self.left_mid_point_index] lline = lines[self.left_point_index] mline =", "self.error = 0 def start(self): super(MazeTurnAroundCornerAction, self).start() state = self.rover.getRoverState()", "\".format(int(state.right_front_distance_of_wall), int(front_distance))) return self.right_corner_action return self def execute(self): state =", "state.radar.last_radar[angle] if abs(distance - last_distance) < 100: return distance return", "distance = -1000000000 distance_from_wall = state.radar.radar[90] distance_error = distance_from_wall -", "8 NO_GAP = 0 
FORWARD_GAP = 1 SIDE_GAP = 2", "MazeAttitude.UNKNOWN: points[self.right_point_index] = self.wall_point_kind self.setAngle(mline.angle, distances) else: if dlong1 <", "else: return self def execute(self): pass def getActionName(self): return \"Forward", "0 elif distance_error > 0 and distance_error > distance_speed: angle", "* MazeAttitude.ANGLE_TOLLERANCE >= rline.angle >= mline.angle / MazeAttitude.ANGLE_TOLLERANCE: points[self.mid_point_index] =", "self.POINTS} for line in self.lines: self.lines[line].calcAngle(self.distances) wls = [self.walls[w_ds_angle] for", "self.wall_point_kind self.setAngle(mline.angle, distances) elif lline.angle is not None and plong1", "a - math.pi elif a <= -PIhalf: a = a", "str(self.speed)) log(LOG_LEVEL_INFO, \"Going forward for \" + str(self.time) + \"", "DriverForwardForTimeAction(self, 10, self.speed, None)) def start(self): super(ChicaneAction, self).start() def end(self):", "state.radar.status[angle] if status == 0: return distance last_distance = state.radar.last_radar[angle]", "not None and short_distance is not None: lsqrt2 = long_distance", "else: wall = \"no wall\" print(\"{:3d} -> line at {:3d}", "MazeTurnAroundCornerAction(self, self.RIGHT, int(self.distance * 1), self.speed, self) # self.right_corner_action =", "< 550: expected_diagonal_distance = 0 if state.left_wall_angle < 0: expected_diagonal_distance", "self.wall_point_kind = wall_point_kind self.left_mid_point_index = left_mid_point_index self.left_point_index = left_point_index self.mid_point_index", "import log, LOG_LEVEL_ALWAYS, LOG_LEVEL_INFO, LOG_LEVEL_DEBUG from rover import WheelOdos, WHEEL_NAMES", "distance < 150: distance = 150 elif -150 < distance", "rd={: 3d} s_spd={: 3d} dist_spd={: 3d} dist={: 4d} angle={: 3d}", "None) def printWallLines(a): if attitude.lines[a].angle is None: print(\"{:3d} -> point", "is too far - distance not calculated\".format(w.ds_angle, int(w.angle * 180", "SQRT2 * 10, 180: 10, 225: SQRT2 * 10, 270:", "math.pi / 180 steer_speed = 
speed * speed_steer_fudge_factor distance_speed =", "rd={: 4d}\".format(int(front_distance), int(left_distances), int(right_distances))) return self.left_corner_action else: log(LOG_LEVEL_INFO, \"Found corner", "-distance_speed: angle = -math.pi / 4 else: try: angle =", "points[rline.long_point_index] angle = MazeAttitude.normAngle(mline.angle + PIhalf) distance = distances[self.left_point_index] *", "wheel_orientations = state.wheel_odos.odos log(LOG_LEVEL_INFO, \"{:16.3f}: dist_f={: 4d} wa={: 3d} dist_w={:", "start(self): super(MoveForwardOnOdo, self).start() state = self.rover.getRoverState() for wheel in self.required_odo:", "50 # mm/second - TODO use odo to update to", "0, 'br': 0} def setRequiredOdo(self, distance): for wheel_name in WHEEL_NAMES:", "heading, last_heading, angleDiference(heading, last_heading), self.requested_heading, self.error)) def getActionName(self): return \"Turn-Around-Corner\"", "self.left_point_index = left_point_index self.mid_point_index = mid_point_index self.right_point_index = right_point_index self.is_front_or_back", "part of chicane, rfd={: 4d}\".format(int(diagonal_distance))) self.left_or_right = self.RIGHT elif self.left_or_right", "self.a1 = 45 self.a2 = 90 self.a3 = 135 else:", "self.line_index = line_index self.short_point_index = short_point_index self.long_point_index = long_point_index self.factor", "angle = -math.pi / 4 else: try: angle = math.asin(distance_error", "angle = math.pi / 4 elif distance_error < 0 and", ">= lline.angle >= lmline.angle / MazeAttitude.ANGLE_TOLLERANCE: points[self.mid_point_index] = points[lline.long_point_index] angle", "int(front_distance), int(left_diagonal_distance), int(expected_diagonal_distance))) return self.left_corner_action if front_distance < 550 and", "= left_or_right self.distance = distance * (1 if left_or_right ==", "DriverForwardForTimeActoun(10, self.speed, None)) self.been_in_chicane = False def start(self): super(MazeCorridorAction, self).start()", "else: wall_angle = 
state.left_wall_angle if -min_angle < state.left_wall_angle < min_angle:", "[90, 270, 0, 180] L0_45 = 0 L45_90 = 45", "import pyroslib.logging import time from pyroslib.logging import log, LOG_LEVEL_ALWAYS, LOG_LEVEL_INFO,", "== MazeAttitude.UNKNOWN: points[self.mid_point_index] = self.wall_point_kind points[self.right_point_index] = self.wall_point_kind self.setAngle(mline.angle, distances)", "def updateUndefinedWall(wall, preferable_wall, wall_adjust, second_wall): if wall.angle is None and", "point == MazeAttitude.RIGHT_WALL: wall = \"right wall\" elif point ==", "= None def setAngle(self, angle, distances): self.angle = angle distance", "int(angle), int(state.heading.heading), float(state.wheel_orientations.orientations['fl']) )) def getActionName(self): return \"Chicane \" +", "L225_270, L270_315, L315_0] ANGLE_TOLLERANCE = 1.075 @staticmethod def normAngle(a): if", "SIDE_GAP = 2 POINTS = [0, 45, 90, 135, 180,", "800: log(LOG_LEVEL_INFO, \"Found second part of chicane, rfd={: 4d}\".format(int(diagonal_distance))) self.left_or_right", "s={: 3d}\".format(int(0), int(distance_from_wall), int(distance_error), int(distance), int(speed))) else: distance = 1000000000", "1000000000 distance_from_wall = state.radar.radar[270] distance_error = distance_from_wall - self.distance angle", "/ math.pi), int(distance_from_wall), int(distance_error), int(state.left_wall_angle * 180 / math.pi), int(state.left_front_distance_of_wall),", "Line: def __init__(self, line_index, long_point_index, short_point_index, factor, adjust): self.line_index =", "distance_from_wall = state.radar.radar[90] distance_error = distance_from_wall - self.distance angle =", "3d} dw={: 4d} de={: 4d} d={: 4d} s={: 3d}\".format(int(0), int(distance_from_wall),", "w.distance)) def printWalls(): for p in attitude.points: printWallLines(p) for w", "if left_or_right == self.RIGHT else -1) self.speed = speed self.start_heading", "0 if state.left_wall_angle < 0: expected_diagonal_distance = front_distance * 2", 
"distances) else: if dlong1 < dlong2 and plong1 == MazeAttitude.UNKNOWN", "\"Found corner - turning, lfd={: 4d} fd={: 4d} dd={: 4d}", ">= rline.angle >= mline.angle / MazeAttitude.ANGLE_TOLLERANCE: points[self.mid_point_index] = points[rline.long_point_index] angle", "radar_values, radar_status, Radar(0, radar_last_values, radar_status)) state = RoverState(None, None, None,", "MazeAttitude.UNKNOWN and pmid == MazeAttitude.UNKNOWN: points[self.left_point_index] = self.wall_point_kind points[self.mid_point_index] =", "starting...\") self.rover.command(pyroslib.publish, 300, 120) # pyroslib.publish(\"move/steer\", \"300 120\") def end(self):", "if w.angle is None: print(\"Wall {:3d} -> is too far", "= [90, 270, 0, 180] L0_45 = 0 L45_90 =", "def printWall(w): if w.angle is None: print(\"Wall {:3d} -> is", "if self.distances[w_ds_angle] is not None] wall_processing_order = sorted(wls, key=lambda wall:", "do_stop: return self.stop_action else: return self def execute(self): pass def", "180] L0_45 = 0 L45_90 = 45 L90_135 = 90", "= self.RIGHT elif self.left_or_right == self.RIGHT and diagonal_distance > 800:", "worked speed=150, steer=5-7, dist=4 # self.speed = 150 # 150", "\"front wall\" elif point == MazeAttitude.BACK_WALL: wall = \"back wall\"", "lines[self.right_point_index] dlong1 = distances[lline.long_point_index] dmid = distances[mline.short_point_index] dlong2 = distances[mline.long_point_index]", "right_point_index self.is_front_or_back = self.ds_angle == 0 or self.ds_angle == 180", "self.back_wall) # TODO calc gaps class MoveForwardOnOdo(Action): def __init__(self, agent,", "= self.rover.getRoverState() front_distance = state.radar.radar[0] gain = 60 offset =", "= state.radar.radar[90] + state.radar.radar[45] if left_distances > right_distances: log(LOG_LEVEL_INFO, \"Found", "450: angle -= math.pi * (450 - front_distance) / 1800", ">= dlong2 and plong2 != MazeAttitude.UNKNOWN and mline.angle * MazeAttitude.ANGLE_TOLLERANCE", "rover import Radar, RoverState 
radar_values = {0: 10, 45: SQRT2", "= 1.075 @staticmethod def normAngle(a): if a > PIhalf: a", "points[self.mid_point_index] = self.wall_point_kind if plong2 == MazeAttitude.UNKNOWN: points[self.right_point_index] = self.wall_point_kind", "= math.pi / 2 class MazeAttitude: UNKNOWN = 0 LEFT_WALL", "\" + str(self.speed)) log(LOG_LEVEL_INFO, \"Going forward for \" + str(self.time)", "# state.radar.radar[0] = 5 # state.radar.radar[45] = SQRT2 * 5", "__init__(self, agent): super(MazeAction, self).__init__(agent) def check_next_action_conditions(self): return self class ChicaneAction(MazeAction):", "4d} rd={: 4d}\".format(int(front_distance), int(left_distances), int(right_distances))) return self.right_corner_action if state.right_front_distance_of_wall >", "\"Going forward for \" + str(self.time) + \" ticks.\") def", "MazeAttitude.ANGLE_TOLLERANCE >= rline.angle >= mline.angle / MazeAttitude.ANGLE_TOLLERANCE: points[self.mid_point_index] = points[rline.long_point_index]", "left_or_right == self.RIGHT else -1) self.speed = speed self.start_heading =", "self.Line(self.L180_225, 225, 180, 1, -math.pi), self.L225_270: self.Line(self.L225_270, 225, 270, -1,", "4d} \".format(int(state.right_front_distance_of_wall), int(front_distance))) return self.right_corner_action return self def execute(self): state", "self.right_point_index = right_point_index self.is_front_or_back = self.ds_angle == 0 or self.ds_angle", "gaps class MoveForwardOnOdo(Action): def __init__(self, agent, stop_action=None): super(MoveForwardOnOdo, self).__init__(agent) self.stop_action", "0 self.pid = None self.next_action = next_action self.error = 0", "turning - no next action spectified.\") return self.next_action def execute(self):", "self.RIGHT and self.error < 0: return self else: if self.next_action", "\" + str(self.speed)) def end(self): super(MazeTurnAroundCornerAction, self).end() def next(self): heading", "lline = lines[self.left_point_index] mline = lines[self.mid_point_index] rline = 
lines[self.right_point_index] dlong1", "L45_90, L90_135, L135_180, L180_225, L225_270, L270_315, L315_0] ANGLE_TOLLERANCE = 1.075", "* 180 / math.pi) point = attitude.points[a] if point is", "\" + str(self.time) + \" ticks.\") return self return self.next_action", "print(\"{:3d} -> point too far - not calculated\".format(a)) else: angle", "= -math.pi / 4 if front_distance < 450: angle -=", "MazeAttitude.UNKNOWN: points[self.mid_point_index] = self.wall_point_kind points[self.right_point_index] = self.wall_point_kind self.setAngle(mline.angle, distances) elif", "int(self.distance * 1), self.speed, self) self.right_corner_action = MazeTurnAroundCornerAction(self, self.RIGHT, int(self.distance", "self.factor = factor self.adjust = adjust self.angle = None def", "self.rover.command(pyroslib.publish, self.speed, 0, self.distance) # pyroslib.publish(\"move/steer\", str(self.distance) + \" \"", "self.right_corner_action = MazeTurnAroundCornerAction(self, self.RIGHT, int(self.distance * 1), self.speed, self) #", "= distance_sensor_index self.wall_point_kind = wall_point_kind self.left_mid_point_index = left_mid_point_index self.left_point_index =", "4d} ed={: 4d}\".format(int(state.left_front_distance_of_wall), int(front_distance), int(left_diagonal_distance), int(expected_diagonal_distance))) self.been_in_chicane = True return", "front_distance > 300 and left_diagonal_distance > expected_diagonal_distance * 1.2: log(LOG_LEVEL_INFO,", "self).__init__(agent) self.left_or_right = left_or_right self.distance = distance self.speed = speed", "1 * math.pi / 180 steer_speed = speed * speed_steer_fudge_factor", "in wall_processing_order: wall.tryFindingWall(self.distances, self.lines, self.points) updateUndefinedWall(self.front_wall, self.right_wall, -PIhalf, self.left_wall) updateUndefinedWall(self.back_wall,", "distance) elif lline.angle is not None and mline.angle is not", "-1, math.pi), self.L0_45: self.Line(self.L0_45, 45, 0, 1, -math.pi), self.L45_90: 
self.Line(self.L45_90,", "45: SQRT2 * 10, 90: 10, 135: SQRT2 * 10,", "4d} d={: 4d} s={: 3d}\".format(int(0), int(distance_from_wall), int(distance_error), int(distance), int(speed))) else:", "> expected_diagonal_distance * 1.2: log(LOG_LEVEL_INFO, \"Found chicane... lfd={: 4d} fd={:", "self.been_in_chicane = False def end(self): super(MazeCorridorAction, self).end() def next(self): left_diagonal_distance", "angle belogs to {:s}\".format(a, angle, wall)) def printWall(w): if w.angle", "self.required_odo: self.required_odo[wheel] = WheelOdos.normaliseOdo(state.wheel_odos[wheel] + self.required_odo[wheel]) log(LOG_LEVEL_DEBUG, \"Reset odo to", "4d}\".format(int(diagonal_distance))) self.left_or_right = self.RIGHT elif self.left_or_right == self.RIGHT and diagonal_distance", "ticks.\") def end(self): pass def next(self): if self.time > 0:", "= next_action self.error = 0 def start(self): super(MazeTurnAroundCornerAction, self).start() state", "requested heading {:07.3f}\".format(self.distance, self.speed, self.start_heading, self.requested_heading)) self.rover.command(pyroslib.publish, self.speed, 0, self.distance)", "= self.pid.process(self.requested_heading, heading) if self.left_or_right == self.LEFT and self.error >", "270, 315, 0, 45) self.back_wall = self.Wall(180, 4, self.BACK_WALL, 90,", "line in self.lines: self.lines[line].calcAngle(self.distances) wls = [self.walls[w_ds_angle] for w_ds_angle in", "print(\"{:3d} -> line at {:3d} angle belogs to {:s}\".format(a, angle,", "distance_error < 0 and distance_error < -distance_speed: angle = -math.pi", "front_distance = state.radar.radar[0] if state.radar.status[0] != 0 and abs(state.radar.radar_deltas[0]) >", "dlong2 and plong2 != MazeAttitude.UNKNOWN and mline.angle * MazeAttitude.ANGLE_TOLLERANCE >=", "self.distances = {0: 0, 45: 0, 90: 0, 135: 0,", "pass def next(self): if self.time > 0: self.time -= 1", "lline.angle * MazeAttitude.ANGLE_TOLLERANCE >= mline.angle >= lline.angle / MazeAttitude.ANGLE_TOLLERANCE: 
if", "distance def start(self): super(MoveForwardOnOdo, self).start() state = self.rover.getRoverState() for wheel", "self.Line(self.L135_180, 135, 180, -1, math.pi), self.L180_225: self.Line(self.L180_225, 225, 180, 1,", "\" + str(self.time) + \" ticks.\") def end(self): pass def", "90: 10, 135: SQRT2 * 10, 180: 10, 225: SQRT2", "point == MazeAttitude.LEFT_WALL: wall = \"left wall\" elif point ==", "self.setAngle(mline.angle, distances) def __init__(self): self.lines = {self.L315_0: self.Line(self.L315_0, 315, 0,", "self.RIGHT else -1) self.speed = speed self.start_heading = 0 self.last_heading", "and mline.angle is not None: if lline.angle * MazeAttitude.ANGLE_TOLLERANCE >=", "/ math.pi), int(0), int(0 * 180 / math.pi), int(0), int(steer_speed),", "s={: 3d}\".format(int(wall_angle), int(distance_from_wall), int(distance_error), int(distance), int(speed))) distance = int(distance) angle", "distance, speed, next_action=None): super(MazeTurnAroundCornerAction, self).__init__(agent) self.left_or_right = left_or_right self.distance =", "+ str(self.required_odo)) for wheel_name in WHEEL_NAMES: if state.wheel_odos[wheel_name] >= self.required_odo[wheel_name]:", "front_distance < 450: angle += math.pi * (450 - front_distance)", "self.ds_angle == 180 self.selected_line = None self.angle = None self.distance", "150: distance = 150 elif -150 < distance < 0:", "left_diagonal_distance > expected_diagonal_distance * 1.2: log(LOG_LEVEL_INFO, \"Found chicane... 
lfd={: 4d}", "but is too far - distance not calculated\".format(w.ds_angle, int(w.angle *", "Wall: def __init__(self, distance_sensor_angle, distance_sensor_index, wall_point_kind, left_mid_point_index, left_point_index, mid_point_index, right_point_index):", "90, -1, PIhalf), self.L90_135: self.Line(self.L90_135, 135, 90, 1, -PIhalf), self.L135_180:", "distances) elif mline.angle is not None and pmid == MazeAttitude.UNKNOWN", "wall_adjust), self.distances[wall.mid_point_index]) else: wall.setAngleAndDistance(self.normAngle(second_wall.angle - wall_adjust), self.distances[wall.mid_point_index]) self.points[wall.ds_angle] = wall.wall_point_kind", "180 / math.pi), int(state.left_front_distance_of_wall), int(state.right_wall_angle * 180 / math.pi), int(state.right_front_distance_of_wall),", "s_spd={: 3d} dist_spd={: 3d} dist={: 4d} angle={: 3d} heading={: 3d}", "= state.left_wall_angle if -min_angle < state.left_wall_angle < min_angle: distance =", "self.error < 0: return self else: if self.next_action is not", "MazeAttitude: UNKNOWN = 0 LEFT_WALL = 1 RIGHT_WALL = 2", "* (1 if left_or_right == self.RIGHT else -1) self.speed =", "== self.LEFT and self.error > 0: return self elif self.left_or_right", "3d}\".format(int(0), int(distance_from_wall), int(distance_error), int(distance), int(speed))) else: distance = 1000000000 distance_from_wall", "315: 0} attitude = MazeAttitude() radar = Radar(0, radar_values, radar_status,", "4d}\".format(int(front_distance), int(left_distances), int(right_distances))) return self.right_corner_action if state.right_front_distance_of_wall > 100 and", "__init__(self): self.lines = {self.L315_0: self.Line(self.L315_0, 315, 0, -1, math.pi), self.L0_45:", "= steer_speed / state.left_wall_angle if 0 <= distance < 150:", "return self.next_action if __name__ == \"__main__\": from rover import Radar,", "too far - not calculated\".format(a)) else: angle = int(attitude.lines[a].angle *", "450: angle += math.pi * (450 - front_distance) / 1800", "wall 
in wall_processing_order: wall.tryFindingWall(self.distances, self.lines, self.points) updateUndefinedWall(self.front_wall, self.right_wall, -PIhalf, self.left_wall)", "dh={:07.3f} rh={:07.3f} e={:07.3f}\" .format(self.speed, heading, last_heading, angleDiference(heading, last_heading), self.requested_heading, self.error))", "error wa={: 3d} dw={: 4d} de={: 4d} d={: 4d} s={:", "chicane, rfd={: 4d}\".format(int(diagonal_distance))) self.left_or_right = self.RIGHT elif self.left_or_right == self.RIGHT", "> 800: log(LOG_LEVEL_INFO, \"Found second part of chicane, rfd={: 4d}\".format(int(diagonal_distance)))", "= MazeAttitude() radar = Radar(0, radar_values, radar_status, Radar(0, radar_last_values, radar_status))", "= speed * speed_distance_fudge_factor if self.left_or_right == self.RIGHT: distance =", "angle = -math.pi / 4 elif distance_error < 0 and", "if self.left_or_right == self.LEFT and self.error > 0: return self", "= 5 # state.radar.radar[45] = SQRT2 * 5 * 0.9", "0 LEFT_WALL = 1 RIGHT_WALL = 2 FRONT_WALL = 4", "< 0: return self else: if self.next_action is not None:", "__init__(self, distance_sensor_angle, distance_sensor_index, wall_point_kind, left_mid_point_index, left_point_index, mid_point_index, right_point_index): self.ds_angle =", "pyroslib.publish(\"move/drive\", \"0 \" + str(self.speed)) log(LOG_LEVEL_INFO, \"Going forward for \"", "lines[self.left_point_index] mline = lines[self.mid_point_index] rline = lines[self.right_point_index] dlong1 = distances[lline.long_point_index]", "if self.left_or_right == self.LEFT: diagonal_distance = state.radar.radar[45] else: diagonal_distance =", "log(LOG_LEVEL_INFO, \"Going forward for \" + str(self.time) + \" ticks.\")", "-min_angle < state.left_wall_angle < min_angle: distance = 1000000000 else: distance", "= -150 distance = -distance distance_from_wall = state.right_wall_distance distance_error =", "- front_distance) / 1800 # divide with 10 and by", "update to correct value! 
speed_steer_fudge_factor = 5 # 5-7 speed_distance_fudge_factor", "int(0 * 180 / math.pi), int(distance_from_wall), int(distance_error), int(0 * 180", "* math.cos(math.pi / 4 + state.left_wall_angle) else: expected_diagonal_distance = front_distance", "def __init__(self, distance_sensor_angle, distance_sensor_index, wall_point_kind, left_mid_point_index, left_point_index, mid_point_index, right_point_index): self.ds_angle", "elif mline.angle is not None and pmid == MazeAttitude.UNKNOWN and", "3d} dw={: 4d} de={: 4d} d={: 4d} s={: 3d}\".format(int(wall_angle), int(distance_from_wall),", "from challenge_utils import Action, PID SQRT2 = math.sqrt(2) PIhalf =", "int(distance), int(angle), int(state.heading.heading), float(state.wheel_orientations.orientations['fl']) )) def getActionName(self): return \"Corridor\" class", "attitude.calculate(state) # printWalls() # # state.radar.radar[0] = 5 # state.radar.radar[45]", "= MazeAttitude.normAngle(mline.angle + PIhalf) distance = distances[self.left_point_index] * abs(math.sin(mline.angle) /", "0: return distance last_distance = state.radar.last_radar[angle] if abs(distance - last_distance)", "self.left_wall) updateUndefinedWall(self.right_wall, self.front_wall, PIhalf, self.back_wall) updateUndefinedWall(self.left_wall, self.front_wall, -PIhalf, self.back_wall) #", "distance_error > distance_speed: angle = math.pi / 4 if front_distance", "pmid == MazeAttitude.UNKNOWN: points[self.mid_point_index] = self.wall_point_kind points[self.right_point_index] = self.wall_point_kind self.setAngle(mline.angle,", "self.next_action if __name__ == \"__main__\": from rover import Radar, RoverState", "short_distance is not None: lsqrt2 = long_distance / SQRT2 self.angle", "str(self.speed) + \" \" + str(angle)) wheel_orientations = state.wheel_odos.odos #", "= -math.asin(distance_error / distance_speed) except BaseException as ex: log(LOG_LEVEL_ALWAYS, \"Domain", "self.L225_270: self.Line(self.L225_270, 225, 270, -1, PIhalf), self.L270_315: 
self.Line(self.L270_315, 315, 270,", "points[self.mid_point_index] = self.wall_point_kind points[self.right_point_index] = self.wall_point_kind self.setAngle(mline.angle, distances) elif plong1", "RIGHT = 1 def __init__(self, agent): super(MazeAction, self).__init__(agent) def check_next_action_conditions(self):", ">= lline.angle / MazeAttitude.ANGLE_TOLLERANCE: if plong1 == MazeAttitude.UNKNOWN: points[self.left_point_index] =", "-> line at {:3d} angle\".format(a, angle)) else: if point ==", "class MazeAction(Action): LEFT = -1 RIGHT = 1 def __init__(self,", "and plong2 == MazeAttitude.UNKNOWN: points[self.mid_point_index] = self.wall_point_kind points[self.right_point_index] = self.wall_point_kind", "Radar(0, radar_values, radar_status, Radar(0, radar_last_values, radar_status)) state = RoverState(None, None,", "180: 0, 225: 0, 270: 0, 315: 0} self.distances =", "and distance_error > distance_speed: angle = math.pi / 4 elif", "4 # 4 min_angle = 1 * math.pi / 180", "heading={: 3d} odo={:7.2f}\".format( float(time.time()), int(front_distance), int(0 * 180 / math.pi),", "'br': 0} def setRequiredOdo(self, distance): for wheel_name in WHEEL_NAMES: self.required_odo[wheel_name]", "= None def calcAngle(self, distances): long_distance = distances[self.long_point_index] short_distance =", "next(self): if self.time > 0: self.time -= 1 log(LOG_LEVEL_INFO, \"Going", "= True return ChicaneAction(self, self.LEFT, self.distance, self.speed, next_action=self) else: log(LOG_LEVEL_INFO,", "chicane... 
lfd={: 4d} fd={: 4d} dd={: 4d} ed={: 4d}\".format(int(state.left_front_distance_of_wall), int(front_distance),", "return \"Corridor\" class MazeTurnAroundCornerAction(MazeAction): def __init__(self, agent, left_or_right, distance, speed,", "= MazeTurnAroundCornerAction(self, self.LEFT, self.distance, self.speed, self) self.right_corner_action = MazeTurnAroundCornerAction(self, self.RIGHT,", "L180_225 = 180 L225_270 = 225 L270_315 = 270 L315_0", "PIhalf: a = a - math.pi elif a <= -PIhalf:", "self.is_front_or_back: self.distance = abs(int(math.sin(angle) * distance)) else: self.distance = abs(int(math.cos(angle)", "class MazeCorridorAction(MazeAction): def __init__(self, agent, left_or_right, distance, speed, next_action=None): super(MazeCorridorAction,", "plong1 == MazeAttitude.UNKNOWN and pmid == MazeAttitude.UNKNOWN: points[self.left_point_index] = self.wall_point_kind", "4d} dist_err={: 3d} la={: 3d} ld={: 3d} ra={: 3d} rd={:", "180 / math.pi), int(0), int(0 * 180 / math.pi), int(0),", "self.left_or_right == self.LEFT else \"R\") class MazeCorridorAction(MazeAction): def __init__(self, agent,", "== self.LEFT else \"R\") class MazeCorridorAction(MazeAction): def __init__(self, agent, left_or_right,", "self.LEFT: diagonal_distance = state.radar.radar[45] else: diagonal_distance = state.radar.radar[315] if self.left_or_right", "log(LOG_LEVEL_DEBUG, \"Driving to \" + str(self.required_odo)) for wheel_name in WHEEL_NAMES:", "= self.wall_point_kind if plong2 == MazeAttitude.UNKNOWN: points[self.right_point_index] = self.wall_point_kind self.setAngle(mline.angle,", "= MazeTurnAroundCornerAction(self, self.LEFT, int(self.distance * 1), self.speed, self) self.right_corner_action =", "License # import math import pyroslib import pyroslib.logging import time", "/ 4 else: try: angle = math.asin(distance_error / distance_speed) except", "distance {:04d} at speed {:04d}, start heading {:07.3f}, requested heading", "and pmid == MazeAttitude.UNKNOWN: points[self.mid_point_index] 
= self.wall_point_kind points[self.right_point_index] = self.wall_point_kind", "is at {:3d}\".format(w.ds_angle, int(w.angle * 180 / math.pi), w.distance)) def", "plong1 != MazeAttitude.UNKNOWN and lmline.angle * MazeAttitude.ANGLE_TOLLERANCE >= lline.angle >=", "elif dlong1 >= dlong2 and plong2 != MazeAttitude.UNKNOWN and mline.angle", "point too far - not calculated\".format(a)) else: angle = int(attitude.lines[a].angle", "< -distance_speed: angle = math.pi / 4 if front_distance <", "wall.setAngleAndDistance(self.normAngle(second_wall.angle - wall_adjust), self.distances[wall.mid_point_index]) self.points[wall.ds_angle] = wall.wall_point_kind self.distances = {p:", "4d} rd={: 4d}\".format(int(front_distance), int(left_distances), int(right_distances))) return self.left_corner_action else: log(LOG_LEVEL_INFO, \"Found", "int(front_distance))) return self.right_corner_action return self def execute(self): state = self.rover.getRoverState()", "135) self.left_wall = self.Wall(270, 6, self.LEFT_WALL, 180, 225, 270, 315)", "10, 315: SQRT2 * 10} radar_status = {0: 0, 45:", "/ SQRT2) self.setAngleAndDistance(angle, distance) elif lline.angle is not None and", "+ \"; starting...\") self.rover.command(pyroslib.publish, 300, 120) # pyroslib.publish(\"move/steer\", \"300 120\")", "450/10 - 45deg else: try: angle = -math.asin(distance_error / distance_speed)", "self.requested_heading, self.error)) def getActionName(self): return \"Turn-Around-Corner\" class DriverForwardForTimeAction(Action): def __init__(self,", "12 # attitude.calculate(state) # printWalls() state.radar.radar[180] = 50 state.radar.radar[315] =", "self.right_wall = self.Wall(90, 2, self.RIGHT_WALL, 0, 45, 90, 135) self.left_wall", "\"Corridor\" class MazeTurnAroundCornerAction(MazeAction): def __init__(self, agent, left_or_right, distance, speed, next_action=None):", "0.9 # state.radar.radar[315] = SQRT2 * 17 # state.radar.radar[270] =", "state.left_wall_angle < min_angle: distance = 1000000000 else: distance 
= steer_speed", "dlong2 and plong1 != MazeAttitude.UNKNOWN and lmline.angle * MazeAttitude.ANGLE_TOLLERANCE >=", "return self.right_corner_action return self def execute(self): state = self.rover.getRoverState() left_diagonal_distance", "5-7 speed_distance_fudge_factor = 4 # 4 min_angle = 1 *", "/ math.pi), w.distance)) def printWalls(): for p in attitude.points: printWallLines(p)", "short_distance = distances[self.short_point_index] if long_distance is not None and short_distance", "# 4 min_angle = 1 * math.pi / 180 steer_speed", "diff_method=angleDiference) self.pid.process(self.requested_heading, self.start_heading) log(LOG_LEVEL_INFO, \"Starting to turn around corner at", "0, 270: 0, 315: 0} self.distances = {0: 0, 45:", "\"Turning speed={:04d} h={:07.3f} lh={:07.3f} dh={:07.3f} rh={:07.3f} e={:07.3f}\" .format(self.speed, heading, last_heading,", "-> 450/10 - 45deg elif distance_error < 0 and distance_error", "270 self.a3 = 225 self.left_corner_action = MazeTurnAroundCornerAction(self, self.LEFT, int(self.distance *", "and front_distance < 700: log(LOG_LEVEL_INFO, \"Found final corner - turning", "self elif self.left_or_right == self.RIGHT and self.error < 0: return", "/ 4 if front_distance < 450: angle -= math.pi *", "angleDiference from challenge_utils import Action, PID SQRT2 = math.sqrt(2) PIhalf", "long_point_index self.factor = factor self.adjust = adjust self.angle = None", "return \"Chicane \" + (\"L\" if self.left_or_right == self.LEFT else", "0, 45) self.back_wall = self.Wall(180, 4, self.BACK_WALL, 90, 135, 180,", "= stop_action self.required_odo = {'fl': 0, 'fr': 0, 'bl': 0,", "# # MIT License # import math import pyroslib import", "state.radar.radar[angle] status = state.radar.status[angle] if status == 0: return distance", "= \"right wall\" elif point == MazeAttitude.FRONT_WALL: wall = \"front", "* 12 # attitude.calculate(state) # printWalls() state.radar.radar[180] = 50 state.radar.radar[315]", "if state.right_front_distance_of_wall > 100 and 
state.left_front_distance_of_wall > 100 and front_distance", "== self.LEFT and diagonal_distance > 800: log(LOG_LEVEL_INFO, \"Found second part", "far - not calculated\".format(w.ds_angle)) else: if w.distance is None: print(\"Wall", "wall = \"front wall\" elif point == MazeAttitude.BACK_WALL: wall =", "points[lline.long_point_index] angle = MazeAttitude.normAngle(mline.angle - PIhalf) distance = distances[self.right_point_index] *", "left_diagonal_distance = state.radar.radar[315] front_distance = state.radar.radar[0] if state.radar.status[0] != 0", "return self.left_corner_action else: log(LOG_LEVEL_INFO, \"Found corner 2 - turning left,", "def end(self): super(MazeCorridorAction, self).end() def next(self): left_diagonal_distance = state.radar.radar[315] front_distance", "MazeAttitude.FRONT_WALL: wall = \"front wall\" elif point == MazeAttitude.BACK_WALL: wall", "WHEEL_NAMES: if state.wheel_odos[wheel_name] >= self.required_odo[wheel_name]: do_stop = True if state.radar.radar[0]", "self.Wall(0, 0, self.FRONT_WALL, 270, 315, 0, 45) self.back_wall = self.Wall(180,", "self.ds_angle = distance_sensor_angle self.ds_index = distance_sensor_index self.wall_point_kind = wall_point_kind self.left_mid_point_index", "to \" + str(self.required_odo)) for wheel_name in WHEEL_NAMES: if state.wheel_odos[wheel_name]", "angle = math.asin(distance_error / distance_speed) except BaseException as ex: log(LOG_LEVEL_ALWAYS,", "or state.radar.radar[45] < 1.0: do_stop = True if do_stop: return", "< distance < 0: distance = -150 distance_from_wall = state.left_wall_distance", "offset = 150 # Values that worked speed=150, steer=5-7, dist=4", "elif point == MazeAttitude.BACK_WALL: wall = \"back wall\" else: wall", "log(LOG_LEVEL_INFO, \"Found corner - turning, lfd={: 4d} fd={: 4d} dd={:", "= 45 L90_135 = 90 L135_180 = 135 L180_225 =", "self.distance, self.speed, DriverForwardForTimeAction(self, 10, self.speed, None)) def start(self): super(ChicaneAction, self).start()", "distance < 1: 
self.distance = 0 else: if self.is_front_or_back: self.distance", "distances[self.long_point_index] short_distance = distances[self.short_point_index] if long_distance is not None and", "action spectified.\") return self.next_action def execute(self): state = self.rover.getRoverState() heading", "ld={: 4d} rd={: 4d}\".format(int(front_distance), int(left_distances), int(right_distances))) return self.left_corner_action else: log(LOG_LEVEL_INFO,", "expected_diagonal_distance * 1.2: log(LOG_LEVEL_INFO, \"Found chicane... lfd={: 4d} fd={: 4d}", "> distance_speed: angle = math.pi / 4 if front_distance <", "90: 0, 135: 0, 180: 0, 225: 0, 270: 0,", "- 45deg elif distance_error < 0 and distance_error < -distance_speed:", "from rover import WheelOdos, WHEEL_NAMES from rover import normaiseAngle, angleDiference", "3d} dist_spd={: 3d} dist={: 4d} angle={: 3d} heading={: 3d} odo={:7.2f}\".format(", "and state.left_front_distance_of_wall > 100 and front_distance < 700: log(LOG_LEVEL_INFO, \"Found", "around the corner - invoking next action \" + self.next_action.getActionName())", "180, -1, math.pi), self.L180_225: self.Line(self.L180_225, 225, 180, 1, -math.pi), self.L225_270:", "4d} s={: 3d}\".format(int(0), int(distance_from_wall), int(distance_error), int(distance), int(speed))) distance = int(distance)", "state = RoverState(None, None, None, radar, None, None) def printWallLines(a):", "int(front_distance), int(left_diagonal_distance), int(expected_diagonal_distance))) self.been_in_chicane = True return ChicaneAction(self, self.LEFT, self.distance,", "< 0: left_distances = state.radar.radar[270] + state.radar.radar[315] right_distances = state.radar.radar[90]", "left_or_right self.distance = distance * (1 if left_or_right == self.RIGHT", "def start(self): super(MazeCorridorAction, self).start() self.been_in_chicane = False def end(self): super(MazeCorridorAction,", "RIGHT_WALL = 2 FRONT_WALL = 4 BACK_WALL = 8 NO_GAP", "dw={: 4d} de={: 4d} d={: 4d} s={: 3d}\".format(int(0), 
int(distance_from_wall), int(distance_error),", "far - not calculated\".format(a)) else: angle = int(attitude.lines[a].angle * 180", "\"right wall\" elif point == MazeAttitude.FRONT_WALL: wall = \"front wall\"", "= 270 L315_0 = 315 LINES = [L0_45, L45_90, L90_135,", "end(self): super(MazeCorridorAction, self).end() def next(self): left_diagonal_distance = state.radar.radar[315] front_distance =", "dist_err={: 3d} la={: 3d} ld={: 3d} ra={: 3d} rd={: 3d}", "315, 0, -1, math.pi), self.L0_45: self.Line(self.L0_45, 45, 0, 1, -math.pi),", "math.pi / 2 class MazeAttitude: UNKNOWN = 0 LEFT_WALL =", "elif self.left_or_right == self.RIGHT and self.error < 0: return self", "= short_point_index self.long_point_index = long_point_index self.factor = factor self.adjust =", "super(DriverForwardForTimeAction, self).__init__(agent) self.time = time self.speed = speed self.next_action =", "str(angle)) wheel_orientations = state.wheel_odos.odos log(LOG_LEVEL_INFO, \"{:16.3f}: dist_f={: 4d} wa={: 3d}", "getActionName(self): return \"Corridor\" class MazeTurnAroundCornerAction(MazeAction): def __init__(self, agent, left_or_right, distance,", "if front_distance < 450: angle += math.pi * (450 -", "- self.distance angle = 0 if abs(distance_error) < 10: angle", "self.speed, None)) self.been_in_chicane = False def start(self): super(MazeCorridorAction, self).start() self.been_in_chicane", "angle, distance): self.angle = angle self.distance = distance def tryFindingWall(self,", "int(angle * 180 / math.pi) self.rover.command(pyroslib.publish, self.speed, angle, distance) #", "= points[self.mid_point_index] plong2 = points[self.right_point_index] if dlong1 < dlong2 and", "if front_distance < 550 and state.radar.radar_deltas[0] < 0: left_distances =", "print(\"----------------------------------------------------------\") # attitude.calculate(state) # printWalls() # # state.radar.radar[0] = 5", "import math import pyroslib import pyroslib.logging import time from pyroslib.logging", 
"distance_from_wall = state.right_wall_distance distance_error = distance_from_wall - self.distance angle =", "+ \" \" + str(self.speed)) def end(self): super(MazeTurnAroundCornerAction, self).end() def", "do_stop = True if do_stop: return self.stop_action else: return self", "state.right_wall_angle < min_angle: distance = 1000000000 else: distance = steer_speed", "LOG_LEVEL_INFO, LOG_LEVEL_DEBUG from rover import WheelOdos, WHEEL_NAMES from rover import", "calculated\".format(w.ds_angle, int(w.angle * 180 / math.pi))) else: print(\"Wall {:3d} ->", "self.speed, DriverForwardForTimeActoun(10, self.speed, None)) self.been_in_chicane = False def start(self): super(MazeCorridorAction,", "= False def end(self): super(MazeCorridorAction, self).end() def next(self): left_diagonal_distance =", "= a + math.pi return a class Line: def __init__(self,", "315) self.front_wall = self.Wall(0, 0, self.FRONT_WALL, 270, 315, 0, 45)", "and plong2 != MazeAttitude.UNKNOWN: points[self.left_point_index] = self.wall_point_kind points[self.mid_point_index] = self.wall_point_kind", "None and self.distances[wall.ds_angle] is not None: if preferable_wall.angle is not", "getActionName(self): return \"Forward ODO\" class MazeAction(Action): LEFT = -1 RIGHT", "and distance_error < -distance_speed: angle = -math.pi / 4 else:", "fd={: 4d} dd={: 4d} ed={: 4d}\".format(int(state.left_front_distance_of_wall), int(front_distance), int(left_diagonal_distance), int(expected_diagonal_distance))) self.been_in_chicane", "4d} angle={: 3d} heading={: 3d} odo={:7.2f}\".format( float(time.time()), int(front_distance), int(wall_angle *", "LOG_LEVEL_ALWAYS, LOG_LEVEL_INFO, LOG_LEVEL_DEBUG from rover import WheelOdos, WHEEL_NAMES from rover", "mline.angle is not None and pmid == MazeAttitude.UNKNOWN and plong2", "= distances[self.short_point_index] if long_distance is not None and short_distance is", "6, self.LEFT_WALL, 180, 225, 270, 315) self.front_wall = self.Wall(0, 0,", "to {:s}\".format(a, angle, wall)) 
def printWall(w): if w.angle is None:", "-distance_speed: angle = math.pi / 4 if front_distance < 450:", "* 1), self.speed, self) # self.right_corner_action = MazeTurnAroundCornerAction(self.odo, self.radar, self.heading,", "heading {:07.3f}, requested heading {:07.3f}\".format(self.distance, self.speed, self.start_heading, self.requested_heading)) self.rover.command(pyroslib.publish, self.speed,", "= 1 RIGHT_WALL = 2 FRONT_WALL = 4 BACK_WALL =", "= 0 FORWARD_GAP = 1 SIDE_GAP = 2 POINTS =", "\"300 120\") def end(self): super(MoveForwardOnOdo, self).end() def next(self): state =", "= Radar(0, radar_values, radar_status, Radar(0, radar_last_values, radar_status)) state = RoverState(None,", "= left_mid_point_index self.left_point_index = left_point_index self.mid_point_index = mid_point_index self.right_point_index =", "state.radar.status[0] != 0 and abs(state.radar.radar_deltas[0]) > 100: log(LOG_LEVEL_INFO, \"Front distance", "10, 270: 10, 315: SQRT2 * 10} radar_last_values = {0:", "< -distance_speed: angle = -math.pi / 4 else: try: angle", "abs(int(math.cos(angle) * distance)) def setAngleAndDistance(self, angle, distance): self.angle = angle", "4, self.BACK_WALL, 90, 135, 180, 225) self.left_gap = self.NO_GAP self.right_gap", "self.points) updateUndefinedWall(self.front_wall, self.right_wall, -PIhalf, self.left_wall) updateUndefinedWall(self.back_wall, self.right_wall, PIhalf, self.left_wall) updateUndefinedWall(self.right_wall,", "2 POINTS = [0, 45, 90, 135, 180, 225, 270,", "d={: 4d} s={: 3d}\".format(int(0), int(distance_from_wall), int(distance_error), int(distance), int(speed))) else: distance", "for p in self.POINTS} for line in self.lines: self.lines[line].calcAngle(self.distances) wls", "distances) elif lline.angle is not None and plong1 == MazeAttitude.UNKNOWN", "None] wall_processing_order = sorted(wls, key=lambda wall: self.distances[wall.ds_angle]) for wall in", "def next(self): if self.left_or_right == self.LEFT: diagonal_distance = 
state.radar.radar[45] else:", "= speed self.next_action = next_action if self.left_or_right == MazeAction.RIGHT: self.a1", "rh={:07.3f} e={:07.3f}\" .format(self.speed, heading, last_heading, angleDiference(heading, last_heading), self.requested_heading, self.error)) def", "math.pi), int(distance_from_wall), int(distance_error), int(state.left_wall_angle * 180 / math.pi), int(state.left_front_distance_of_wall), int(state.right_wall_angle", "pyroslib.logging import time from pyroslib.logging import log, LOG_LEVEL_ALWAYS, LOG_LEVEL_INFO, LOG_LEVEL_DEBUG", "rline.angle >= mline.angle / MazeAttitude.ANGLE_TOLLERANCE: points[self.mid_point_index] = points[rline.long_point_index] angle =", "log(LOG_LEVEL_INFO, \"Found chicane... lfd={: 4d} fd={: 4d} dd={: 4d} ed={:", "super(MoveForwardOnOdo, self).__init__(agent) self.stop_action = stop_action self.required_odo = {'fl': 0, 'fr':", "self.L135_180: self.Line(self.L135_180, 135, 180, -1, math.pi), self.L180_225: self.Line(self.L180_225, 225, 180,", "90, 135) self.left_wall = self.Wall(270, 6, self.LEFT_WALL, 180, 225, 270,", "end(self): super(ChicaneAction, self).end() def next(self): if self.left_or_right == self.LEFT: diagonal_distance", "self).end() def next(self): left_diagonal_distance = state.radar.radar[315] front_distance = state.radar.radar[0] if", "225, 270, 315] WALLS = [90, 270, 0, 180] L0_45", "= distance * (1 if left_or_right == self.RIGHT else -1)", "= a - math.pi elif a <= -PIhalf: a =", "w_ds_angle in self.WALLS if self.distances[w_ds_angle] is not None] wall_processing_order =", "self.L45_90: self.Line(self.L45_90, 45, 90, -1, PIhalf), self.L90_135: self.Line(self.L90_135, 135, 90,", "L315_0 = 315 LINES = [L0_45, L45_90, L90_135, L135_180, L180_225,", "if wall.angle is None and self.distances[wall.ds_angle] is not None: if", "0, 180] L0_45 = 0 L45_90 = 45 L90_135 =", "= self.wall_point_kind points[self.mid_point_index] = self.wall_point_kind self.setAngle(lline.angle, distances) elif mline.angle is", "4d} 
d={: 4d} s={: 3d}\".format(int(0), int(distance_from_wall), int(distance_error), int(distance), int(speed))) distance", "next_action=self) else: log(LOG_LEVEL_INFO, \"Found corner - turning, lfd={: 4d} fd={:", "elif distance_error > 0 and distance_error > distance_speed: angle =", "if left_distances > right_distances: log(LOG_LEVEL_INFO, \"Found corner 2 - turning", "return self.stop_action else: return self def execute(self): pass def getActionName(self):", "MazeAttitude.LEFT_WALL: wall = \"left wall\" elif point == MazeAttitude.RIGHT_WALL: wall", "start(self): super(MazeCorridorAction, self).start() self.been_in_chicane = False def end(self): super(MazeCorridorAction, self).end()", "is not None and pmid == MazeAttitude.UNKNOWN and plong2 ==", "> 100 and front_distance < 550: expected_diagonal_distance = 0 if", "= speed * speed_distance_fudge_factor if self.left_or_right == self.RIGHT: wall_angle =", "None, None, radar, None, None) def printWallLines(a): if attitude.lines[a].angle is", "normaiseAngle, angleDiference from challenge_utils import Action, PID SQRT2 = math.sqrt(2)", "self.speed = speed self.next_action = next_action def start(self): self.rover.command(pyroslib.publish, self.speed,", "self.distances[wall.mid_point_index]) else: wall.setAngleAndDistance(self.normAngle(second_wall.angle - wall_adjust), self.distances[wall.mid_point_index]) self.points[wall.ds_angle] = wall.wall_point_kind self.distances", "str(self.speed) + \" \" + str(angle)) wheel_orientations = state.wheel_odos.odos log(LOG_LEVEL_INFO,", "distances) elif plong1 != MazeAttitude.UNKNOWN and pmid == MazeAttitude.UNKNOWN and", "int(distance_error), int(distance), int(speed))) else: wall_angle = state.left_wall_angle if -min_angle <", "rd={: 4d}\".format(int(front_distance), int(left_distances), int(right_distances))) return self.right_corner_action if state.right_front_distance_of_wall > 100", "WheelOdos, WHEEL_NAMES from rover import normaiseAngle, angleDiference from challenge_utils 
import", "else: log(LOG_LEVEL_INFO, \"Finishing turning - no next action spectified.\") return", "int(speed))) distance = int(distance) angle = int(angle * 180 /", "def execute(self): pass def getActionName(self): return \"Forward ODO\" class MazeAction(Action):", "if self.left_or_right == self.RIGHT else -1)) self.pid = PID(1, 0.0,", "5 * 0.9 # state.radar.radar[315] = SQRT2 * 17 #", "distance) # pyroslib.publish(\"move/steer\", str(distance) + \" \" + str(self.speed) +", "speed self.start_heading = 0 self.last_heading = 0 self.requested_heading = 0", "rover import normaiseAngle, angleDiference from challenge_utils import Action, PID SQRT2", "= int(attitude.lines[a].angle * 180 / math.pi) point = attitude.points[a] if", "use odo to update to correct value! speed_steer_fudge_factor = 5", "self.start_heading, self.requested_heading)) self.rover.command(pyroslib.publish, self.speed, 0, self.distance) # pyroslib.publish(\"move/steer\", str(self.distance) +", "points[self.left_point_index] pmid = points[self.mid_point_index] plong2 = points[self.right_point_index] if dlong1 <", "= 4 # 4 min_angle = 1 * math.pi /", "math.pi) self.rover.command(pyroslib.publish, self.speed, angle, distance) # pyroslib.publish(\"move/steer\", str(distance) + \"", "135: 0, 180: 0, 225: 0, 270: 0, 315: 0}", "= line_index self.short_point_index = short_point_index self.long_point_index = long_point_index self.factor =", "None)) def start(self): super(ChicaneAction, self).start() def end(self): super(ChicaneAction, self).end() def", "== \"__main__\": from rover import Radar, RoverState radar_values = {0:", "distance_speed: angle = math.pi / 4 if front_distance < 450:", "-1, math.pi), self.L180_225: self.Line(self.L180_225, 225, 180, 1, -math.pi), self.L225_270: self.Line(self.L225_270,", "from pyroslib.logging import log, LOG_LEVEL_ALWAYS, LOG_LEVEL_INFO, LOG_LEVEL_DEBUG from rover import", "None and mline.angle is not None: if lline.angle * MazeAttitude.ANGLE_TOLLERANCE", "distances) elif 
dlong1 >= dlong2 and plong2 == MazeAttitude.UNKNOWN and", "and plong2 == MazeAttitude.UNKNOWN and pmid == MazeAttitude.UNKNOWN: points[self.mid_point_index] =", "270: 10, 315: SQRT2 * 10} radar_last_values = {0: 10,", "points[self.left_point_index] = self.wall_point_kind points[self.mid_point_index] = self.wall_point_kind self.setAngle(lline.angle, distances) elif plong1", "lines[self.mid_point_index] rline = lines[self.right_point_index] dlong1 = distances[lline.long_point_index] dmid = distances[mline.short_point_index]", "with 10 and by 180 -> 450/10 - 45deg else:", "at {:3d} angle belogs to {:s}\".format(a, angle, wall)) def printWall(w):", "points[self.mid_point_index] = self.wall_point_kind self.setAngle(lline.angle, distances) elif dlong1 >= dlong2 and", "or self.ds_angle == 180 self.selected_line = None self.angle = None", "\"Domain error wa={: 3d} dw={: 4d} de={: 4d} d={: 4d}", "180 steer_speed = speed * speed_steer_fudge_factor distance_speed = speed *", "if -min_angle < state.left_wall_angle < min_angle: distance = 1000000000 else:", "self.front_wall = self.Wall(0, 0, self.FRONT_WALL, 270, 315, 0, 45) self.back_wall", "FORWARD_GAP = 1 SIDE_GAP = 2 POINTS = [0, 45,", "= distance_sensor_angle self.ds_index = distance_sensor_index self.wall_point_kind = wall_point_kind self.left_mid_point_index =", "self.speed, next_action=self) else: log(LOG_LEVEL_INFO, \"Found corner - turning, lfd={: 4d}", "around corner at distance {:04d} at speed {:04d}, start heading", "distance return None def updateUndefinedWall(wall, preferable_wall, wall_adjust, second_wall): if wall.angle", "45deg else: try: angle = -math.asin(distance_error / distance_speed) except BaseException", "0, 135: 0, 180: 0, 225: 0, 270: 0, 315:", "next(self): state = self.rover.getRoverState() do_stop = False log(LOG_LEVEL_DEBUG, \"Driving to", "not None: if lline.angle * MazeAttitude.ANGLE_TOLLERANCE >= mline.angle >= lline.angle", "a = a - math.pi elif a <= -PIhalf: a", "== MazeAttitude.UNKNOWN and 
pmid == MazeAttitude.UNKNOWN: points[self.mid_point_index] = self.wall_point_kind points[self.right_point_index]", "int(distance), int(speed))) else: wall_angle = state.left_wall_angle if -min_angle < state.left_wall_angle", "self.left_or_right == self.RIGHT and self.error < 0: return self else:", "last_heading), self.requested_heading, self.error)) def getActionName(self): return \"Turn-Around-Corner\" class DriverForwardForTimeAction(Action): def", "wall_point_kind self.left_mid_point_index = left_mid_point_index self.left_point_index = left_point_index self.mid_point_index = mid_point_index", "self.angle = None self.distance = None def setAngle(self, angle, distances):", "def check_next_action_conditions(self): return self class ChicaneAction(MazeAction): def __init__(self, agent, left_or_right,", "else: if state.left_front_distance_of_wall > 100 and front_distance < 550: expected_diagonal_distance", "# attitude.calculate(state) # printWalls() # # state.radar.radar[0] = 5 #", "wa={: 3d} dist_w={: 4d} dist_err={: 3d} la={: 3d} ld={: 3d}", "WHEEL_NAMES: self.required_odo[wheel_name] = distance def start(self): super(MoveForwardOnOdo, self).start() state =", "state.right_wall_distance distance_error = distance_from_wall - self.distance angle = 0 if", "int(front_distance), int(0 * 180 / math.pi), int(distance_from_wall), int(distance_error), int(0 *", "== MazeAttitude.UNKNOWN and plong2 != MazeAttitude.UNKNOWN: points[self.left_point_index] = self.wall_point_kind points[self.mid_point_index]", "= 0 if state.left_wall_angle < 0: expected_diagonal_distance = front_distance *", "int(angle), int(state.heading.heading), float(state.wheel_orientations.orientations['fl']) )) def getActionName(self): return \"Corridor\" class MazeTurnAroundCornerAction(MazeAction):", "radar_status)) state = RoverState(None, None, None, radar, None, None) def", "is None: print(\"{:3d} -> line at {:3d} angle\".format(a, angle)) else:", "at {:3d}\".format(w.ds_angle, int(w.angle * 180 / 
math.pi), w.distance)) def printWalls():", "plong2 = points[self.right_point_index] if dlong1 < dlong2 and plong1 !=", "1000000000 else: distance = steer_speed / state.right_wall_angle if 0 <=", "mline.angle * MazeAttitude.ANGLE_TOLLERANCE >= rline.angle >= mline.angle / MazeAttitude.ANGLE_TOLLERANCE: points[self.mid_point_index]", "= math.asin(distance_error / distance_speed) except BaseException as ex: log(LOG_LEVEL_ALWAYS, \"Domain", "self.next_action.getActionName()) else: log(LOG_LEVEL_INFO, \"Finishing turning - no next action spectified.\")", "wall\" elif point == MazeAttitude.RIGHT_WALL: wall = \"right wall\" elif", "270: 0, 315: 0} def calculate(self, state): def getPointDistance(state, angle):", "def execute(self): state = self.rover.getRoverState() heading = state.heading.heading last_heading =", "distance_error < -distance_speed: angle = -math.pi / 4 else: try:", "turning around the corner - invoking next action \" +", "wall: self.distances[wall.ds_angle]) for wall in wall_processing_order: wall.tryFindingWall(self.distances, self.lines, self.points) updateUndefinedWall(self.front_wall,", "1, -PIhalf), self.L135_180: self.Line(self.L135_180, 135, 180, -1, math.pi), self.L180_225: self.Line(self.L180_225,", "math.pi))) else: print(\"Wall {:3d} -> has angle {:3d} and is", "1, -math.pi), self.L45_90: self.Line(self.L45_90, 45, 90, -1, PIhalf), self.L90_135: self.Line(self.L90_135,", "self.Wall(180, 4, self.BACK_WALL, 90, 135, 180, 225) self.left_gap = self.NO_GAP", "True if state.radar.radar[0] < 1.0 or state.radar.radar[315] < 1.0 or", "int(w.angle * 180 / math.pi))) else: print(\"Wall {:3d} -> has", "points[self.right_point_index] if dlong1 < dlong2 and plong1 != MazeAttitude.UNKNOWN and", "def getPointDistance(state, angle): distance = state.radar.radar[angle] status = state.radar.status[angle] if", "self.a1 = 315 self.a2 = 270 self.a3 = 225 self.left_corner_action", "= time self.speed = speed self.next_action = next_action def start(self):", "not None 
and mline.angle is not None: if lline.angle *", "> right_distances: log(LOG_LEVEL_INFO, \"Found corner 2 - turning left, fd={:", "state.radar.radar[315] = SQRT2 * 17 # state.radar.radar[270] = SQRT2 *", "distances[mline.long_point_index] plong1 = points[self.left_point_index] pmid = points[self.mid_point_index] plong2 = points[self.right_point_index]", "* 10, 180: 10, 225: SQRT2 * 10, 270: 10,", "45 self.a2 = 90 self.a3 = 135 else: self.a1 =", "pass def getActionName(self): return \"Forward ODO\" class MazeAction(Action): LEFT =", "self.left_or_right == self.RIGHT and diagonal_distance > 800: log(LOG_LEVEL_INFO, \"Found end", "- TODO use odo to update to correct value! speed_steer_fudge_factor", "self.right_corner_action return self def execute(self): state = self.rover.getRoverState() left_diagonal_distance =", "= distances[mline.short_point_index] dlong2 = distances[mline.long_point_index] plong1 = points[self.left_point_index] pmid =", "point == MazeAttitude.FRONT_WALL: wall = \"front wall\" elif point ==", "2, self.RIGHT_WALL, 0, 45, 90, 135) self.left_wall = self.Wall(270, 6,", "90, 1, -PIhalf), self.L135_180: self.Line(self.L135_180, 135, 180, -1, math.pi), self.L180_225:", "corner - turning, lfd={: 4d} fd={: 4d} dd={: 4d} ed={:", "self.wall_point_kind points[self.right_point_index] = self.wall_point_kind self.setAngle(mline.angle, distances) def __init__(self): self.lines =", "__init__(self, agent, stop_action=None): super(MoveForwardOnOdo, self).__init__(agent) self.stop_action = stop_action self.required_odo =", "ofchicane - leaging, rfd={: 4d}\".format(int(diagonal_distance))) return self.next_action return self def", "self.pid.process(self.requested_heading, heading) if self.left_or_right == self.LEFT and self.error > 0:", "__init__(self, agent, time, speed, next_action): super(DriverForwardForTimeAction, self).__init__(agent) self.time = time", "if point == MazeAttitude.LEFT_WALL: wall = \"left wall\" elif point", "self.BACK_WALL, 90, 135, 180, 225) 
self.left_gap = self.NO_GAP self.right_gap =", "math.pi return a class Line: def __init__(self, line_index, long_point_index, short_point_index,", "distance = state.radar.radar[angle] status = state.radar.status[angle] if status == 0:", "if self.next_action is not None: log(LOG_LEVEL_INFO, \"Finished turning around the", "45, 0, 1, -math.pi), self.L45_90: self.Line(self.L45_90, 45, 90, -1, PIhalf),", "self.left_or_right = left_or_right self.distance = distance * (1 if left_or_right", "> 0: return self elif self.left_or_right == self.RIGHT and self.error", "dist={: 4d} angle={: 3d} heading={: 3d} odo={:7.2f}\".format( float(time.time()), int(front_distance), int(wall_angle", "if pmid == MazeAttitude.UNKNOWN: points[self.mid_point_index] = self.wall_point_kind if plong2 ==", "= math.pi / 4 else: try: angle = -math.asin(distance_error /", "self).end() def next(self): state = self.rover.getRoverState() do_stop = False log(LOG_LEVEL_DEBUG,", "0.0, 0.05, 1, 0, diff_method=angleDiference) self.pid.process(self.requested_heading, self.start_heading) log(LOG_LEVEL_INFO, \"Starting to", "print(\"Wall {:3d} -> has angle {:3d} and is at {:3d}\".format(w.ds_angle,", "distance_speed: angle = -math.pi / 4 elif distance_error < 0", "distance last_distance = state.radar.last_radar[angle] if abs(distance - last_distance) < 100:", "self.wall_point_kind self.setAngle(mline.angle, distances) elif plong1 == MazeAttitude.UNKNOWN and pmid ==", "self.speed = 150 # 150 speed = 50 # mm/second", "super(ChicaneAction, self).__init__(agent) self.left_or_right = left_or_right self.distance = distance self.speed =", "plong2 == MazeAttitude.UNKNOWN: points[self.right_point_index] = self.wall_point_kind self.setAngle(mline.angle, distances) else: if", "calculate(self, state): def getPointDistance(state, angle): distance = state.radar.radar[angle] status =", "ed={: 4d}\".format(int(state.left_front_distance_of_wall), int(front_distance), int(left_diagonal_distance), int(expected_diagonal_distance))) 
return self.left_corner_action if front_distance <", "\"Found end ofchicane - leaging, rfd={: 4d}\".format(int(diagonal_distance))) return self.next_action return", "self.L90_135: self.Line(self.L90_135, 135, 90, 1, -PIhalf), self.L135_180: self.Line(self.L135_180, 135, 180,", "distance < 0: distance = -150 distance = -distance distance_from_wall", "if self.left_or_right == self.LEFT and diagonal_distance > 800: log(LOG_LEVEL_INFO, \"Found", "\"; starting...\") self.rover.command(pyroslib.publish, 300, 120) # pyroslib.publish(\"move/steer\", \"300 120\") def", "state.heading.heading self.requested_heading = normaiseAngle(self.start_heading + 80 * -(1 if self.left_or_right", "int(0 * 180 / math.pi), int(0), int(steer_speed), int(distance_speed), int(distance), int(angle),", "1.0 or state.radar.radar[315] < 1.0 or state.radar.radar[45] < 1.0: do_stop", "return distance last_distance = state.radar.last_radar[angle] if abs(distance - last_distance) <", "/ 1800 # divide with 10 and by 180 ->", "pmid == MazeAttitude.UNKNOWN and plong2 != MazeAttitude.UNKNOWN: points[self.left_point_index] = self.wall_point_kind", "self.rover.command(pyroslib.publish, 300, 120) # pyroslib.publish(\"move/steer\", \"300 120\") def end(self): super(MoveForwardOnOdo,", "{:3d} and is at {:3d}\".format(w.ds_angle, int(w.angle * 180 / math.pi),", "180 -> 450/10 - 45deg else: try: angle = math.asin(distance_error", "= speed self.next_action = next_action def start(self): self.rover.command(pyroslib.publish, self.speed, 0)", "Radar(0, radar_last_values, radar_status)) state = RoverState(None, None, None, radar, None,", "= int(angle * 180 / math.pi) self.rover.command(pyroslib.publish, self.speed, angle, distance)", "and plong2 != MazeAttitude.UNKNOWN and mline.angle * MazeAttitude.ANGLE_TOLLERANCE >= rline.angle", "= state.radar.radar[315] if self.left_or_right == self.LEFT and diagonal_distance > 800:", "-PIhalf, self.left_wall) updateUndefinedWall(self.back_wall, self.right_wall, PIhalf, 
self.left_wall) updateUndefinedWall(self.right_wall, self.front_wall, PIhalf, self.back_wall)", "and distance_error > distance_speed: angle = -math.pi / 4 elif", "def calculate(self, state): def getPointDistance(state, angle): distance = state.radar.radar[angle] status", "= self.rover.getRoverState() for wheel in self.required_odo: self.required_odo[wheel] = WheelOdos.normaliseOdo(state.wheel_odos[wheel] +", "points[self.mid_point_index] = points[rline.long_point_index] angle = MazeAttitude.normAngle(mline.angle + PIhalf) distance =", "2 - turning left, fd={: 4d} ld={: 4d} rd={: 4d}\".format(int(front_distance),", "and diagonal_distance > 800: log(LOG_LEVEL_INFO, \"Found end ofchicane - leaging,", "self.front_wall, -PIhalf, self.back_wall) # TODO calc gaps class MoveForwardOnOdo(Action): def", "= state.radar.radar[315] front_distance = state.radar.radar[0] gain = 60 offset =", "-150 distance_from_wall = state.left_wall_distance distance_error = distance_from_wall - self.distance angle", "* 10} radar_last_values = {0: 10, 45: SQRT2 * 10,", "= self.Wall(270, 6, self.LEFT_WALL, 180, 225, 270, 315) self.front_wall =", "and distance_error > distance_speed: angle = -math.pi / 4 if", "4d} angle={: 3d} heading={: 3d} odo={:7.2f}\".format( float(time.time()), int(front_distance), int(0 *", "wall.setAngleAndDistance(self.normAngle(preferable_wall.angle + wall_adjust), self.distances[wall.mid_point_index]) else: wall.setAngleAndDistance(self.normAngle(second_wall.angle - wall_adjust), self.distances[wall.mid_point_index]) self.points[wall.ds_angle]", "\"Found corner 2 - turning left, fd={: 4d} ld={: 4d}", "= self.wall_point_kind self.setAngle(lline.angle, distances) elif dlong1 >= dlong2 and plong2", "wheel_orientations = state.wheel_odos.odos # log(LOG_LEVEL_INFO, \"{:16.3f}: dist_f={: 4d} wa={: 3d}", "angle\".format(a, angle)) else: if point == MazeAttitude.LEFT_WALL: wall = \"left", "fd={: 4d} ld={: 4d} rd={: 4d}\".format(int(front_distance), int(left_distances), 
int(right_distances))) return self.right_corner_action", "= -distance distance_from_wall = state.right_wall_distance distance_error = distance_from_wall - self.distance", "math.pi), self.L180_225: self.Line(self.L180_225, 225, 180, 1, -math.pi), self.L225_270: self.Line(self.L225_270, 225,", "print(\"Wall {:3d} -> is too far - not calculated\".format(w.ds_angle)) else:", "int(speed))) else: distance = 1000000000 distance_from_wall = state.radar.radar[270] distance_error =", "Club # # MIT License # import math import pyroslib", "final corner - turning to finish, rfd={: 4d} fd={: 4d}", "w in attitude.walls: printWall(w) print(\"----------------------------------------------------------\") # attitude.calculate(state) # printWalls() #", "\"back wall\" else: wall = \"no wall\" print(\"{:3d} -> line", "= state.radar.radar[315] front_distance = state.radar.radar[0] if state.radar.status[0] != 0 and", "corner - invoking next action \" + self.next_action.getActionName()) else: log(LOG_LEVEL_INFO,", "PID(1, 0.0, 0.05, 1, 0, diff_method=angleDiference) self.pid.process(self.requested_heading, self.start_heading) log(LOG_LEVEL_INFO, \"Starting", "+ \" \" + str(angle)) wheel_orientations = state.wheel_odos.odos # log(LOG_LEVEL_INFO,", "SQRT2 * 12 # attitude.calculate(state) # printWalls() state.radar.radar[180] = 50", "distance_sensor_index, wall_point_kind, left_mid_point_index, left_point_index, mid_point_index, right_point_index): self.ds_angle = distance_sensor_angle self.ds_index", "180 / math.pi), int(0), int(steer_speed), int(distance_speed), int(distance), int(angle), int(state.heading.heading), float(state.wheel_orientations.orientations['fl'])", "4d} dd={: 4d} ed={: 4d}\".format(int(state.left_front_distance_of_wall), int(front_distance), int(left_diagonal_distance), int(expected_diagonal_distance))) return self.left_corner_action", "self.speed, self.start_heading, self.requested_heading)) self.rover.command(pyroslib.publish, self.speed, 0, self.distance) # 
pyroslib.publish(\"move/steer\", str(self.distance)", "start(self): super(MazeTurnAroundCornerAction, self).start() state = self.rover.getRoverState() self.start_heading = state.heading.heading self.requested_heading", "else -1)) self.pid = PID(1, 0.0, 0.05, 1, 0, diff_method=angleDiference)", "to \" + str(self.required_odo) + \"; starting...\") self.rover.command(pyroslib.publish, 300, 120)", "0 and distance_error < -distance_speed: angle = -math.pi / 4", "+ self.required_odo[wheel]) log(LOG_LEVEL_DEBUG, \"Reset odo to \" + str(self.required_odo) +", "self.angle = None def calcAngle(self, distances): long_distance = distances[self.long_point_index] short_distance", "self.distance = None def setAngle(self, angle, distances): self.angle = angle", "{self.right_wall.ds_angle: self.right_wall, self.left_wall.ds_angle: self.left_wall, self.front_wall.ds_angle: self.front_wall, self.back_wall.ds_angle: self.back_wall} self.points =", "abs(int(math.sin(angle) * distance)) else: self.distance = abs(int(math.cos(angle) * distance)) def", "de={: 4d} d={: 4d} s={: 3d}\".format(int(0), int(distance_from_wall), int(distance_error), int(distance), int(speed)))", "else: distance = steer_speed / state.left_wall_angle if 0 <= distance", "SQRT2 * 17 # state.radar.radar[270] = SQRT2 * 13 #", "/ 4 elif distance_error < 0 and distance_error < -distance_speed:", "None self.angle = None self.distance = None def setAngle(self, angle,", "radar_last_values = {0: 10, 45: SQRT2 * 10, 90: 10,", "= 0 L45_90 = 45 L90_135 = 90 L135_180 =", "self.distances[wall.ds_angle] is not None: if preferable_wall.angle is not None: wall.setAngleAndDistance(self.normAngle(preferable_wall.angle", "and left_diagonal_distance > expected_diagonal_distance * 1.2: log(LOG_LEVEL_INFO, \"Found chicane... 
lfd={:", "log, LOG_LEVEL_ALWAYS, LOG_LEVEL_INFO, LOG_LEVEL_DEBUG from rover import WheelOdos, WHEEL_NAMES from", "self.ds_angle == 0 or self.ds_angle == 180 self.selected_line = None", "4d}\".format(int(diagonal_distance))) return self.next_action return self def execute(self): state = self.rover.getRoverState()", "* 1.2: log(LOG_LEVEL_INFO, \"Found chicane... lfd={: 4d} fd={: 4d} dd={:", "4d} d={: 4d} s={: 3d}\".format(int(wall_angle), int(distance_from_wall), int(distance_error), int(distance), int(speed))) distance", "45 L90_135 = 90 L135_180 = 135 L180_225 = 180", "= \"no wall\" print(\"{:3d} -> line at {:3d} angle belogs", "else: if self.is_front_or_back: self.distance = abs(int(math.sin(angle) * distance)) else: self.distance", "agent, left_or_right, distance, speed, next_action=None): super(MazeCorridorAction, self).__init__(agent) self.left_or_right = left_or_right", "odo={:7.2f}\".format( float(time.time()), int(front_distance), int(0 * 180 / math.pi), int(distance_from_wall), int(distance_error),", "radar_last_values, radar_status)) state = RoverState(None, None, None, radar, None, None)", "3d}\".format(int(wall_angle), int(distance_from_wall), int(distance_error), int(distance), int(speed))) distance = int(distance) angle =", "return self.next_action return self def execute(self): state = self.rover.getRoverState() front_distance", "self).start() state = self.rover.getRoverState() for wheel in self.required_odo: self.required_odo[wheel] =", "def __init__(self, agent, left_or_right, distance, speed, next_action=None): super(ChicaneAction, self).__init__(agent) self.left_or_right", "is not None and plong1 == MazeAttitude.UNKNOWN and pmid ==", "= [L0_45, L45_90, L90_135, L135_180, L180_225, L225_270, L270_315, L315_0] ANGLE_TOLLERANCE", "225 self.left_corner_action = MazeTurnAroundCornerAction(self, self.LEFT, int(self.distance * 1), self.speed, self)", "int(wall_angle * 180 / math.pi), int(distance_from_wall), int(distance_error), int(state.left_wall_angle 
* 180", "self.last_heading = heading log(LOG_LEVEL_INFO, \"Turning speed={:04d} h={:07.3f} lh={:07.3f} dh={:07.3f} rh={:07.3f}", "self.LEFT, self.distance, self.speed, next_action=self) else: log(LOG_LEVEL_INFO, \"Found corner - turning,", "left_distances = state.radar.radar[270] + state.radar.radar[315] right_distances = state.radar.radar[90] + state.radar.radar[45]", "log(LOG_LEVEL_INFO, \"Found corner 2 - turning left, fd={: 4d} ld={:", "self.time -= 1 log(LOG_LEVEL_INFO, \"Going forward for \" + str(self.time)", "225) self.left_gap = self.NO_GAP self.right_gap = self.NO_GAP self.walls = {self.right_wall.ds_angle:", "heading log(LOG_LEVEL_INFO, \"Turning speed={:04d} h={:07.3f} lh={:07.3f} dh={:07.3f} rh={:07.3f} e={:07.3f}\" .format(self.speed,", "self.speed = speed self.start_heading = 0 self.last_heading = 0 self.requested_heading", "self).__init__(agent) self.time = time self.speed = speed self.next_action = next_action", "+ str(self.time) + \" ticks.\") return self return self.next_action if", "WALLS = [90, 270, 0, 180] L0_45 = 0 L45_90", "math.pi elif a <= -PIhalf: a = a + math.pi", "def execute(self): state = self.rover.getRoverState() left_diagonal_distance = state.radar.radar[315] front_distance =", "and short_distance is not None: lsqrt2 = long_distance / SQRT2", "L180_225, L225_270, L270_315, L315_0] ANGLE_TOLLERANCE = 1.075 @staticmethod def normAngle(a):", "front_distance < 550: expected_diagonal_distance = 0 if state.left_wall_angle < 0:", "self.back_wall.ds_angle: self.back_wall} self.points = {0: 0, 45: 0, 90: 0,", "/ math.pi), int(state.left_front_distance_of_wall), int(state.right_wall_angle * 180 / math.pi), int(state.right_front_distance_of_wall), int(steer_speed),", "self.right_corner_action = MazeTurnAroundCornerAction(self.odo, self.radar, self.heading, self.RIGHT, self.distance, self.speed, DriverForwardForTimeActoun(10, self.speed,", "if plong1 == MazeAttitude.UNKNOWN: points[self.left_point_index] = self.wall_point_kind if pmid ==", 
"self.error > 0: return self elif self.left_or_right == self.RIGHT and", "self.setAngleAndDistance(angle, distance) elif lline.angle is not None and mline.angle is", "= SQRT2 * 12 # attitude.calculate(state) # printWalls() state.radar.radar[180] =", "class Wall: def __init__(self, distance_sensor_angle, distance_sensor_index, wall_point_kind, left_mid_point_index, left_point_index, mid_point_index,", "int(distance), int(angle), int(state.heading.heading), float(state.wheel_orientations.orientations['fl']) )) def getActionName(self): return \"Chicane \"", "print(\"{:3d} -> line at {:3d} angle\".format(a, angle)) else: if point", "lsqrt2 = long_distance / SQRT2 self.angle = MazeAttitude.normAngle(math.atan2(lsqrt2, lsqrt2 -", "self.L180_225: self.Line(self.L180_225, 225, 180, 1, -math.pi), self.L225_270: self.Line(self.L225_270, 225, 270,", "225: 0, 270: 0, 315: 0} def calculate(self, state): def", "None class Wall: def __init__(self, distance_sensor_angle, distance_sensor_index, wall_point_kind, left_mid_point_index, left_point_index,", "state.right_front_distance_of_wall > 100 and state.left_front_distance_of_wall > 100 and front_distance <", "-distance distance_from_wall = state.right_wall_distance distance_error = distance_from_wall - self.distance angle", "= lines[self.left_point_index] mline = lines[self.mid_point_index] rline = lines[self.right_point_index] dlong1 =", "315: 0} def calculate(self, state): def getPointDistance(state, angle): distance =", "and pmid == MazeAttitude.UNKNOWN and plong2 == MazeAttitude.UNKNOWN: points[self.mid_point_index] =", "'bl': 0, 'br': 0} def setRequiredOdo(self, distance): for wheel_name in", "self.RIGHT, self.distance, self.speed, DriverForwardForTimeActoun(10, self.speed, None)) self.been_in_chicane = False def", "* MazeAttitude.ANGLE_TOLLERANCE >= mline.angle >= lline.angle / MazeAttitude.ANGLE_TOLLERANCE: if plong1", "self.setAngle(mline.angle, distances) elif plong1 == MazeAttitude.UNKNOWN and pmid == 
MazeAttitude.UNKNOWN", "speed, next_action=None): super(ChicaneAction, self).__init__(agent) self.left_or_right = left_or_right self.distance = distance", "MazeAttitude.ANGLE_TOLLERANCE >= mline.angle >= lline.angle / MazeAttitude.ANGLE_TOLLERANCE: if plong1 ==", "= state.radar.radar[angle] status = state.radar.status[angle] if status == 0: return", "< 450: angle -= math.pi * (450 - front_distance) /", "attitude.calculate(state) # printWalls() state.radar.radar[180] = 50 state.radar.radar[315] = 30 attitude.calculate(state)", "3d} dist={: 4d} angle={: 3d} heading={: 3d} odo={:7.2f}\".format( float(time.time()), int(front_distance),", "front_distance * math.cos(state.left_wall_angle) * SQRT2 if False and not self.been_in_chicane", "self def execute(self): pass def getActionName(self): return \"Forward ODO\" class", "diagonal_distance = state.radar.radar[315] if self.left_or_right == self.LEFT and diagonal_distance >", "False and not self.been_in_chicane and front_distance > 300 and left_diagonal_distance", "* speed_distance_fudge_factor if self.left_or_right == self.RIGHT: wall_angle = state.right_wall_angle if", "0 and distance_error > distance_speed: angle = math.pi / 4", "225, 270, -1, PIhalf), self.L270_315: self.Line(self.L270_315, 315, 270, 1, -PIhalf)}", "state = self.rover.getRoverState() do_stop = False log(LOG_LEVEL_DEBUG, \"Driving to \"", "= abs(int(math.cos(angle) * distance)) def setAngleAndDistance(self, angle, distance): self.angle =", "* abs(math.sin(mline.angle) / SQRT2) self.setAngleAndDistance(angle, distance) elif lline.angle is not", "0) # pyroslib.publish(\"move/drive\", \"0 \" + str(self.speed)) log(LOG_LEVEL_INFO, \"Going forward", "heading = state.heading.heading last_heading = self.last_heading self.last_heading = heading log(LOG_LEVEL_INFO,", "270: 10, 315: SQRT2 * 10} radar_status = {0: 0,", "* 180 / math.pi), int(0), int(0 * 180 / math.pi),", "speed, next_action): super(DriverForwardForTimeAction, self).__init__(agent) self.time = time 
self.speed = speed", "normAngle(a): if a > PIhalf: a = a - math.pi", "None: print(\"{:3d} -> line at {:3d} angle\".format(a, angle)) else: if", "== self.RIGHT: wall_angle = state.right_wall_angle if -min_angle < state.right_wall_angle <", "270, 315) self.front_wall = self.Wall(0, 0, self.FRONT_WALL, 270, 315, 0,", "{p: getPointDistance(state, p) for p in self.POINTS} for line in", "-= math.pi * (450 - front_distance) / 1800 # divide", "\"Driving to \" + str(self.required_odo)) for wheel_name in WHEEL_NAMES: if", "= self.wall_point_kind points[self.right_point_index] = self.wall_point_kind self.setAngle(mline.angle, distances) elif plong1 ==", "= distance_from_wall - self.distance angle = 0 if abs(distance_error) <", "self.lines, self.points) updateUndefinedWall(self.front_wall, self.right_wall, -PIhalf, self.left_wall) updateUndefinedWall(self.back_wall, self.right_wall, PIhalf, self.left_wall)", "if front_distance < 450: angle -= math.pi * (450 -", "* math.pi / 180 steer_speed = speed * speed_steer_fudge_factor distance_speed", "225 self.left_corner_action = MazeTurnAroundCornerAction(self, self.LEFT, self.distance, self.speed, self) self.right_corner_action =", "distance_sensor_angle self.ds_index = distance_sensor_index self.wall_point_kind = wall_point_kind self.left_mid_point_index = left_mid_point_index", "not None and pmid == MazeAttitude.UNKNOWN and plong2 == MazeAttitude.UNKNOWN:", "-PIhalf), self.L135_180: self.Line(self.L135_180, 135, 180, -1, math.pi), self.L180_225: self.Line(self.L180_225, 225,", "pyroslib.publish(\"move/steer\", \"300 120\") def end(self): super(MoveForwardOnOdo, self).end() def next(self): state", "= 150 # Values that worked speed=150, steer=5-7, dist=4 #", "DriverForwardForTimeAction(Action): def __init__(self, agent, time, speed, next_action): super(DriverForwardForTimeAction, self).__init__(agent) self.time", "agent, time, speed, next_action): super(DriverForwardForTimeAction, self).__init__(agent) self.time = time self.speed", 
"wall_point_kind, left_mid_point_index, left_point_index, mid_point_index, right_point_index): self.ds_angle = distance_sensor_angle self.ds_index =", "if plong2 == MazeAttitude.UNKNOWN: points[self.right_point_index] = self.wall_point_kind self.setAngle(mline.angle, distances) else:", "(450 - front_distance) / 1800 # divide with 10 and", "angle)) else: if point == MazeAttitude.LEFT_WALL: wall = \"left wall\"", "/ math.pi))) else: print(\"Wall {:3d} -> has angle {:3d} and", "/ math.pi), int(state.right_front_distance_of_wall), int(steer_speed), int(distance_speed), int(distance), int(angle), int(state.heading.heading), float(state.wheel_orientations.orientations['fl']) ))", "distance) elif dlong1 >= dlong2 and plong2 != MazeAttitude.UNKNOWN and", "- not calculated\".format(w.ds_angle)) else: if w.distance is None: print(\"Wall {:3d}", "dlong2 and plong2 == MazeAttitude.UNKNOWN and pmid == MazeAttitude.UNKNOWN: points[self.mid_point_index]", ")) def getActionName(self): return \"Chicane \" + (\"L\" if self.left_or_right", "self.left_or_right = self.RIGHT elif self.left_or_right == self.RIGHT and diagonal_distance >", "+ state.left_wall_angle) else: expected_diagonal_distance = front_distance * math.cos(state.left_wall_angle) * SQRT2", "pyroslib.logging import log, LOG_LEVEL_ALWAYS, LOG_LEVEL_INFO, LOG_LEVEL_DEBUG from rover import WheelOdos,", "= distance def start(self): super(MoveForwardOnOdo, self).start() state = self.rover.getRoverState() for", "None: log(LOG_LEVEL_INFO, \"Finished turning around the corner - invoking next", "ex: log(LOG_LEVEL_ALWAYS, \"Domain error wa={: 3d} dw={: 4d} de={: 4d}", "dw={: 4d} de={: 4d} d={: 4d} s={: 3d}\".format(int(wall_angle), int(distance_from_wall), int(distance_error),", "abs(math.sin(mline.angle) / SQRT2) self.setAngleAndDistance(angle, distance) elif dlong1 >= dlong2 and", "= distances[lline.long_point_index] dmid = distances[mline.short_point_index] dlong2 = distances[mline.long_point_index] plong1 =", "self.LEFT_WALL, 
180, 225, 270, 315) self.front_wall = self.Wall(0, 0, self.FRONT_WALL,", "is not None and short_distance is not None: lsqrt2 =", "stop_action self.required_odo = {'fl': 0, 'fr': 0, 'bl': 0, 'br':", "> 0 and distance_error > distance_speed: angle = math.pi /", "str(self.distance) + \" \" + str(self.speed)) def end(self): super(MazeTurnAroundCornerAction, self).end()", "int(distance_speed), int(distance), int(angle), int(state.heading.heading), float(state.wheel_orientations.orientations['fl']) )) def getActionName(self): return \"Corridor\"", "\" ticks.\") return self return self.next_action if __name__ == \"__main__\":", "self.wall_point_kind self.setAngle(lline.angle, distances) elif dlong1 >= dlong2 and plong2 ==", "last_distance = state.radar.last_radar[angle] if abs(distance - last_distance) < 100: return", "distance): self.angle = angle self.distance = distance def tryFindingWall(self, distances,", "self.right_corner_action = MazeTurnAroundCornerAction(self, self.RIGHT, self.distance, self.speed, DriverForwardForTimeAction(self, 10, self.speed, None))", "self.wall_point_kind if plong2 == MazeAttitude.UNKNOWN: points[self.right_point_index] = self.wall_point_kind self.setAngle(mline.angle, distances)", "0, 180: 0, 225: 0, 270: 0, 315: 0} def", "distance < 0: distance = -150 distance_from_wall = state.left_wall_distance distance_error", "{:3d} -> has angle {:3d} but is too far -", "points[self.left_point_index] = self.wall_point_kind points[self.mid_point_index] = self.wall_point_kind self.setAngle(lline.angle, distances) elif dlong1", "def calcAngle(self, distances): long_distance = distances[self.long_point_index] short_distance = distances[self.short_point_index] if", "distance_from_wall = state.radar.radar[270] distance_error = distance_from_wall - self.distance angle =", "3d} odo={:7.2f}\".format( float(time.time()), int(front_distance), int(wall_angle * 180 / math.pi), int(distance_from_wall),", "long_distance = distances[self.long_point_index] 
short_distance = distances[self.short_point_index] if long_distance is not", "long_distance / SQRT2 self.angle = MazeAttitude.normAngle(math.atan2(lsqrt2, lsqrt2 - short_distance) *", "180 self.selected_line = None self.angle = None self.distance = None", "distance self.speed = speed self.next_action = next_action if self.left_or_right ==", "self.L270_315: self.Line(self.L270_315, 315, 270, 1, -PIhalf)} self.right_wall = self.Wall(90, 2,", "= 1000000000 distance_from_wall = state.radar.radar[270] distance_error = distance_from_wall - self.distance", "self.LEFT and self.error > 0: return self elif self.left_or_right ==", "def next(self): left_diagonal_distance = state.radar.radar[315] front_distance = state.radar.radar[0] if state.radar.status[0]", "SQRT2) self.setAngleAndDistance(angle, distance) elif dlong1 >= dlong2 and plong2 !=", "<= distance < 150: distance = 150 elif -150 <", "180 / math.pi))) else: print(\"Wall {:3d} -> has angle {:3d}", "[0, 45, 90, 135, 180, 225, 270, 315] WALLS =", "self.wall_point_kind if pmid == MazeAttitude.UNKNOWN: points[self.mid_point_index] = self.wall_point_kind if plong2", "* 180 / math.pi))) else: print(\"Wall {:3d} -> has angle", "0, 315: 0} def calculate(self, state): def getPointDistance(state, angle): distance", "= 315 LINES = [L0_45, L45_90, L90_135, L135_180, L180_225, L225_270,", "state.radar.radar[0] < 1.0 or state.radar.radar[315] < 1.0 or state.radar.radar[45] <", "RoverState(None, None, None, radar, None, None) def printWallLines(a): if attitude.lines[a].angle", "0 else: if self.is_front_or_back: self.distance = abs(int(math.sin(angle) * distance)) else:", "270 L315_0 = 315 LINES = [L0_45, L45_90, L90_135, L135_180,", "wall_processing_order = sorted(wls, key=lambda wall: self.distances[wall.ds_angle]) for wall in wall_processing_order:", "1), self.speed, self) self.right_corner_action = MazeTurnAroundCornerAction(self, self.RIGHT, int(self.distance * 1),", "int(distance_speed), int(distance), int(angle), 
int(state.heading.heading), float(state.wheel_orientations.orientations['fl']) )) def getActionName(self): return \"Chicane", "# self.right_corner_action = MazeTurnAroundCornerAction(self.odo, self.radar, self.heading, self.RIGHT, self.distance, self.speed, DriverForwardForTimeActoun(10,", "int(self.distance * 1), self.speed, self) # self.right_corner_action = MazeTurnAroundCornerAction(self.odo, self.radar,", "angle self.distance = distance def tryFindingWall(self, distances, lines, points): lmline", "dist={: 4d} angle={: 3d} heading={: 3d} odo={:7.2f}\".format( float(time.time()), int(front_distance), int(0", "135 else: self.a1 = 315 self.a2 = 270 self.a3 =", "forward for \" + str(self.time) + \" ticks.\") def end(self):", "class DriverForwardForTimeAction(Action): def __init__(self, agent, time, speed, next_action): super(DriverForwardForTimeAction, self).__init__(agent)", "self.setAngle(mline.angle, distances) elif lline.angle is not None and plong1 ==", "not None] wall_processing_order = sorted(wls, key=lambda wall: self.distances[wall.ds_angle]) for wall", "135, 180, 225) self.left_gap = self.NO_GAP self.right_gap = self.NO_GAP self.walls", "h={:07.3f} lh={:07.3f} dh={:07.3f} rh={:07.3f} e={:07.3f}\" .format(self.speed, heading, last_heading, angleDiference(heading, last_heading),", "if state.left_front_distance_of_wall > 100 and front_distance < 550: expected_diagonal_distance =", "self.RIGHT elif self.left_or_right == self.RIGHT and diagonal_distance > 800: log(LOG_LEVEL_INFO,", "/ math.pi), int(0), int(steer_speed), int(distance_speed), int(distance), int(angle), int(state.heading.heading), float(state.wheel_orientations.orientations['fl']) ))", "speed, next_action=None): super(MazeTurnAroundCornerAction, self).__init__(agent) self.left_or_right = left_or_right self.distance = distance", "= lines[self.right_point_index] dlong1 = distances[lline.long_point_index] dmid = distances[mline.short_point_index] dlong2 =", "and front_distance > 300 and 
left_diagonal_distance > expected_diagonal_distance * 1.2:", "\" + self.next_action.getActionName()) else: log(LOG_LEVEL_INFO, \"Finishing turning - no next", "= 1 SIDE_GAP = 2 POINTS = [0, 45, 90,", "180 / math.pi) point = attitude.points[a] if point is None:", "@staticmethod def normAngle(a): if a > PIhalf: a = a", "== MazeAttitude.UNKNOWN and pmid == MazeAttitude.UNKNOWN and plong2 != MazeAttitude.UNKNOWN:", "0, 45, 90, 135) self.left_wall = self.Wall(270, 6, self.LEFT_WALL, 180,", "import Action, PID SQRT2 = math.sqrt(2) PIhalf = math.pi /", "/ 2 class MazeAttitude: UNKNOWN = 0 LEFT_WALL = 1", "class ChicaneAction(MazeAction): def __init__(self, agent, left_or_right, distance, speed, next_action=None): super(ChicaneAction,", "= self.wall_point_kind self.setAngle(lline.angle, distances) elif mline.angle is not None and", "speed_distance_fudge_factor if self.left_or_right == self.RIGHT: distance = -1000000000 distance_from_wall =", "preferable_wall.angle is not None: wall.setAngleAndDistance(self.normAngle(preferable_wall.angle + wall_adjust), self.distances[wall.mid_point_index]) else: wall.setAngleAndDistance(self.normAngle(second_wall.angle", "4d} s={: 3d}\".format(int(wall_angle), int(distance_from_wall), int(distance_error), int(distance), int(speed))) distance = int(distance)", "class Line: def __init__(self, line_index, long_point_index, short_point_index, factor, adjust): self.line_index", "with 10 and by 180 -> 450/10 - 45deg elif", "next_action def start(self): self.rover.command(pyroslib.publish, self.speed, 0) # pyroslib.publish(\"move/drive\", \"0 \"", "else: self.distance = abs(int(math.cos(angle) * distance)) def setAngleAndDistance(self, angle, distance):", "state.right_wall_angle if -min_angle < state.right_wall_angle < min_angle: distance = 1000000000", "/ math.pi) point = attitude.points[a] if point is None: print(\"{:3d}", "0, 180: 0, 225: 0, 270: 0, 315: 0} attitude", "attitude.points: printWallLines(p) for w in attitude.walls: printWall(w) 
print(\"----------------------------------------------------------\") # attitude.calculate(state)", "< 100: return distance return None def updateUndefinedWall(wall, preferable_wall, wall_adjust,", "4d} wa={: 3d} dist_w={: 4d} dist_err={: 3d} la={: 3d} ld={:", "d={:4d} s={:2d} delta={:4d}\".format(front_distance, state.radar.status[0], state.radar.radar_deltas[0])) else: if state.left_front_distance_of_wall > 100", "setAngle(self, angle, distances): self.angle = angle distance = distances[self.mid_point_index] if", "SQRT2 * 10, 90: 10, 135: SQRT2 * 10, 180:", "self.speed, angle, distance) # pyroslib.publish(\"move/steer\", str(distance) + \" \" +", "# self.speed = 150 # 150 speed = 50 #", "= state.right_wall_distance distance_error = distance_from_wall - self.distance angle = 0", "int(distance_error), int(distance), int(speed))) distance = int(distance) angle = int(angle *", "else: self.a1 = 315 self.a2 = 270 self.a3 = 225", "radar_status = {0: 0, 45: 0, 90: 0, 135: 0,", "log(LOG_LEVEL_INFO, \"Finished turning around the corner - invoking next action", "no next action spectified.\") return self.next_action def execute(self): state =", "None, None) def printWallLines(a): if attitude.lines[a].angle is None: print(\"{:3d} ->", "180: 10, 225: SQRT2 * 10, 270: 10, 315: SQRT2", "0} attitude = MazeAttitude() radar = Radar(0, radar_values, radar_status, Radar(0,", "end(self): super(MazeTurnAroundCornerAction, self).end() def next(self): heading = state.heading.heading self.error =", "{0: 10, 45: SQRT2 * 10, 90: 10, 135: SQRT2", ">= self.required_odo[wheel_name]: do_stop = True if state.radar.radar[0] < 1.0 or", "math.pi / 4 if front_distance < 450: angle += math.pi", "distances[self.left_point_index] * abs(math.sin(mline.angle) / SQRT2) self.setAngleAndDistance(angle, distance) elif lline.angle is", "agent, left_or_right, distance, speed, next_action=None): super(ChicaneAction, self).__init__(agent) self.left_or_right = left_or_right", "math.pi), 
int(state.right_front_distance_of_wall), int(steer_speed), int(distance_speed), int(distance), int(angle), int(state.heading.heading), float(state.wheel_orientations.orientations['fl']) )) def", "state = self.rover.getRoverState() front_distance = state.radar.radar[0] gain = 60 offset", "180, 225) self.left_gap = self.NO_GAP self.right_gap = self.NO_GAP self.walls =", "315, 0, 45) self.back_wall = self.Wall(180, 4, self.BACK_WALL, 90, 135,", "\" + str(self.required_odo)) for wheel_name in WHEEL_NAMES: if state.wheel_odos[wheel_name] >=", "corner 2 - turning left, fd={: 4d} ld={: 4d} rd={:", "10, 90: 10, 135: SQRT2 * 10, 180: 10, 225:", "{:3d} angle\".format(a, angle)) else: if point == MazeAttitude.LEFT_WALL: wall =", "for wheel_name in WHEEL_NAMES: if state.wheel_odos[wheel_name] >= self.required_odo[wheel_name]: do_stop =", "angle = MazeAttitude.normAngle(mline.angle + PIhalf) distance = distances[self.left_point_index] * abs(math.sin(mline.angle)", "+ str(angle)) wheel_orientations = state.wheel_odos.odos log(LOG_LEVEL_INFO, \"{:16.3f}: dist_f={: 4d} wa={:", "in attitude.walls: printWall(w) print(\"----------------------------------------------------------\") # attitude.calculate(state) # printWalls() # #", "math.asin(distance_error / distance_speed) except BaseException as ex: log(LOG_LEVEL_ALWAYS, \"Domain error", "self.a3 = 225 self.left_corner_action = MazeTurnAroundCornerAction(self, self.LEFT, int(self.distance * 1),", "None self.distance = None def setAngle(self, angle, distances): self.angle =", "if distance < 1: self.distance = 0 else: if self.is_front_or_back:", "points[self.left_point_index] = self.wall_point_kind if pmid == MazeAttitude.UNKNOWN: points[self.mid_point_index] = self.wall_point_kind", "abs(distance - last_distance) < 100: return distance return None def", "turning, lfd={: 4d} fd={: 4d} dd={: 4d} ed={: 4d}\".format(int(state.left_front_distance_of_wall), int(front_distance),", "and diagonal_distance > 800: log(LOG_LEVEL_INFO, \"Found 
second part of chicane,", "self.selected_line = None self.angle = None self.distance = None def", "pmid == MazeAttitude.UNKNOWN: points[self.left_point_index] = self.wall_point_kind points[self.mid_point_index] = self.wall_point_kind self.setAngle(lline.angle,", "state.radar.radar[270] + state.radar.radar[315] right_distances = state.radar.radar[90] + state.radar.radar[45] if left_distances", "int(0), int(0 * 180 / math.pi), int(0), int(steer_speed), int(distance_speed), int(distance),", "0 self.requested_heading = 0 self.pid = None self.next_action = next_action", "MazeAttitude.ANGLE_TOLLERANCE: points[self.mid_point_index] = points[rline.long_point_index] angle = MazeAttitude.normAngle(mline.angle + PIhalf) distance", "MazeAction.RIGHT: self.a1 = 45 self.a2 = 90 self.a3 = 135", "# state.radar.radar[270] = SQRT2 * 13 # state.radar.radar[225] = SQRT2", "wall_angle = state.left_wall_angle if -min_angle < state.left_wall_angle < min_angle: distance", "wall_processing_order: wall.tryFindingWall(self.distances, self.lines, self.points) updateUndefinedWall(self.front_wall, self.right_wall, -PIhalf, self.left_wall) updateUndefinedWall(self.back_wall, self.right_wall,", "self.start_heading) log(LOG_LEVEL_INFO, \"Starting to turn around corner at distance {:04d}", "as ex: log(LOG_LEVEL_ALWAYS, \"Domain error wa={: 3d} dw={: 4d} de={:", "to turn around corner at distance {:04d} at speed {:04d},", "# 150 speed = 50 # mm/second - TODO use", "3d} dist_w={: 4d} dist_err={: 3d} la={: 3d} ld={: 3d} ra={:", "self.distance, self.speed, DriverForwardForTimeActoun(10, self.speed, None)) self.been_in_chicane = False def start(self):", "self.RIGHT, int(self.distance * 1), self.speed, self) # self.right_corner_action = MazeTurnAroundCornerAction(self.odo,", "if a > PIhalf: a = a - math.pi elif", "MazeAttitude.UNKNOWN and plong2 != MazeAttitude.UNKNOWN: points[self.left_point_index] = self.wall_point_kind points[self.mid_point_index] =", "angle {:3d} and is at {:3d}\".format(w.ds_angle, 
int(w.angle * 180 /", "execute(self): state = self.rover.getRoverState() front_distance = state.radar.radar[0] gain = 60", "- math.pi elif a <= -PIhalf: a = a +", "self.points = {0: 0, 45: 0, 90: 0, 135: 0,", "distance def tryFindingWall(self, distances, lines, points): lmline = lines[self.left_mid_point_index] lline", "a class Line: def __init__(self, line_index, long_point_index, short_point_index, factor, adjust):", "\"Found second part of chicane, rfd={: 4d}\".format(int(diagonal_distance))) self.left_or_right = self.RIGHT", "225: 0, 270: 0, 315: 0} attitude = MazeAttitude() radar", "self).start() state = self.rover.getRoverState() self.start_heading = state.heading.heading self.requested_heading = normaiseAngle(self.start_heading", "WHEEL_NAMES from rover import normaiseAngle, angleDiference from challenge_utils import Action,", "= state.heading.heading self.error = self.pid.process(self.requested_heading, heading) if self.left_or_right == self.LEFT", "for line in self.lines: self.lines[line].calcAngle(self.distances) wls = [self.walls[w_ds_angle] for w_ds_angle", "leaging, rfd={: 4d}\".format(int(diagonal_distance))) return self.next_action return self def execute(self): state", "MazeTurnAroundCornerAction(self, self.RIGHT, self.distance, self.speed, DriverForwardForTimeAction(self, 10, self.speed, None)) def start(self):", "- turning left, fd={: 4d} ld={: 4d} rd={: 4d}\".format(int(front_distance), int(left_distances),", "0, 315: 0} self.distances = {0: 0, 45: 0, 90:", "= 90 L135_180 = 135 L180_225 = 180 L225_270 =", "\"__main__\": from rover import Radar, RoverState radar_values = {0: 10,", "else \"R\") class MazeCorridorAction(MazeAction): def __init__(self, agent, left_or_right, distance, speed,", "int(speed))) else: wall_angle = state.left_wall_angle if -min_angle < state.left_wall_angle <", "\"Forward ODO\" class MazeAction(Action): LEFT = -1 RIGHT = 1", "and is at {:3d}\".format(w.ds_angle, int(w.angle * 180 / math.pi), w.distance))", 
"super(MoveForwardOnOdo, self).end() def next(self): state = self.rover.getRoverState() do_stop = False", "4d} de={: 4d} d={: 4d} s={: 3d}\".format(int(wall_angle), int(distance_from_wall), int(distance_error), int(distance),", "self.requested_heading)) self.rover.command(pyroslib.publish, self.speed, 0, self.distance) # pyroslib.publish(\"move/steer\", str(self.distance) + \"", "w.angle is None: print(\"Wall {:3d} -> is too far -", "= math.pi / 4 if front_distance < 450: angle +=", "self) # self.right_corner_action = MazeTurnAroundCornerAction(self.odo, self.radar, self.heading, self.RIGHT, self.distance, self.speed,", "and self.distances[wall.ds_angle] is not None: if preferable_wall.angle is not None:", "\" \" + str(self.speed)) def end(self): super(MazeTurnAroundCornerAction, self).end() def next(self):", "distances, lines, points): lmline = lines[self.left_mid_point_index] lline = lines[self.left_point_index] mline", "self class ChicaneAction(MazeAction): def __init__(self, agent, left_or_right, distance, speed, next_action=None):", "steer_speed = speed * speed_steer_fudge_factor distance_speed = speed * speed_distance_fudge_factor", "= wall.wall_point_kind self.distances = {p: getPointDistance(state, p) for p in", "def setAngle(self, angle, distances): self.angle = angle distance = distances[self.mid_point_index]", "None and short_distance is not None: lsqrt2 = long_distance /", "next_action=None): super(MazeCorridorAction, self).__init__(agent) self.left_or_right = left_or_right self.distance = distance self.speed", "wls = [self.walls[w_ds_angle] for w_ds_angle in self.WALLS if self.distances[w_ds_angle] is", "super(MazeCorridorAction, self).start() self.been_in_chicane = False def end(self): super(MazeCorridorAction, self).end() def", "log(LOG_LEVEL_DEBUG, \"Reset odo to \" + str(self.required_odo) + \"; starting...\")", "self.start_heading = state.heading.heading self.requested_heading = normaiseAngle(self.start_heading + 80 * -(1", "45, 90, 135, 180, 225, 
270, 315] WALLS = [90,", "3d} odo={:7.2f}\".format( float(time.time()), int(front_distance), int(0 * 180 / math.pi), int(distance_from_wall),", "super(MazeTurnAroundCornerAction, self).__init__(agent) self.left_or_right = left_or_right self.distance = distance * (1", "= {0: 10, 45: SQRT2 * 10, 90: 10, 135:", "= left_point_index self.mid_point_index = mid_point_index self.right_point_index = right_point_index self.is_front_or_back =", "0, self.distance) # pyroslib.publish(\"move/steer\", str(self.distance) + \" \" + str(self.speed))", "L135_180 = 135 L180_225 = 180 L225_270 = 225 L270_315", "\"Finishing turning - no next action spectified.\") return self.next_action def", "a > PIhalf: a = a - math.pi elif a", "{:07.3f}\".format(self.distance, self.speed, self.start_heading, self.requested_heading)) self.rover.command(pyroslib.publish, self.speed, 0, self.distance) # pyroslib.publish(\"move/steer\",", "ld={: 4d} rd={: 4d}\".format(int(front_distance), int(left_distances), int(right_distances))) return self.right_corner_action if state.right_front_distance_of_wall", "if self.left_or_right == self.RIGHT: wall_angle = state.right_wall_angle if -min_angle <", "steer_speed / state.right_wall_angle if 0 <= distance < 150: distance", "self.LEFT else \"R\") class MazeCorridorAction(MazeAction): def __init__(self, agent, left_or_right, distance,", "self) self.right_corner_action = MazeTurnAroundCornerAction(self, self.RIGHT, self.distance, self.speed, DriverForwardForTimeAction(self, 10, self.speed,", "wheel_name in WHEEL_NAMES: self.required_odo[wheel_name] = distance def start(self): super(MoveForwardOnOdo, self).start()", "= self.rover.getRoverState() self.start_heading = state.heading.heading self.requested_heading = normaiseAngle(self.start_heading + 80", "-math.pi / 4 else: try: angle = math.asin(distance_error / distance_speed)", "0, 225: 0, 270: 0, 315: 0} attitude = MazeAttitude()", "= lines[self.mid_point_index] rline = lines[self.right_point_index] dlong1 = 
distances[lline.long_point_index] dmid =", "self.lines: self.lines[line].calcAngle(self.distances) wls = [self.walls[w_ds_angle] for w_ds_angle in self.WALLS if", "# state.radar.radar[315] = SQRT2 * 17 # state.radar.radar[270] = SQRT2", "* 0.9 # state.radar.radar[315] = SQRT2 * 17 # state.radar.radar[270]", "self.next_action return self def execute(self): state = self.rover.getRoverState() front_distance =", "updateUndefinedWall(wall, preferable_wall, wall_adjust, second_wall): if wall.angle is None and self.distances[wall.ds_angle]", "0, 45: 0, 90: 0, 135: 0, 180: 0, 225:", "= points[self.right_point_index] if dlong1 < dlong2 and plong1 != MazeAttitude.UNKNOWN", "= distances[self.long_point_index] short_distance = distances[self.short_point_index] if long_distance is not None", "long_point_index, short_point_index, factor, adjust): self.line_index = line_index self.short_point_index = short_point_index", "BaseException as ex: log(LOG_LEVEL_ALWAYS, \"Domain error wa={: 3d} dw={: 4d}", "speed * speed_distance_fudge_factor if self.left_or_right == self.RIGHT: wall_angle = state.right_wall_angle", "self.adjust = adjust self.angle = None def calcAngle(self, distances): long_distance", "= 90 self.a3 = 135 else: self.a1 = 315 self.a2", "attitude.walls: printWall(w) print(\"----------------------------------------------------------\") # attitude.calculate(state) # printWalls() # # state.radar.radar[0]", "180 L225_270 = 225 L270_315 = 270 L315_0 = 315", "< 450: angle += math.pi * (450 - front_distance) /", "* self.factor + self.adjust) else: self.angle = None class Wall:", "log(LOG_LEVEL_INFO, \"Starting to turn around corner at distance {:04d} at", ".format(self.speed, heading, last_heading, angleDiference(heading, last_heading), self.requested_heading, self.error)) def getActionName(self): return", "ra={: 3d} rd={: 3d} s_spd={: 3d} dist_spd={: 3d} dist={: 4d}", "self).__init__(agent) self.left_or_right = left_or_right self.distance = distance * (1 if", "self.speed = 
speed self.next_action = next_action if self.left_or_right == MazeAction.RIGHT:", "4 min_angle = 1 * math.pi / 180 steer_speed =", "def getActionName(self): return \"Turn-Around-Corner\" class DriverForwardForTimeAction(Action): def __init__(self, agent, time,", "pmid == MazeAttitude.UNKNOWN and plong2 == MazeAttitude.UNKNOWN: points[self.mid_point_index] = self.wall_point_kind", "MazeAttitude.UNKNOWN and mline.angle * MazeAttitude.ANGLE_TOLLERANCE >= rline.angle >= mline.angle /", "+ str(self.speed) + \" \" + str(angle)) wheel_orientations = state.wheel_odos.odos", "and state.radar.radar_deltas[0] < 0: left_distances = state.radar.radar[270] + state.radar.radar[315] right_distances", "plong1 == MazeAttitude.UNKNOWN: points[self.left_point_index] = self.wall_point_kind if pmid == MazeAttitude.UNKNOWN:", "self.Line(self.L225_270, 225, 270, -1, PIhalf), self.L270_315: self.Line(self.L270_315, 315, 270, 1,", "WheelOdos.normaliseOdo(state.wheel_odos[wheel] + self.required_odo[wheel]) log(LOG_LEVEL_DEBUG, \"Reset odo to \" + str(self.required_odo)", "angle = -math.asin(distance_error / distance_speed) except BaseException as ex: log(LOG_LEVEL_ALWAYS,", "# pyroslib.publish(\"move/drive\", \"0 \" + str(self.speed)) log(LOG_LEVEL_INFO, \"Going forward for", "state.heading.heading last_heading = self.last_heading self.last_heading = heading log(LOG_LEVEL_INFO, \"Turning speed={:04d}", "not calculated\".format(w.ds_angle)) else: if w.distance is None: print(\"Wall {:3d} ->", "self.start_heading = 0 self.last_heading = 0 self.requested_heading = 0 self.pid", "(1 if left_or_right == self.RIGHT else -1) self.speed = speed", "if abs(distance - last_distance) < 100: return distance return None", "in attitude.points: printWallLines(p) for w in attitude.walls: printWall(w) print(\"----------------------------------------------------------\") #", "\"R\") class MazeCorridorAction(MazeAction): def __init__(self, agent, left_or_right, distance, speed, next_action=None):", "= 
state.right_wall_angle if -min_angle < state.right_wall_angle < min_angle: distance =", "+ state.radar.radar[315] right_distances = state.radar.radar[90] + state.radar.radar[45] if left_distances >", "int(right_distances))) return self.right_corner_action if state.right_front_distance_of_wall > 100 and state.left_front_distance_of_wall >", "mline.angle >= lline.angle / MazeAttitude.ANGLE_TOLLERANCE: if plong1 == MazeAttitude.UNKNOWN: points[self.left_point_index]", "str(self.required_odo) + \"; starting...\") self.rover.command(pyroslib.publish, 300, 120) # pyroslib.publish(\"move/steer\", \"300", "is None and self.distances[wall.ds_angle] is not None: if preferable_wall.angle is", "5 # state.radar.radar[45] = SQRT2 * 5 * 0.9 #", "far - distance not calculated\".format(w.ds_angle, int(w.angle * 180 / math.pi)))", "MazeAttitude.UNKNOWN and pmid == MazeAttitude.UNKNOWN: points[self.mid_point_index] = self.wall_point_kind points[self.right_point_index] =", "< 0: distance = -150 distance_from_wall = state.left_wall_distance distance_error =", "/ 180 steer_speed = speed * speed_steer_fudge_factor distance_speed = speed", "wall_adjust, second_wall): if wall.angle is None and self.distances[wall.ds_angle] is not", "and pmid == MazeAttitude.UNKNOWN and plong2 != MazeAttitude.UNKNOWN: points[self.left_point_index] =", "wheel in self.required_odo: self.required_odo[wheel] = WheelOdos.normaliseOdo(state.wheel_odos[wheel] + self.required_odo[wheel]) log(LOG_LEVEL_DEBUG, \"Reset", "distances) elif plong1 == MazeAttitude.UNKNOWN and pmid == MazeAttitude.UNKNOWN and", "s={: 3d}\".format(int(0), int(distance_from_wall), int(distance_error), int(distance), int(speed))) distance = int(distance) angle", "self.left_corner_action = MazeTurnAroundCornerAction(self, self.LEFT, int(self.distance * 1), self.speed, self) self.right_corner_action", "self.NO_GAP self.right_gap = self.NO_GAP self.walls = {self.right_wall.ds_angle: self.right_wall, self.left_wall.ds_angle: self.left_wall,", 
"self.wall_point_kind points[self.right_point_index] = self.wall_point_kind self.setAngle(mline.angle, distances) elif plong1 == MazeAttitude.UNKNOWN", "* 10, 270: 10, 315: SQRT2 * 10} radar_status =", "315 LINES = [L0_45, L45_90, L90_135, L135_180, L180_225, L225_270, L270_315,", "self.speed, None)) def start(self): super(ChicaneAction, self).start() def end(self): super(ChicaneAction, self).end()", "e={:07.3f}\" .format(self.speed, heading, last_heading, angleDiference(heading, last_heading), self.requested_heading, self.error)) def getActionName(self):", "self def execute(self): state = self.rover.getRoverState() front_distance = state.radar.radar[0] gain", "action \" + self.next_action.getActionName()) else: log(LOG_LEVEL_INFO, \"Finishing turning - no", "rline = lines[self.right_point_index] dlong1 = distances[lline.long_point_index] dmid = distances[mline.short_point_index] dlong2", "if do_stop: return self.stop_action else: return self def execute(self): pass", "-> line at {:3d} angle belogs to {:s}\".format(a, angle, wall))", "def end(self): pass def next(self): if self.time > 0: self.time", "None)) self.been_in_chicane = False def start(self): super(MazeCorridorAction, self).start() self.been_in_chicane =", "180: 0, 225: 0, 270: 0, 315: 0} def calculate(self,", "is not None: if preferable_wall.angle is not None: wall.setAngleAndDistance(self.normAngle(preferable_wall.angle +", "state = self.rover.getRoverState() self.start_heading = state.heading.heading self.requested_heading = normaiseAngle(self.start_heading +", "self.left_or_right = left_or_right self.distance = distance self.speed = speed self.next_action", "3d} ld={: 3d} ra={: 3d} rd={: 3d} s_spd={: 3d} dist_spd={:", "state.left_front_distance_of_wall > 100 and front_distance < 700: log(LOG_LEVEL_INFO, \"Found final", "MazeAttitude.UNKNOWN: points[self.mid_point_index] = self.wall_point_kind points[self.right_point_index] = self.wall_point_kind self.setAngle(mline.angle, distances) def", "+ PIhalf) 
distance = distances[self.left_point_index] * abs(math.sin(mline.angle) / SQRT2) self.setAngleAndDistance(angle,", "/ MazeAttitude.ANGLE_TOLLERANCE: if plong1 == MazeAttitude.UNKNOWN: points[self.left_point_index] = self.wall_point_kind if", "angle = int(angle * 180 / math.pi) self.rover.command(pyroslib.publish, self.speed, angle,", "to finish, rfd={: 4d} fd={: 4d} \".format(int(state.right_front_distance_of_wall), int(front_distance))) return self.right_corner_action", "= math.sqrt(2) PIhalf = math.pi / 2 class MazeAttitude: UNKNOWN", "== MazeAttitude.UNKNOWN: points[self.mid_point_index] = self.wall_point_kind if plong2 == MazeAttitude.UNKNOWN: points[self.right_point_index]", "self.pid = PID(1, 0.0, 0.05, 1, 0, diff_method=angleDiference) self.pid.process(self.requested_heading, self.start_heading)", "270: 0, 315: 0} self.distances = {0: 0, 45: 0,", "= self.wall_point_kind if pmid == MazeAttitude.UNKNOWN: points[self.mid_point_index] = self.wall_point_kind if", "135, 180, 225, 270, 315] WALLS = [90, 270, 0,", "elif plong1 != MazeAttitude.UNKNOWN and pmid == MazeAttitude.UNKNOWN and plong2", "= next_action def start(self): self.rover.command(pyroslib.publish, self.speed, 0) # pyroslib.publish(\"move/drive\", \"0", "line at {:3d} angle belogs to {:s}\".format(a, angle, wall)) def", "-PIhalf)} self.right_wall = self.Wall(90, 2, self.RIGHT_WALL, 0, 45, 90, 135)", "int(0 * 180 / math.pi), int(0), int(0 * 180 /", "= \"front wall\" elif point == MazeAttitude.BACK_WALL: wall = \"back", "== MazeAttitude.RIGHT_WALL: wall = \"right wall\" elif point == MazeAttitude.FRONT_WALL:", "SQRT2 * 13 # state.radar.radar[225] = SQRT2 * 12 #", "- invoking next action \" + self.next_action.getActionName()) else: log(LOG_LEVEL_INFO, \"Finishing", "10, 180: 10, 225: SQRT2 * 10, 270: 10, 315:", "self.distance = 0 else: if self.is_front_or_back: self.distance = abs(int(math.sin(angle) *", "and by 180 -> 450/10 - 45deg elif distance_error <", "SQRT2 * 10, 270: 10, 315: SQRT2 * 10} 
radar_status", "if state.radar.radar[0] < 1.0 or state.radar.radar[315] < 1.0 or state.radar.radar[45]", "if lline.angle * MazeAttitude.ANGLE_TOLLERANCE >= mline.angle >= lline.angle / MazeAttitude.ANGLE_TOLLERANCE:", "> 800: log(LOG_LEVEL_INFO, \"Found end ofchicane - leaging, rfd={: 4d}\".format(int(diagonal_distance)))", "10} radar_status = {0: 0, 45: 0, 90: 0, 135:", "# state.radar.radar[45] = SQRT2 * 5 * 0.9 # state.radar.radar[315]", "/ math.pi), int(distance_from_wall), int(distance_error), int(0 * 180 / math.pi), int(0),", "= self.Wall(90, 2, self.RIGHT_WALL, 0, 45, 90, 135) self.left_wall =", "updateUndefinedWall(self.left_wall, self.front_wall, -PIhalf, self.back_wall) # TODO calc gaps class MoveForwardOnOdo(Action):", "\"Finished turning around the corner - invoking next action \"", "agent, stop_action=None): super(MoveForwardOnOdo, self).__init__(agent) self.stop_action = stop_action self.required_odo = {'fl':", "TODO use odo to update to correct value! speed_steer_fudge_factor =", "delta={:4d}\".format(front_distance, state.radar.status[0], state.radar.radar_deltas[0])) else: if state.left_front_distance_of_wall > 100 and front_distance", "4 + state.left_wall_angle) else: expected_diagonal_distance = front_distance * math.cos(state.left_wall_angle) *", "angle, distance) # pyroslib.publish(\"move/steer\", str(distance) + \" \" + str(self.speed)", "# import math import pyroslib import pyroslib.logging import time from", "self.error)) def getActionName(self): return \"Turn-Around-Corner\" class DriverForwardForTimeAction(Action): def __init__(self, agent,", "\"Chicane \" + (\"L\" if self.left_or_right == self.LEFT else \"R\")", "super(MazeAction, self).__init__(agent) def check_next_action_conditions(self): return self class ChicaneAction(MazeAction): def __init__(self,", "time self.speed = speed self.next_action = next_action def start(self): self.rover.command(pyroslib.publish,", "< 1.0 or state.radar.radar[45] < 1.0: do_stop = True if", "def 
printWallLines(a): if attitude.lines[a].angle is None: print(\"{:3d} -> point too", "self.FRONT_WALL, 270, 315, 0, 45) self.back_wall = self.Wall(180, 4, self.BACK_WALL,", "= 50 # mm/second - TODO use odo to update", "-min_angle < state.right_wall_angle < min_angle: distance = 1000000000 else: distance", "self.next_action = next_action def start(self): self.rover.command(pyroslib.publish, self.speed, 0) # pyroslib.publish(\"move/drive\",", "challenge_utils import Action, PID SQRT2 = math.sqrt(2) PIhalf = math.pi", "None: if lline.angle * MazeAttitude.ANGLE_TOLLERANCE >= mline.angle >= lline.angle /", "in WHEEL_NAMES: self.required_odo[wheel_name] = distance def start(self): super(MoveForwardOnOdo, self).start() state", "< dlong2 and plong1 != MazeAttitude.UNKNOWN and lmline.angle * MazeAttitude.ANGLE_TOLLERANCE", "-150 < distance < 0: distance = -150 distance =", "= wall_point_kind self.left_mid_point_index = left_mid_point_index self.left_point_index = left_point_index self.mid_point_index =", "= 225 L270_315 = 270 L315_0 = 315 LINES =", "== MazeAttitude.UNKNOWN and plong2 == MazeAttitude.UNKNOWN: points[self.mid_point_index] = self.wall_point_kind points[self.right_point_index]", "MoveForwardOnOdo(Action): def __init__(self, agent, stop_action=None): super(MoveForwardOnOdo, self).__init__(agent) self.stop_action = stop_action", "forward for \" + str(self.time) + \" ticks.\") return self", "else: angle = int(attitude.lines[a].angle * 180 / math.pi) point =", "150 # Values that worked speed=150, steer=5-7, dist=4 # self.speed", "elif dlong1 >= dlong2 and plong2 == MazeAttitude.UNKNOWN and pmid", "left_distances > right_distances: log(LOG_LEVEL_INFO, \"Found corner 2 - turning left,", "None: print(\"Wall {:3d} -> has angle {:3d} but is too", "points[self.right_point_index] = self.wall_point_kind self.setAngle(mline.angle, distances) elif lline.angle is not None", "None and plong1 == MazeAttitude.UNKNOWN and pmid == MazeAttitude.UNKNOWN: 
points[self.left_point_index]", "stop_action=None): super(MoveForwardOnOdo, self).__init__(agent) self.stop_action = stop_action self.required_odo = {'fl': 0,", "120\") def end(self): super(MoveForwardOnOdo, self).end() def next(self): state = self.rover.getRoverState()", "state.radar.radar[0] = 5 # state.radar.radar[45] = SQRT2 * 5 *", "self.left_wall = self.Wall(270, 6, self.LEFT_WALL, 180, 225, 270, 315) self.front_wall", "mid_point_index, right_point_index): self.ds_angle = distance_sensor_angle self.ds_index = distance_sensor_index self.wall_point_kind =", "L0_45 = 0 L45_90 = 45 L90_135 = 90 L135_180", "self.setAngle(lline.angle, distances) elif mline.angle is not None and pmid ==", "4d} fd={: 4d} \".format(int(state.right_front_distance_of_wall), int(front_distance))) return self.right_corner_action return self def", "p in attitude.points: printWallLines(p) for w in attitude.walls: printWall(w) print(\"----------------------------------------------------------\")", "printWalls() # # state.radar.radar[0] = 5 # state.radar.radar[45] = SQRT2", "\"Turn-Around-Corner\" class DriverForwardForTimeAction(Action): def __init__(self, agent, time, speed, next_action): super(DriverForwardForTimeAction,", "{:04d} at speed {:04d}, start heading {:07.3f}, requested heading {:07.3f}\".format(self.distance,", "{0: 0, 45: 0, 90: 0, 135: 0, 180: 0,", "odo={:7.2f}\".format( float(time.time()), int(front_distance), int(wall_angle * 180 / math.pi), int(distance_from_wall), int(distance_error),", "/ SQRT2 self.angle = MazeAttitude.normAngle(math.atan2(lsqrt2, lsqrt2 - short_distance) * self.factor", "2 class MazeAttitude: UNKNOWN = 0 LEFT_WALL = 1 RIGHT_WALL", "= mid_point_index self.right_point_index = right_point_index self.is_front_or_back = self.ds_angle == 0", "LINES = [L0_45, L45_90, L90_135, L135_180, L180_225, L225_270, L270_315, L315_0]", "L90_135 = 90 L135_180 = 135 L180_225 = 180 L225_270", "execute(self): state = self.rover.getRoverState() left_diagonal_distance = 
state.radar.radar[315] front_distance = state.radar.radar[0]", "= None self.next_action = next_action self.error = 0 def start(self):", "0: self.time -= 1 log(LOG_LEVEL_INFO, \"Going forward for \" +", "False def end(self): super(MazeCorridorAction, self).end() def next(self): left_diagonal_distance = state.radar.radar[315]", "angle += math.pi * (450 - front_distance) / 1800 #", "else: self.angle = None class Wall: def __init__(self, distance_sensor_angle, distance_sensor_index,", "dlong1 >= dlong2 and plong2 != MazeAttitude.UNKNOWN and mline.angle *", "-distance_speed: angle = math.pi / 4 else: try: angle =", "= self.wall_point_kind points[self.right_point_index] = self.wall_point_kind self.setAngle(mline.angle, distances) elif lline.angle is", "and distance_error < -distance_speed: angle = -math.pi / 4 if", "self.right_gap = self.NO_GAP self.walls = {self.right_wall.ds_angle: self.right_wall, self.left_wall.ds_angle: self.left_wall, self.front_wall.ds_angle:", "points[self.mid_point_index] = self.wall_point_kind points[self.right_point_index] = self.wall_point_kind self.setAngle(mline.angle, distances) elif lline.angle", "divide with 10 and by 180 -> 450/10 - 45deg", "-1)) self.pid = PID(1, 0.0, 0.05, 1, 0, diff_method=angleDiference) self.pid.process(self.requested_heading,", "state.radar.radar_deltas[0])) else: if state.left_front_distance_of_wall > 100 and front_distance < 550:", "= self.rover.getRoverState() left_diagonal_distance = state.radar.radar[315] front_distance = state.radar.radar[0] gain =", "self.last_heading self.last_heading = heading log(LOG_LEVEL_INFO, \"Turning speed={:04d} h={:07.3f} lh={:07.3f} dh={:07.3f}", "/ 4 if front_distance < 450: angle += math.pi *", "distances[mline.short_point_index] dlong2 = distances[mline.long_point_index] plong1 = points[self.left_point_index] pmid = points[self.mid_point_index]", "if state.radar.status[0] != 0 and abs(state.radar.radar_deltas[0]) > 100: log(LOG_LEVEL_INFO, \"Front", "return None def 
updateUndefinedWall(wall, preferable_wall, wall_adjust, second_wall): if wall.angle is", "next(self): heading = state.heading.heading self.error = self.pid.process(self.requested_heading, heading) if self.left_or_right", "L315_0] ANGLE_TOLLERANCE = 1.075 @staticmethod def normAngle(a): if a >", "expected_diagonal_distance = front_distance * 2 * math.cos(math.pi / 4 +", "LEFT_WALL = 1 RIGHT_WALL = 2 FRONT_WALL = 4 BACK_WALL", "self def execute(self): state = self.rover.getRoverState() left_diagonal_distance = state.radar.radar[315] front_distance", "== MazeAction.RIGHT: self.a1 = 45 self.a2 = 90 self.a3 =", "> distance_speed: angle = -math.pi / 4 if front_distance <", "0 def start(self): super(MazeTurnAroundCornerAction, self).start() state = self.rover.getRoverState() self.start_heading =", "-distance_speed: angle = -math.pi / 4 if front_distance < 450:", "/ MazeAttitude.ANGLE_TOLLERANCE: points[self.mid_point_index] = points[lline.long_point_index] angle = MazeAttitude.normAngle(mline.angle - PIhalf)", "True if do_stop: return self.stop_action else: return self def execute(self):", "plong1 != MazeAttitude.UNKNOWN and pmid == MazeAttitude.UNKNOWN and plong2 ==", "self.distance = abs(int(math.sin(angle) * distance)) else: self.distance = abs(int(math.cos(angle) *", "correct: d={:4d} s={:2d} delta={:4d}\".format(front_distance, state.radar.status[0], state.radar.radar_deltas[0])) else: if state.left_front_distance_of_wall >", "150 speed = 50 # mm/second - TODO use odo", "lline.angle is not None and mline.angle is not None: if", "is not None] wall_processing_order = sorted(wls, key=lambda wall: self.distances[wall.ds_angle]) for", "distance)) def setAngleAndDistance(self, angle, distance): self.angle = angle self.distance =", "super(MazeCorridorAction, self).end() def next(self): left_diagonal_distance = state.radar.radar[315] front_distance = state.radar.radar[0]", "* -(1 if self.left_or_right == self.RIGHT else -1)) self.pid =", "int(steer_speed), 
int(distance_speed), int(distance), int(angle), int(state.heading.heading), float(state.wheel_orientations.orientations['fl']) )) def getActionName(self): return", "int(expected_diagonal_distance))) return self.left_corner_action if front_distance < 550 and state.radar.radar_deltas[0] <", "angle = 0 elif distance_error > 0 and distance_error >", "4d} s={: 3d}\".format(int(wall_angle), int(distance_from_wall), int(distance_error), int(distance), int(speed))) else: wall_angle =", "\"Starting to turn around corner at distance {:04d} at speed", "1 def __init__(self, agent): super(MazeAction, self).__init__(agent) def check_next_action_conditions(self): return self", "distance * (1 if left_or_right == self.RIGHT else -1) self.speed", "# pyroslib.publish(\"move/steer\", str(distance) + \" \" + str(self.speed) + \"", "distance_sensor_angle, distance_sensor_index, wall_point_kind, left_mid_point_index, left_point_index, mid_point_index, right_point_index): self.ds_angle = distance_sensor_angle", "state.wheel_odos[wheel_name] >= self.required_odo[wheel_name]: do_stop = True if state.radar.radar[0] < 1.0", "# log(LOG_LEVEL_INFO, \"{:16.3f}: dist_f={: 4d} wa={: 3d} dist_w={: 4d} dist_err={:", "== 0: return distance last_distance = state.radar.last_radar[angle] if abs(distance -", "at {:3d} angle\".format(a, angle)) else: if point == MazeAttitude.LEFT_WALL: wall", "150 # 150 speed = 50 # mm/second - TODO", "60 offset = 150 # Values that worked speed=150, steer=5-7,", "Action, PID SQRT2 = math.sqrt(2) PIhalf = math.pi / 2", "< dlong2 and plong1 == MazeAttitude.UNKNOWN and pmid == MazeAttitude.UNKNOWN:", "0 if abs(distance_error) < 10: angle = 0 elif distance_error", "self.short_point_index = short_point_index self.long_point_index = long_point_index self.factor = factor self.adjust", "225, 180, 1, -math.pi), self.L225_270: self.Line(self.L225_270, 225, 270, -1, PIhalf),", "self.wall_point_kind points[self.mid_point_index] = self.wall_point_kind self.setAngle(lline.angle, 
distances) elif mline.angle is not", "int(distance_from_wall), int(distance_error), int(state.left_wall_angle * 180 / math.pi), int(state.left_front_distance_of_wall), int(state.right_wall_angle *", "self.angle = None class Wall: def __init__(self, distance_sensor_angle, distance_sensor_index, wall_point_kind,", "= left_or_right self.distance = distance self.speed = speed self.next_action =", "__name__ == \"__main__\": from rover import Radar, RoverState radar_values =", "< distance < 0: distance = -150 distance = -distance", "180 / math.pi), int(distance_from_wall), int(distance_error), int(state.left_wall_angle * 180 / math.pi),", "if self.is_front_or_back: self.distance = abs(int(math.sin(angle) * distance)) else: self.distance =", "SQRT2 self.angle = MazeAttitude.normAngle(math.atan2(lsqrt2, lsqrt2 - short_distance) * self.factor +", "left_or_right, distance, speed, next_action=None): super(MazeCorridorAction, self).__init__(agent) self.left_or_right = left_or_right self.distance", "0 and abs(state.radar.radar_deltas[0]) > 100: log(LOG_LEVEL_INFO, \"Front distance not correct:", "next(self): if self.left_or_right == self.LEFT: diagonal_distance = state.radar.radar[45] else: diagonal_distance", "0, 225: 0, 270: 0, 315: 0} def calculate(self, state):", "700: log(LOG_LEVEL_INFO, \"Found final corner - turning to finish, rfd={:", "for wall in wall_processing_order: wall.tryFindingWall(self.distances, self.lines, self.points) updateUndefinedWall(self.front_wall, self.right_wall, -PIhalf,", "left_point_index self.mid_point_index = mid_point_index self.right_point_index = right_point_index self.is_front_or_back = self.ds_angle", "= math.pi / 4 elif distance_error < 0 and distance_error", "self.been_in_chicane = False def start(self): super(MazeCorridorAction, self).start() self.been_in_chicane = False", "1, -math.pi), self.L225_270: self.Line(self.L225_270, 225, 270, -1, PIhalf), self.L270_315: self.Line(self.L270_315,", "150 elif -150 < distance < 0: distance = 
-150", "225 L270_315 = 270 L315_0 = 315 LINES = [L0_45,", "135, 180, -1, math.pi), self.L180_225: self.Line(self.L180_225, 225, 180, 1, -math.pi),", "self.stop_action = stop_action self.required_odo = {'fl': 0, 'fr': 0, 'bl':", "MazeTurnAroundCornerAction(self, self.LEFT, self.distance, self.speed, self) self.right_corner_action = MazeTurnAroundCornerAction(self, self.RIGHT, self.distance,", "if attitude.lines[a].angle is None: print(\"{:3d} -> point too far -", "self.speed, 0, self.distance) # pyroslib.publish(\"move/steer\", str(self.distance) + \" \" +", "end(self): pass def next(self): if self.time > 0: self.time -=", "if point is None: print(\"{:3d} -> line at {:3d} angle\".format(a,", "225, 270, 315) self.front_wall = self.Wall(0, 0, self.FRONT_WALL, 270, 315,", "import time from pyroslib.logging import log, LOG_LEVEL_ALWAYS, LOG_LEVEL_INFO, LOG_LEVEL_DEBUG from", "= 2 POINTS = [0, 45, 90, 135, 180, 225,", "10, 315: SQRT2 * 10} radar_last_values = {0: 10, 45:", "self.walls = {self.right_wall.ds_angle: self.right_wall, self.left_wall.ds_angle: self.left_wall, self.front_wall.ds_angle: self.front_wall, self.back_wall.ds_angle: self.back_wall}", "int(distance_from_wall), int(distance_error), int(distance), int(speed))) else: distance = 1000000000 distance_from_wall =", "self.RIGHT else -1)) self.pid = PID(1, 0.0, 0.05, 1, 0,", "self.wall_point_kind self.setAngle(mline.angle, distances) def __init__(self): self.lines = {self.L315_0: self.Line(self.L315_0, 315,", "has angle {:3d} and is at {:3d}\".format(w.ds_angle, int(w.angle * 180", "wall\" else: wall = \"no wall\" print(\"{:3d} -> line at", "0 FORWARD_GAP = 1 SIDE_GAP = 2 POINTS = [0,", "= 150 # 150 speed = 50 # mm/second -", "{:3d} angle belogs to {:s}\".format(a, angle, wall)) def printWall(w): if", "# 5-7 speed_distance_fudge_factor = 4 # 4 min_angle = 1", "self.front_wall, PIhalf, self.back_wall) updateUndefinedWall(self.left_wall, self.front_wall, -PIhalf, self.back_wall) # TODO calc", "0: return self 
elif self.left_or_right == self.RIGHT and self.error <", "else: if w.distance is None: print(\"Wall {:3d} -> has angle", "distances) def __init__(self): self.lines = {self.L315_0: self.Line(self.L315_0, 315, 0, -1,", "left_mid_point_index, left_point_index, mid_point_index, right_point_index): self.ds_angle = distance_sensor_angle self.ds_index = distance_sensor_index", "FRONT_WALL = 4 BACK_WALL = 8 NO_GAP = 0 FORWARD_GAP", "300 and left_diagonal_distance > expected_diagonal_distance * 1.2: log(LOG_LEVEL_INFO, \"Found chicane...", "= 0 self.requested_heading = 0 self.pid = None self.next_action =", "state.radar.radar[270] distance_error = distance_from_wall - self.distance angle = 0 if", "= front_distance * 2 * math.cos(math.pi / 4 + state.left_wall_angle)", "315 self.a2 = 270 self.a3 = 225 self.left_corner_action = MazeTurnAroundCornerAction(self,", "int(right_distances))) return self.left_corner_action else: log(LOG_LEVEL_INFO, \"Found corner 2 - turning", "Radar, RoverState radar_values = {0: 10, 45: SQRT2 * 10,", "1, 0, diff_method=angleDiference) self.pid.process(self.requested_heading, self.start_heading) log(LOG_LEVEL_INFO, \"Starting to turn around", "== MazeAttitude.FRONT_WALL: wall = \"front wall\" elif point == MazeAttitude.BACK_WALL:", "setAngleAndDistance(self, angle, distance): self.angle = angle self.distance = distance def", "math.cos(math.pi / 4 + state.left_wall_angle) else: expected_diagonal_distance = front_distance *", "else: if self.next_action is not None: log(LOG_LEVEL_INFO, \"Finished turning around", "float(state.wheel_orientations.orientations['fl']) )) def getActionName(self): return \"Chicane \" + (\"L\" if", "100: return distance return None def updateUndefinedWall(wall, preferable_wall, wall_adjust, second_wall):", "L90_135, L135_180, L180_225, L225_270, L270_315, L315_0] ANGLE_TOLLERANCE = 1.075 @staticmethod", "return self.left_corner_action if front_distance < 550 and state.radar.radar_deltas[0] < 0:", "= state.left_wall_distance 
distance_error = distance_from_wall - self.distance angle = 0", "= PID(1, 0.0, 0.05, 1, 0, diff_method=angleDiference) self.pid.process(self.requested_heading, self.start_heading) log(LOG_LEVEL_INFO,", "MazeAttitude() radar = Radar(0, radar_values, radar_status, Radar(0, radar_last_values, radar_status)) state", "* 180 / math.pi), int(state.right_front_distance_of_wall), int(steer_speed), int(distance_speed), int(distance), int(angle), int(state.heading.heading),", "return a class Line: def __init__(self, line_index, long_point_index, short_point_index, factor,", "is None: print(\"Wall {:3d} -> is too far - not", "3d}\".format(int(0), int(distance_from_wall), int(distance_error), int(distance), int(speed))) distance = int(distance) angle =", "550 and state.radar.radar_deltas[0] < 0: left_distances = state.radar.radar[270] + state.radar.radar[315]", "self.distance = distance def tryFindingWall(self, distances, lines, points): lmline =", "status == 0: return distance last_distance = state.radar.last_radar[angle] if abs(distance", "w.distance is None: print(\"Wall {:3d} -> has angle {:3d} but", "distance = distances[self.left_point_index] * abs(math.sin(mline.angle) / SQRT2) self.setAngleAndDistance(angle, distance) elif", "wall\" print(\"{:3d} -> line at {:3d} angle belogs to {:s}\".format(a,", "= self.wall_point_kind self.setAngle(lline.angle, distances) elif plong1 != MazeAttitude.UNKNOWN and pmid", "self.left_or_right == MazeAction.RIGHT: self.a1 = 45 self.a2 = 90 self.a3", "= -math.pi / 4 else: try: angle = math.asin(distance_error /", "wa={: 3d} dw={: 4d} de={: 4d} d={: 4d} s={: 3d}\".format(int(0),", "execute(self): state = self.rover.getRoverState() heading = state.heading.heading last_heading = self.last_heading", "distances[self.mid_point_index] if distance < 1: self.distance = 0 else: if", "and pmid == MazeAttitude.UNKNOWN: points[self.left_point_index] = self.wall_point_kind points[self.mid_point_index] = self.wall_point_kind", "# Values that worked 
speed=150, steer=5-7, dist=4 # self.speed =", "pyroslib.publish(\"move/steer\", str(self.distance) + \" \" + str(self.speed)) def end(self): super(MazeTurnAroundCornerAction,", "MazeAttitude.UNKNOWN and pmid == MazeAttitude.UNKNOWN and plong2 != MazeAttitude.UNKNOWN: points[self.left_point_index]", "-PIhalf: a = a + math.pi return a class Line:", "MazeAttitude.BACK_WALL: wall = \"back wall\" else: wall = \"no wall\"", "* abs(math.sin(mline.angle) / SQRT2) self.setAngleAndDistance(angle, distance) elif dlong1 >= dlong2", "self.rover.getRoverState() for wheel in self.required_odo: self.required_odo[wheel] = WheelOdos.normaliseOdo(state.wheel_odos[wheel] + self.required_odo[wheel])", "180, 225, 270, 315] WALLS = [90, 270, 0, 180]", "self.distances = {p: getPointDistance(state, p) for p in self.POINTS} for", "odo to \" + str(self.required_odo) + \"; starting...\") self.rover.command(pyroslib.publish, 300,", "* SQRT2 if False and not self.been_in_chicane and front_distance >", "self.front_wall.ds_angle: self.front_wall, self.back_wall.ds_angle: self.back_wall} self.points = {0: 0, 45: 0,", "has angle {:3d} but is too far - distance not", "distance_error < -distance_speed: angle = math.pi / 4 if front_distance", "= False log(LOG_LEVEL_DEBUG, \"Driving to \" + str(self.required_odo)) for wheel_name", "1 log(LOG_LEVEL_INFO, \"Going forward for \" + str(self.time) + \"", "return self class ChicaneAction(MazeAction): def __init__(self, agent, left_or_right, distance, speed,", "0.05, 1, 0, diff_method=angleDiference) self.pid.process(self.requested_heading, self.start_heading) log(LOG_LEVEL_INFO, \"Starting to turn", "angle={: 3d} heading={: 3d} odo={:7.2f}\".format( float(time.time()), int(front_distance), int(0 * 180", "/ state.left_wall_angle if 0 <= distance < 150: distance =", "= state.radar.radar[45] else: diagonal_distance = state.radar.radar[315] if self.left_or_right == self.LEFT", "0 self.last_heading = 0 self.requested_heading = 0 self.pid = None", "# # 
state.radar.radar[0] = 5 # state.radar.radar[45] = SQRT2 *", "adjust self.angle = None def calcAngle(self, distances): long_distance = distances[self.long_point_index]", "not correct: d={:4d} s={:2d} delta={:4d}\".format(front_distance, state.radar.status[0], state.radar.radar_deltas[0])) else: if state.left_front_distance_of_wall", "state.left_wall_angle < 0: expected_diagonal_distance = front_distance * 2 * math.cos(math.pi", "points[self.right_point_index] = self.wall_point_kind self.setAngle(mline.angle, distances) def __init__(self): self.lines = {self.L315_0:", "distance_from_wall - self.distance angle = 0 if abs(distance_error) < 10:", "speed = 50 # mm/second - TODO use odo to", "270: 0, 315: 0} attitude = MazeAttitude() radar = Radar(0,", "False log(LOG_LEVEL_DEBUG, \"Driving to \" + str(self.required_odo)) for wheel_name in", "SQRT2 * 5 * 0.9 # state.radar.radar[315] = SQRT2 *", "{:04d}, start heading {:07.3f}, requested heading {:07.3f}\".format(self.distance, self.speed, self.start_heading, self.requested_heading))", "# TODO calc gaps class MoveForwardOnOdo(Action): def __init__(self, agent, stop_action=None):", "\" \" + str(self.speed) + \" \" + str(angle)) wheel_orientations", "\" ticks.\") def end(self): pass def next(self): if self.time >", "180 -> 450/10 - 45deg elif distance_error < 0 and", "line_index, long_point_index, short_point_index, factor, adjust): self.line_index = line_index self.short_point_index =", "self.RIGHT, self.distance, self.speed, DriverForwardForTimeAction(self, 10, self.speed, None)) def start(self): super(ChicaneAction,", "\" \" + str(angle)) wheel_orientations = state.wheel_odos.odos log(LOG_LEVEL_INFO, \"{:16.3f}: dist_f={:", "1.075 @staticmethod def normAngle(a): if a > PIhalf: a =", "self.a2 = 90 self.a3 = 135 else: self.a1 = 315", "\" + str(angle)) wheel_orientations = state.wheel_odos.odos log(LOG_LEVEL_INFO, \"{:16.3f}: dist_f={: 4d}", "< 550 and state.radar.radar_deltas[0] < 0: left_distances = state.radar.radar[270] 
+", "- last_distance) < 100: return distance return None def updateUndefinedWall(wall,", "self.back_wall = self.Wall(180, 4, self.BACK_WALL, 90, 135, 180, 225) self.left_gap", "self.left_corner_action = MazeTurnAroundCornerAction(self, self.LEFT, self.distance, self.speed, self) self.right_corner_action = MazeTurnAroundCornerAction(self,", "dist_w={: 4d} dist_err={: 3d} la={: 3d} ld={: 3d} ra={: 3d}", "else: if dlong1 < dlong2 and plong1 == MazeAttitude.UNKNOWN and", "print(\"Wall {:3d} -> has angle {:3d} but is too far", "== self.RIGHT and diagonal_distance > 800: log(LOG_LEVEL_INFO, \"Found end ofchicane", "self.LEFT and diagonal_distance > 800: log(LOG_LEVEL_INFO, \"Found second part of", "4 else: try: angle = math.asin(distance_error / distance_speed) except BaseException", "angle distance = distances[self.mid_point_index] if distance < 1: self.distance =", "if dlong1 < dlong2 and plong1 != MazeAttitude.UNKNOWN and lmline.angle", "for w in attitude.walls: printWall(w) print(\"----------------------------------------------------------\") # attitude.calculate(state) # printWalls()", "= state.radar.last_radar[angle] if abs(distance - last_distance) < 100: return distance", "float(state.wheel_orientations.orientations['fl']) )) def getActionName(self): return \"Corridor\" class MazeTurnAroundCornerAction(MazeAction): def __init__(self,", "self.distance, self.speed, self) self.right_corner_action = MazeTurnAroundCornerAction(self, self.RIGHT, self.distance, self.speed, DriverForwardForTimeAction(self,", "last_heading = self.last_heading self.last_heading = heading log(LOG_LEVEL_INFO, \"Turning speed={:04d} h={:07.3f}", "ODO\" class MazeAction(Action): LEFT = -1 RIGHT = 1 def", "L225_270 = 225 L270_315 = 270 L315_0 = 315 LINES", "= 135 else: self.a1 = 315 self.a2 = 270 self.a3", "SQRT2 if False and not self.been_in_chicane and front_distance > 300", "None: wall.setAngleAndDistance(self.normAngle(preferable_wall.angle + wall_adjust), 
self.distances[wall.mid_point_index]) else: wall.setAngleAndDistance(self.normAngle(second_wall.angle - wall_adjust), self.distances[wall.mid_point_index])", "return self def execute(self): pass def getActionName(self): return \"Forward ODO\"", "= RoverState(None, None, None, radar, None, None) def printWallLines(a): if", "TODO calc gaps class MoveForwardOnOdo(Action): def __init__(self, agent, stop_action=None): super(MoveForwardOnOdo,", "None def setAngle(self, angle, distances): self.angle = angle distance =", "= -math.pi / 4 elif distance_error < 0 and distance_error", "3d} s_spd={: 3d} dist_spd={: 3d} dist={: 4d} angle={: 3d} heading={:", "self.rover.getRoverState() do_stop = False log(LOG_LEVEL_DEBUG, \"Driving to \" + str(self.required_odo))", "pmid == MazeAttitude.UNKNOWN: points[self.mid_point_index] = self.wall_point_kind if plong2 == MazeAttitude.UNKNOWN:", "dd={: 4d} ed={: 4d}\".format(int(state.left_front_distance_of_wall), int(front_distance), int(left_diagonal_distance), int(expected_diagonal_distance))) self.been_in_chicane = True", "distance not calculated\".format(w.ds_angle, int(w.angle * 180 / math.pi))) else: print(\"Wall", "= state.heading.heading self.requested_heading = normaiseAngle(self.start_heading + 80 * -(1 if", "4 else: try: angle = -math.asin(distance_error / distance_speed) except BaseException", "line at {:3d} angle\".format(a, angle)) else: if point == MazeAttitude.LEFT_WALL:", "angle, distances): self.angle = angle distance = distances[self.mid_point_index] if distance", "> distance_speed: angle = -math.pi / 4 elif distance_error <", "= points[lline.long_point_index] angle = MazeAttitude.normAngle(mline.angle - PIhalf) distance = distances[self.right_point_index]", "0: expected_diagonal_distance = front_distance * 2 * math.cos(math.pi / 4", "+= math.pi * (450 - front_distance) / 1800 # divide", "radar_values = {0: 10, 45: SQRT2 * 10, 90: 10,", "= self.Wall(0, 0, self.FRONT_WALL, 270, 315, 0, 45) self.back_wall =", "key=lambda 
wall: self.distances[wall.ds_angle]) for wall in wall_processing_order: wall.tryFindingWall(self.distances, self.lines, self.points)", "# pyroslib.publish(\"move/steer\", str(self.distance) + \" \" + str(self.speed)) def end(self):", "* 10} radar_status = {0: 0, 45: 0, 90: 0,", "d={: 4d} s={: 3d}\".format(int(wall_angle), int(distance_from_wall), int(distance_error), int(distance), int(speed))) distance =", "wall.angle is None and self.distances[wall.ds_angle] is not None: if preferable_wall.angle", "PIhalf) distance = distances[self.right_point_index] * abs(math.sin(mline.angle) / SQRT2) self.setAngleAndDistance(angle, distance)", "angle = math.pi / 4 if front_distance < 450: angle", "LOG_LEVEL_DEBUG from rover import WheelOdos, WHEEL_NAMES from rover import normaiseAngle,", "abs(distance_error) < 10: angle = 0 elif distance_error > 0", "self.radar, self.heading, self.RIGHT, self.distance, self.speed, DriverForwardForTimeActoun(10, self.speed, None)) self.been_in_chicane =", "log(LOG_LEVEL_INFO, \"Found end ofchicane - leaging, rfd={: 4d}\".format(int(diagonal_distance))) return self.next_action", "def setAngleAndDistance(self, angle, distance): self.angle = angle self.distance = distance", "PIhalf, self.left_wall) updateUndefinedWall(self.right_wall, self.front_wall, PIhalf, self.back_wall) updateUndefinedWall(self.left_wall, self.front_wall, -PIhalf, self.back_wall)", "self.left_mid_point_index = left_mid_point_index self.left_point_index = left_point_index self.mid_point_index = mid_point_index self.right_point_index", "math.pi), int(state.left_front_distance_of_wall), int(state.right_wall_angle * 180 / math.pi), int(state.right_front_distance_of_wall), int(steer_speed), int(distance_speed),", "> 100 and front_distance < 700: log(LOG_LEVEL_INFO, \"Found final corner", "do_stop = True if state.radar.radar[0] < 1.0 or state.radar.radar[315] <", "ed={: 4d}\".format(int(state.left_front_distance_of_wall), int(front_distance), int(left_diagonal_distance), 
int(expected_diagonal_distance))) self.been_in_chicane = True return ChicaneAction(self,", "\"Reset odo to \" + str(self.required_odo) + \"; starting...\") self.rover.command(pyroslib.publish,", "self.distances[wall.ds_angle]) for wall in wall_processing_order: wall.tryFindingWall(self.distances, self.lines, self.points) updateUndefinedWall(self.front_wall, self.right_wall,", "plong1 == MazeAttitude.UNKNOWN and pmid == MazeAttitude.UNKNOWN and plong2 !=", "corner - turning to finish, rfd={: 4d} fd={: 4d} \".format(int(state.right_front_distance_of_wall),", "= {p: getPointDistance(state, p) for p in self.POINTS} for line", "# printWalls() # # state.radar.radar[0] = 5 # state.radar.radar[45] =", "self.next_action = next_action self.error = 0 def start(self): super(MazeTurnAroundCornerAction, self).start()", "state.radar.radar[315] right_distances = state.radar.radar[90] + state.radar.radar[45] if left_distances > right_distances:", "= self.NO_GAP self.right_gap = self.NO_GAP self.walls = {self.right_wall.ds_angle: self.right_wall, self.left_wall.ds_angle:", "10: angle = 0 elif distance_error > 0 and distance_error", "return self elif self.left_or_right == self.RIGHT and self.error < 0:", "self.left_or_right == self.LEFT and self.error > 0: return self elif", "# # Copyright 2016-2019 Games Creators Club # # MIT", "adjust): self.line_index = line_index self.short_point_index = short_point_index self.long_point_index = long_point_index", "0 and distance_error > distance_speed: angle = -math.pi / 4", "speed self.next_action = next_action if self.left_or_right == MazeAction.RIGHT: self.a1 =", "and plong1 == MazeAttitude.UNKNOWN and pmid == MazeAttitude.UNKNOWN: points[self.left_point_index] =", "180 / math.pi), int(distance_from_wall), int(distance_error), int(0 * 180 / math.pi),", "4d} ed={: 4d}\".format(int(state.left_front_distance_of_wall), int(front_distance), int(left_diagonal_distance), int(expected_diagonal_distance))) return self.left_corner_action if 
front_distance", "= True if state.radar.radar[0] < 1.0 or state.radar.radar[315] < 1.0", "if long_distance is not None and short_distance is not None:", "== self.RIGHT: distance = -1000000000 distance_from_wall = state.radar.radar[90] distance_error =", "= MazeTurnAroundCornerAction(self.odo, self.radar, self.heading, self.RIGHT, self.distance, self.speed, DriverForwardForTimeActoun(10, self.speed, None))", "MazeTurnAroundCornerAction(self.odo, self.radar, self.heading, self.RIGHT, self.distance, self.speed, DriverForwardForTimeActoun(10, self.speed, None)) self.been_in_chicane", "self.Wall(270, 6, self.LEFT_WALL, 180, 225, 270, 315) self.front_wall = self.Wall(0,", "MazeTurnAroundCornerAction(self, self.LEFT, int(self.distance * 1), self.speed, self) self.right_corner_action = MazeTurnAroundCornerAction(self,", "self.distances[w_ds_angle] is not None] wall_processing_order = sorted(wls, key=lambda wall: self.distances[wall.ds_angle])", "= 45 self.a2 = 90 self.a3 = 135 else: self.a1", "None: print(\"{:3d} -> point too far - not calculated\".format(a)) else:", "factor self.adjust = adjust self.angle = None def calcAngle(self, distances):", "0 and distance_error < -distance_speed: angle = math.pi / 4", "LEFT = -1 RIGHT = 1 def __init__(self, agent): super(MazeAction,", "= None class Wall: def __init__(self, distance_sensor_angle, distance_sensor_index, wall_point_kind, left_mid_point_index,", "front_distance < 700: log(LOG_LEVEL_INFO, \"Found final corner - turning to", "and mline.angle * MazeAttitude.ANGLE_TOLLERANCE >= rline.angle >= mline.angle / MazeAttitude.ANGLE_TOLLERANCE:", "MazeTurnAroundCornerAction(MazeAction): def __init__(self, agent, left_or_right, distance, speed, next_action=None): super(MazeTurnAroundCornerAction, self).__init__(agent)", "angle): distance = state.radar.radar[angle] status = state.radar.status[angle] if status ==", "def getActionName(self): return \"Forward ODO\" class MazeAction(Action): LEFT = -1", "self.speed, 0) # 
pyroslib.publish(\"move/drive\", \"0 \" + str(self.speed)) log(LOG_LEVEL_INFO, \"Going", "== self.RIGHT else -1)) self.pid = PID(1, 0.0, 0.05, 1,", "= 1000000000 else: distance = steer_speed / state.left_wall_angle if 0", "Values that worked speed=150, steer=5-7, dist=4 # self.speed = 150", "\"no wall\" print(\"{:3d} -> line at {:3d} angle belogs to", "next_action if self.left_or_right == MazeAction.RIGHT: self.a1 = 45 self.a2 =", "lines[self.left_mid_point_index] lline = lines[self.left_point_index] mline = lines[self.mid_point_index] rline = lines[self.right_point_index]", "= self.wall_point_kind points[self.mid_point_index] = self.wall_point_kind self.setAngle(lline.angle, distances) elif plong1 !=", "def __init__(self): self.lines = {self.L315_0: self.Line(self.L315_0, 315, 0, -1, math.pi),", "< 1: self.distance = 0 else: if self.is_front_or_back: self.distance =", "== self.RIGHT else -1) self.speed = speed self.start_heading = 0", "elif point == MazeAttitude.FRONT_WALL: wall = \"front wall\" elif point", "state = self.rover.getRoverState() left_diagonal_distance = state.radar.radar[315] front_distance = state.radar.radar[0] gain", "angle = 0 if abs(distance_error) < 10: angle = 0", "execute(self): pass def getActionName(self): return \"Forward ODO\" class MazeAction(Action): LEFT", "None: if preferable_wall.angle is not None: wall.setAngleAndDistance(self.normAngle(preferable_wall.angle + wall_adjust), self.distances[wall.mid_point_index])", "import Radar, RoverState radar_values = {0: 10, 45: SQRT2 *", "= 0 self.pid = None self.next_action = next_action self.error =", "= distances[mline.long_point_index] plong1 = points[self.left_point_index] pmid = points[self.mid_point_index] plong2 =", "next(self): left_diagonal_distance = state.radar.radar[315] front_distance = state.radar.radar[0] if state.radar.status[0] !=", "- leaging, rfd={: 4d}\".format(int(diagonal_distance))) return self.next_action return self def execute(self):", "front_distance < 450: angle -= 
math.pi * (450 - front_distance)", "= 0 if abs(distance_error) < 10: angle = 0 elif", "state.radar.radar[0] if state.radar.status[0] != 0 and abs(state.radar.radar_deltas[0]) > 100: log(LOG_LEVEL_INFO,", "-1, PIhalf), self.L270_315: self.Line(self.L270_315, 315, 270, 1, -PIhalf)} self.right_wall =", "L135_180, L180_225, L225_270, L270_315, L315_0] ANGLE_TOLLERANCE = 1.075 @staticmethod def", "front_distance < 550 and state.radar.radar_deltas[0] < 0: left_distances = state.radar.radar[270]", "log(LOG_LEVEL_INFO, \"Front distance not correct: d={:4d} s={:2d} delta={:4d}\".format(front_distance, state.radar.status[0], state.radar.radar_deltas[0]))", "4 BACK_WALL = 8 NO_GAP = 0 FORWARD_GAP = 1", "state.left_wall_angle if 0 <= distance < 150: distance = 150", "plong2 != MazeAttitude.UNKNOWN: points[self.left_point_index] = self.wall_point_kind points[self.mid_point_index] = self.wall_point_kind self.setAngle(lline.angle,", "self.wall_point_kind points[self.mid_point_index] = self.wall_point_kind self.setAngle(lline.angle, distances) elif dlong1 >= dlong2", "heading) if self.left_or_right == self.LEFT and self.error > 0: return", "if preferable_wall.angle is not None: wall.setAngleAndDistance(self.normAngle(preferable_wall.angle + wall_adjust), self.distances[wall.mid_point_index]) else:", "else: expected_diagonal_distance = front_distance * math.cos(state.left_wall_angle) * SQRT2 if False", "self.required_odo[wheel]) log(LOG_LEVEL_DEBUG, \"Reset odo to \" + str(self.required_odo) + \";", "is not None: lsqrt2 = long_distance / SQRT2 self.angle =", "* distance)) def setAngleAndDistance(self, angle, distance): self.angle = angle self.distance", "4d} fd={: 4d} dd={: 4d} ed={: 4d}\".format(int(state.left_front_distance_of_wall), int(front_distance), int(left_diagonal_distance), int(expected_diagonal_distance)))", "= lines[self.left_mid_point_index] lline = lines[self.left_point_index] mline = lines[self.mid_point_index] rline =", "self else: if self.next_action is not None: 
log(LOG_LEVEL_INFO, \"Finished turning", "self.next_action def execute(self): state = self.rover.getRoverState() heading = state.heading.heading last_heading", "+ str(self.time) + \" ticks.\") def end(self): pass def next(self):", "class MoveForwardOnOdo(Action): def __init__(self, agent, stop_action=None): super(MoveForwardOnOdo, self).__init__(agent) self.stop_action =", "and distance_error > distance_speed: angle = math.pi / 4 if", "1 RIGHT_WALL = 2 FRONT_WALL = 4 BACK_WALL = 8", "self.speed, self) self.right_corner_action = MazeTurnAroundCornerAction(self, self.RIGHT, int(self.distance * 1), self.speed,", "state.radar.radar[315] front_distance = state.radar.radar[0] gain = 60 offset = 150", "at speed {:04d}, start heading {:07.3f}, requested heading {:07.3f}\".format(self.distance, self.speed,", "dmid = distances[mline.short_point_index] dlong2 = distances[mline.long_point_index] plong1 = points[self.left_point_index] pmid", "self.RIGHT: wall_angle = state.right_wall_angle if -min_angle < state.right_wall_angle < min_angle:", "of chicane, rfd={: 4d}\".format(int(diagonal_distance))) self.left_or_right = self.RIGHT elif self.left_or_right ==", "distance_sensor_index self.wall_point_kind = wall_point_kind self.left_mid_point_index = left_mid_point_index self.left_point_index = left_point_index", "= \"left wall\" elif point == MazeAttitude.RIGHT_WALL: wall = \"right", "dist_f={: 4d} wa={: 3d} dist_w={: 4d} dist_err={: 3d} la={: 3d}", "self.left_wall, self.front_wall.ds_angle: self.front_wall, self.back_wall.ds_angle: self.back_wall} self.points = {0: 0, 45:", "MazeAttitude.UNKNOWN: points[self.left_point_index] = self.wall_point_kind points[self.mid_point_index] = self.wall_point_kind self.setAngle(lline.angle, distances) elif", "distance_error > distance_speed: angle = math.pi / 4 elif distance_error", "= 60 offset = 150 # Values that worked speed=150,", "SQRT2) self.setAngleAndDistance(angle, distance) elif lline.angle is not None and mline.angle", "rfd={: 
4d}\".format(int(diagonal_distance))) return self.next_action return self def execute(self): state =", "state.left_front_distance_of_wall > 100 and front_distance < 550: expected_diagonal_distance = 0", "= 8 NO_GAP = 0 FORWARD_GAP = 1 SIDE_GAP =", "fd={: 4d} ld={: 4d} rd={: 4d}\".format(int(front_distance), int(left_distances), int(right_distances))) return self.left_corner_action", "math.pi * (450 - front_distance) / 1800 # divide with", "2 * math.cos(math.pi / 4 + state.left_wall_angle) else: expected_diagonal_distance =", "int(distance), int(speed))) else: distance = 1000000000 distance_from_wall = state.radar.radar[270] distance_error", "not None: wall.setAngleAndDistance(self.normAngle(preferable_wall.angle + wall_adjust), self.distances[wall.mid_point_index]) else: wall.setAngleAndDistance(self.normAngle(second_wall.angle - wall_adjust),", "state.radar.radar_deltas[0] < 0: left_distances = state.radar.radar[270] + state.radar.radar[315] right_distances =", "{self.L315_0: self.Line(self.L315_0, 315, 0, -1, math.pi), self.L0_45: self.Line(self.L0_45, 45, 0,", "Creators Club # # MIT License # import math import", "str(self.required_odo)) for wheel_name in WHEEL_NAMES: if state.wheel_odos[wheel_name] >= self.required_odo[wheel_name]: do_stop", "< -distance_speed: angle = math.pi / 4 else: try: angle", "* 180 / math.pi), w.distance)) def printWalls(): for p in", "front_distance * 2 * math.cos(math.pi / 4 + state.left_wall_angle) else:", "self.rover.getRoverState() heading = state.heading.heading last_heading = self.last_heading self.last_heading = heading", "expected_diagonal_distance = 0 if state.left_wall_angle < 0: expected_diagonal_distance = front_distance", "< state.right_wall_angle < min_angle: distance = 1000000000 else: distance =", "self.setAngle(lline.angle, distances) elif plong1 != MazeAttitude.UNKNOWN and pmid == MazeAttitude.UNKNOWN", "-> point too far - not calculated\".format(a)) else: angle =", "speed_distance_fudge_factor if self.left_or_right 
== self.RIGHT: wall_angle = state.right_wall_angle if -min_angle", "self.setAngle(lline.angle, distances) elif dlong1 >= dlong2 and plong2 == MazeAttitude.UNKNOWN", "points[self.mid_point_index] = points[lline.long_point_index] angle = MazeAttitude.normAngle(mline.angle - PIhalf) distance =", "- short_distance) * self.factor + self.adjust) else: self.angle = None", "== MazeAttitude.UNKNOWN and pmid == MazeAttitude.UNKNOWN: points[self.left_point_index] = self.wall_point_kind points[self.mid_point_index]", "-1000000000 distance_from_wall = state.radar.radar[90] distance_error = distance_from_wall - self.distance angle", "self.required_odo[wheel_name]: do_stop = True if state.radar.radar[0] < 1.0 or state.radar.radar[315]", "short_point_index self.long_point_index = long_point_index self.factor = factor self.adjust = adjust", "= state.radar.radar[270] distance_error = distance_from_wall - self.distance angle = 0", "4d} dd={: 4d} ed={: 4d}\".format(int(state.left_front_distance_of_wall), int(front_distance), int(left_diagonal_distance), int(expected_diagonal_distance))) self.been_in_chicane =", "elif -150 < distance < 0: distance = -150 distance", "super(ChicaneAction, self).end() def next(self): if self.left_or_right == self.LEFT: diagonal_distance =", "state.radar.radar[315] if self.left_or_right == self.LEFT and diagonal_distance > 800: log(LOG_LEVEL_INFO,", "state.radar.radar[315] front_distance = state.radar.radar[0] if state.radar.status[0] != 0 and abs(state.radar.radar_deltas[0])", "{:3d}\".format(w.ds_angle, int(w.angle * 180 / math.pi), w.distance)) def printWalls(): for", "= self.wall_point_kind self.setAngle(mline.angle, distances) elif lline.angle is not None and", "self.speed, DriverForwardForTimeAction(self, 10, self.speed, None)) def start(self): super(ChicaneAction, self).start() def", "if state.wheel_odos[wheel_name] >= self.required_odo[wheel_name]: do_stop = True if state.radar.radar[0] <", "distance_error < -distance_speed: angle = -math.pi / 4 if 
front_distance", "> 0: self.time -= 1 log(LOG_LEVEL_INFO, \"Going forward for \"", "100 and front_distance < 700: log(LOG_LEVEL_INFO, \"Found final corner -", "= state.wheel_odos.odos # log(LOG_LEVEL_INFO, \"{:16.3f}: dist_f={: 4d} wa={: 3d} dist_w={:", "float(time.time()), int(front_distance), int(wall_angle * 180 / math.pi), int(distance_from_wall), int(distance_error), int(state.left_wall_angle", "left_or_right, distance, speed, next_action=None): super(MazeTurnAroundCornerAction, self).__init__(agent) self.left_or_right = left_or_right self.distance", "self.distance = abs(int(math.cos(angle) * distance)) def setAngleAndDistance(self, angle, distance): self.angle", "NO_GAP = 0 FORWARD_GAP = 1 SIDE_GAP = 2 POINTS", "= [self.walls[w_ds_angle] for w_ds_angle in self.WALLS if self.distances[w_ds_angle] is not", "int(distance_from_wall), int(distance_error), int(distance), int(speed))) distance = int(distance) angle = int(angle", "distances): self.angle = angle distance = distances[self.mid_point_index] if distance <", "MazeAttitude.ANGLE_TOLLERANCE: if plong1 == MazeAttitude.UNKNOWN: points[self.left_point_index] = self.wall_point_kind if pmid", "= sorted(wls, key=lambda wall: self.distances[wall.ds_angle]) for wall in wall_processing_order: wall.tryFindingWall(self.distances,", "270, 0, 180] L0_45 = 0 L45_90 = 45 L90_135", "'fr': 0, 'bl': 0, 'br': 0} def setRequiredOdo(self, distance): for", "10} radar_last_values = {0: 10, 45: SQRT2 * 10, 90:", "self.speed, self) self.right_corner_action = MazeTurnAroundCornerAction(self, self.RIGHT, self.distance, self.speed, DriverForwardForTimeAction(self, 10,", "points[self.mid_point_index] = self.wall_point_kind self.setAngle(lline.angle, distances) elif mline.angle is not None", "== 0 or self.ds_angle == 180 self.selected_line = None self.angle", "start heading {:07.3f}, requested heading {:07.3f}\".format(self.distance, self.speed, self.start_heading, self.requested_heading)) self.rover.command(pyroslib.publish,", "None def 
updateUndefinedWall(wall, preferable_wall, wall_adjust, second_wall): if wall.angle is None", "wall_adjust), self.distances[wall.mid_point_index]) self.points[wall.ds_angle] = wall.wall_point_kind self.distances = {p: getPointDistance(state, p)", "int(distance_error), int(state.left_wall_angle * 180 / math.pi), int(state.left_front_distance_of_wall), int(state.right_wall_angle * 180", "= 0 else: if self.is_front_or_back: self.distance = abs(int(math.sin(angle) * distance))", "self).__init__(agent) def check_next_action_conditions(self): return self class ChicaneAction(MazeAction): def __init__(self, agent,", "self.adjust) else: self.angle = None class Wall: def __init__(self, distance_sensor_angle,", "elif plong1 == MazeAttitude.UNKNOWN and pmid == MazeAttitude.UNKNOWN and plong2", "distance, speed, next_action=None): super(ChicaneAction, self).__init__(agent) self.left_or_right = left_or_right self.distance =", "points): lmline = lines[self.left_mid_point_index] lline = lines[self.left_point_index] mline = lines[self.mid_point_index]", "self.pid = None self.next_action = next_action self.error = 0 def", "self.right_wall, PIhalf, self.left_wall) updateUndefinedWall(self.right_wall, self.front_wall, PIhalf, self.back_wall) updateUndefinedWall(self.left_wall, self.front_wall, -PIhalf,", "PIhalf), self.L90_135: self.Line(self.L90_135, 135, 90, 1, -PIhalf), self.L135_180: self.Line(self.L135_180, 135,", "int(left_diagonal_distance), int(expected_diagonal_distance))) self.been_in_chicane = True return ChicaneAction(self, self.LEFT, self.distance, self.speed,", "1.0: do_stop = True if do_stop: return self.stop_action else: return", "self.lines = {self.L315_0: self.Line(self.L315_0, 315, 0, -1, math.pi), self.L0_45: self.Line(self.L0_45,", "= -150 distance_from_wall = state.left_wall_distance distance_error = distance_from_wall - self.distance", "not self.been_in_chicane and front_distance > 300 and left_diagonal_distance > expected_diagonal_distance", "- turning, lfd={: 
4d} fd={: 4d} dd={: 4d} ed={: 4d}\".format(int(state.left_front_distance_of_wall),", "PIhalf = math.pi / 2 class MazeAttitude: UNKNOWN = 0", "elif distance_error < 0 and distance_error < -distance_speed: angle =", "0, 1, -math.pi), self.L45_90: self.Line(self.L45_90, 45, 90, -1, PIhalf), self.L90_135:", "distance_error < 0 and distance_error < -distance_speed: angle = math.pi", "at distance {:04d} at speed {:04d}, start heading {:07.3f}, requested", "a + math.pi return a class Line: def __init__(self, line_index,", "for \" + str(self.time) + \" ticks.\") return self return", "= self.rover.getRoverState() heading = state.heading.heading last_heading = self.last_heading self.last_heading =", "right_distances: log(LOG_LEVEL_INFO, \"Found corner 2 - turning left, fd={: 4d}", "!= MazeAttitude.UNKNOWN and pmid == MazeAttitude.UNKNOWN and plong2 == MazeAttitude.UNKNOWN:", "attitude = MazeAttitude() radar = Radar(0, radar_values, radar_status, Radar(0, radar_last_values,", "3d} la={: 3d} ld={: 3d} ra={: 3d} rd={: 3d} s_spd={:", "180 / math.pi), w.distance)) def printWalls(): for p in attitude.points:", "< -distance_speed: angle = -math.pi / 4 if front_distance <", "wall = \"right wall\" elif point == MazeAttitude.FRONT_WALL: wall =", "not None: if preferable_wall.angle is not None: wall.setAngleAndDistance(self.normAngle(preferable_wall.angle + wall_adjust),", "next_action=None): super(ChicaneAction, self).__init__(agent) self.left_or_right = left_or_right self.distance = distance self.speed", "* math.cos(state.left_wall_angle) * SQRT2 if False and not self.been_in_chicane and", "= self.ds_angle == 0 or self.ds_angle == 180 self.selected_line =", "lmline = lines[self.left_mid_point_index] lline = lines[self.left_point_index] mline = lines[self.mid_point_index] rline", "attitude.lines[a].angle is None: print(\"{:3d} -> point too far - not", "80 * -(1 if self.left_or_right == self.RIGHT else -1)) self.pid", "self.NO_GAP self.walls = {self.right_wall.ds_angle: 
self.right_wall, self.left_wall.ds_angle: self.left_wall, self.front_wall.ds_angle: self.front_wall, self.back_wall.ds_angle:", "int(front_distance), int(wall_angle * 180 / math.pi), int(distance_from_wall), int(distance_error), int(state.left_wall_angle *", "< 10: angle = 0 elif distance_error > 0 and", "speed self.next_action = next_action def start(self): self.rover.command(pyroslib.publish, self.speed, 0) #", "self.long_point_index = long_point_index self.factor = factor self.adjust = adjust self.angle", "= distance self.speed = speed self.next_action = next_action if self.left_or_right", "# MIT License # import math import pyroslib import pyroslib.logging", "in self.lines: self.lines[line].calcAngle(self.distances) wls = [self.walls[w_ds_angle] for w_ds_angle in self.WALLS", "10, 45: SQRT2 * 10, 90: 10, 135: SQRT2 *", "left, fd={: 4d} ld={: 4d} rd={: 4d}\".format(int(front_distance), int(left_distances), int(right_distances))) return", "class MazeTurnAroundCornerAction(MazeAction): def __init__(self, agent, left_or_right, distance, speed, next_action=None): super(MazeTurnAroundCornerAction,", "points[self.right_point_index] = self.wall_point_kind self.setAngle(mline.angle, distances) elif plong1 == MazeAttitude.UNKNOWN and", "= 315 self.a2 = 270 self.a3 = 225 self.left_corner_action =", "def end(self): super(ChicaneAction, self).end() def next(self): if self.left_or_right == self.LEFT:", "if self.left_or_right == self.RIGHT: distance = -1000000000 distance_from_wall = state.radar.radar[90]", "* 10, 270: 10, 315: SQRT2 * 10} radar_last_values =", "if state.left_wall_angle < 0: expected_diagonal_distance = front_distance * 2 *", "0} self.distances = {0: 0, 45: 0, 90: 0, 135:", "= front_distance * math.cos(state.left_wall_angle) * SQRT2 if False and not", "state.left_wall_angle) else: expected_diagonal_distance = front_distance * math.cos(state.left_wall_angle) * SQRT2 if", "* 180 / math.pi), int(0), int(steer_speed), int(distance_speed), int(distance), 
int(angle), int(state.heading.heading),", "* 180 / math.pi), int(distance_from_wall), int(distance_error), int(state.left_wall_angle * 180 /", "None and pmid == MazeAttitude.UNKNOWN and plong2 == MazeAttitude.UNKNOWN: points[self.mid_point_index]", "3d} rd={: 3d} s_spd={: 3d} dist_spd={: 3d} dist={: 4d} angle={:", "MIT License # import math import pyroslib import pyroslib.logging import", "= self.wall_point_kind self.setAngle(mline.angle, distances) else: if dlong1 < dlong2 and", "return self else: if self.next_action is not None: log(LOG_LEVEL_INFO, \"Finished", "= state.wheel_odos.odos log(LOG_LEVEL_INFO, \"{:16.3f}: dist_f={: 4d} wa={: 3d} dist_w={: 4d}", "1: self.distance = 0 else: if self.is_front_or_back: self.distance = abs(int(math.sin(angle)", "factor, adjust): self.line_index = line_index self.short_point_index = short_point_index self.long_point_index =", "and distance_error < -distance_speed: angle = math.pi / 4 else:", "point = attitude.points[a] if point is None: print(\"{:3d} -> line", "{'fl': 0, 'fr': 0, 'bl': 0, 'br': 0} def setRequiredOdo(self,", "800: log(LOG_LEVEL_INFO, \"Found end ofchicane - leaging, rfd={: 4d}\".format(int(diagonal_distance))) return", "dd={: 4d} ed={: 4d}\".format(int(state.left_front_distance_of_wall), int(front_distance), int(left_diagonal_distance), int(expected_diagonal_distance))) return self.left_corner_action if", "-(1 if self.left_or_right == self.RIGHT else -1)) self.pid = PID(1,", "> 300 and left_diagonal_distance > expected_diagonal_distance * 1.2: log(LOG_LEVEL_INFO, \"Found", "= distance def tryFindingWall(self, distances, lines, points): lmline = lines[self.left_mid_point_index]", "in WHEEL_NAMES: if state.wheel_odos[wheel_name] >= self.required_odo[wheel_name]: do_stop = True if", "distance = distances[self.mid_point_index] if distance < 1: self.distance = 0", "\" \" + str(angle)) wheel_orientations = state.wheel_odos.odos # log(LOG_LEVEL_INFO, \"{:16.3f}:", "SQRT2 = math.sqrt(2) PIhalf = math.pi / 2 class 
MazeAttitude:", "def execute(self): state = self.rover.getRoverState() front_distance = state.radar.radar[0] gain =", "state.radar.radar[45] if left_distances > right_distances: log(LOG_LEVEL_INFO, \"Found corner 2 -", "- wall_adjust), self.distances[wall.mid_point_index]) self.points[wall.ds_angle] = wall.wall_point_kind self.distances = {p: getPointDistance(state,", "is None: print(\"{:3d} -> point too far - not calculated\".format(a))", "!= MazeAttitude.UNKNOWN and lmline.angle * MazeAttitude.ANGLE_TOLLERANCE >= lline.angle >= lmline.angle", "= 1000000000 else: distance = steer_speed / state.right_wall_angle if 0", "< 0 and distance_error < -distance_speed: angle = -math.pi /", "ld={: 3d} ra={: 3d} rd={: 3d} s_spd={: 3d} dist_spd={: 3d}", "{:07.3f}, requested heading {:07.3f}\".format(self.distance, self.speed, self.start_heading, self.requested_heading)) self.rover.command(pyroslib.publish, self.speed, 0,", "math.cos(state.left_wall_angle) * SQRT2 if False and not self.been_in_chicane and front_distance", "* 180 / math.pi), int(state.left_front_distance_of_wall), int(state.right_wall_angle * 180 / math.pi),", "self.left_or_right == self.LEFT: diagonal_distance = state.radar.radar[45] else: diagonal_distance = state.radar.radar[315]", "else: log(LOG_LEVEL_INFO, \"Found corner 2 - turning left, fd={: 4d}", "45) self.back_wall = self.Wall(180, 4, self.BACK_WALL, 90, 135, 180, 225)", "self) self.right_corner_action = MazeTurnAroundCornerAction(self, self.RIGHT, int(self.distance * 1), self.speed, self)", "MazeAttitude.normAngle(mline.angle - PIhalf) distance = distances[self.right_point_index] * abs(math.sin(mline.angle) / SQRT2)", "* 180 / math.pi), int(distance_from_wall), int(distance_error), int(0 * 180 /", "a <= -PIhalf: a = a + math.pi return a", "# attitude.calculate(state) # printWalls() state.radar.radar[180] = 50 state.radar.radar[315] = 30", "0: distance = -150 distance = -distance distance_from_wall = state.right_wall_distance", "self.error = 
self.pid.process(self.requested_heading, heading) if self.left_or_right == self.LEFT and self.error", "-1) self.speed = speed self.start_heading = 0 self.last_heading = 0", "str(self.time) + \" ticks.\") def end(self): pass def next(self): if", "None, radar, None, None) def printWallLines(a): if attitude.lines[a].angle is None:", "self.wall_point_kind points[self.mid_point_index] = self.wall_point_kind self.setAngle(lline.angle, distances) elif plong1 != MazeAttitude.UNKNOWN", "end ofchicane - leaging, rfd={: 4d}\".format(int(diagonal_distance))) return self.next_action return self", "= WheelOdos.normaliseOdo(state.wheel_odos[wheel] + self.required_odo[wheel]) log(LOG_LEVEL_DEBUG, \"Reset odo to \" +", "speed=150, steer=5-7, dist=4 # self.speed = 150 # 150 speed", "__init__(self, agent, left_or_right, distance, speed, next_action=None): super(ChicaneAction, self).__init__(agent) self.left_or_right =", "= attitude.points[a] if point is None: print(\"{:3d} -> line at", "0 L45_90 = 45 L90_135 = 90 L135_180 = 135", "math.pi), int(distance_from_wall), int(distance_error), int(0 * 180 / math.pi), int(0), int(0", "self.angle = MazeAttitude.normAngle(math.atan2(lsqrt2, lsqrt2 - short_distance) * self.factor + self.adjust)", "10 and by 180 -> 450/10 - 45deg else: try:", "= 225 self.left_corner_action = MazeTurnAroundCornerAction(self, self.LEFT, self.distance, self.speed, self) self.right_corner_action", "if False and not self.been_in_chicane and front_distance > 300 and", "self.distance, self.speed, next_action=self) else: log(LOG_LEVEL_INFO, \"Found corner - turning, lfd={:", "long_distance is not None and short_distance is not None: lsqrt2", "= self.Wall(180, 4, self.BACK_WALL, 90, 135, 180, 225) self.left_gap =", "else: diagonal_distance = state.radar.radar[315] if self.left_or_right == self.LEFT and diagonal_distance", "abs(state.radar.radar_deltas[0]) > 100: log(LOG_LEVEL_INFO, \"Front distance not correct: d={:4d} s={:2d}", "\"Found chicane... 
lfd={: 4d} fd={: 4d} dd={: 4d} ed={: 4d}\".format(int(state.left_front_distance_of_wall),", "{:3d} -> is too far - not calculated\".format(w.ds_angle)) else: if", "270 self.a3 = 225 self.left_corner_action = MazeTurnAroundCornerAction(self, self.LEFT, self.distance, self.speed,", "setRequiredOdo(self, distance): for wheel_name in WHEEL_NAMES: self.required_odo[wheel_name] = distance def", "270, -1, PIhalf), self.L270_315: self.Line(self.L270_315, 315, 270, 1, -PIhalf)} self.right_wall", "invoking next action \" + self.next_action.getActionName()) else: log(LOG_LEVEL_INFO, \"Finishing turning", "distance_error > distance_speed: angle = -math.pi / 4 elif distance_error", "self.heading, self.RIGHT, self.distance, self.speed, DriverForwardForTimeActoun(10, self.speed, None)) self.been_in_chicane = False", "0, 270: 0, 315: 0} attitude = MazeAttitude() radar =", "* 13 # state.radar.radar[225] = SQRT2 * 12 # attitude.calculate(state)", "do_stop = False log(LOG_LEVEL_DEBUG, \"Driving to \" + str(self.required_odo)) for", "left_diagonal_distance = state.radar.radar[315] front_distance = state.radar.radar[0] gain = 60 offset", "ChicaneAction(MazeAction): def __init__(self, agent, left_or_right, distance, speed, next_action=None): super(ChicaneAction, self).__init__(agent)", "calculated\".format(a)) else: angle = int(attitude.lines[a].angle * 180 / math.pi) point", "+ \" \" + str(self.speed) + \" \" + str(angle))", "distance_speed = speed * speed_distance_fudge_factor if self.left_or_right == self.RIGHT: wall_angle", "steer_speed / state.left_wall_angle if 0 <= distance < 150: distance", "MazeAttitude.RIGHT_WALL: wall = \"right wall\" elif point == MazeAttitude.FRONT_WALL: wall", "= state.radar.radar[0] if state.radar.status[0] != 0 and abs(state.radar.radar_deltas[0]) > 100:", "> 100: log(LOG_LEVEL_INFO, \"Front distance not correct: d={:4d} s={:2d} delta={:4d}\".format(front_distance,", "self.been_in_chicane and front_distance > 300 and left_diagonal_distance > 
expected_diagonal_distance *", "+ str(angle)) wheel_orientations = state.wheel_odos.odos # log(LOG_LEVEL_INFO, \"{:16.3f}: dist_f={: 4d}", "state.radar.status[0], state.radar.radar_deltas[0])) else: if state.left_front_distance_of_wall > 100 and front_distance <", "{:3d} -> has angle {:3d} and is at {:3d}\".format(w.ds_angle, int(w.angle", "angle = int(attitude.lines[a].angle * 180 / math.pi) point = attitude.points[a]", "self.next_action = next_action if self.left_or_right == MazeAction.RIGHT: self.a1 = 45", "diagonal_distance > 800: log(LOG_LEVEL_INFO, \"Found second part of chicane, rfd={:", "90, 135, 180, 225) self.left_gap = self.NO_GAP self.right_gap = self.NO_GAP", "points[self.mid_point_index] = self.wall_point_kind points[self.right_point_index] = self.wall_point_kind self.setAngle(mline.angle, distances) def __init__(self):", "wa={: 3d} dw={: 4d} de={: 4d} d={: 4d} s={: 3d}\".format(int(wall_angle),", "self.wall_point_kind points[self.right_point_index] = self.wall_point_kind self.setAngle(mline.angle, distances) elif lline.angle is not", "if status == 0: return distance last_distance = state.radar.last_radar[angle] if", "distances): long_distance = distances[self.long_point_index] short_distance = distances[self.short_point_index] if long_distance is", "> 0 and distance_error > distance_speed: angle = -math.pi /", "else: distance = steer_speed / state.right_wall_angle if 0 <= distance", "super(MazeTurnAroundCornerAction, self).end() def next(self): heading = state.heading.heading self.error = self.pid.process(self.requested_heading,", "if self.time > 0: self.time -= 1 log(LOG_LEVEL_INFO, \"Going forward", "by 180 -> 450/10 - 45deg else: try: angle =", "self.setAngle(mline.angle, distances) else: if dlong1 < dlong2 and plong1 ==", "pyroslib import pyroslib.logging import time from pyroslib.logging import log, LOG_LEVEL_ALWAYS,", "not None: lsqrt2 = long_distance / SQRT2 self.angle = MazeAttitude.normAngle(math.atan2(lsqrt2,", "* 5 * 0.9 # 
state.radar.radar[315] = SQRT2 * 17", "+ self.adjust) else: self.angle = None class Wall: def __init__(self,", "{:s}\".format(a, angle, wall)) def printWall(w): if w.angle is None: print(\"Wall", "= state.radar.radar[90] distance_error = distance_from_wall - self.distance angle = 0", "self.rover.command(pyroslib.publish, self.speed, 0) # pyroslib.publish(\"move/drive\", \"0 \" + str(self.speed)) log(LOG_LEVEL_INFO,", "== self.RIGHT and self.error < 0: return self else: if", "= 0 self.last_heading = 0 self.requested_heading = 0 self.pid =", "return self return self.next_action if __name__ == \"__main__\": from rover", "and by 180 -> 450/10 - 45deg else: try: angle", "def tryFindingWall(self, distances, lines, points): lmline = lines[self.left_mid_point_index] lline =", "PIhalf, self.back_wall) updateUndefinedWall(self.left_wall, self.front_wall, -PIhalf, self.back_wall) # TODO calc gaps", "= 225 self.left_corner_action = MazeTurnAroundCornerAction(self, self.LEFT, int(self.distance * 1), self.speed,", "angle = math.pi / 4 else: try: angle = -math.asin(distance_error", "left_or_right self.distance = distance self.speed = speed self.next_action = next_action", "4d} s={: 3d}\".format(int(0), int(distance_from_wall), int(distance_error), int(distance), int(speed))) else: distance =", "fd={: 4d} dd={: 4d} ed={: 4d}\".format(int(state.left_front_distance_of_wall), int(front_distance), int(left_diagonal_distance), int(expected_diagonal_distance))) return", "= [0, 45, 90, 135, 180, 225, 270, 315] WALLS", "distance_error < -distance_speed: angle = math.pi / 4 else: try:", "speed_steer_fudge_factor = 5 # 5-7 speed_distance_fudge_factor = 4 # 4", "odo to update to correct value! 
speed_steer_fudge_factor = 5 #", "# pyroslib.publish(\"move/steer\", \"300 120\") def end(self): super(MoveForwardOnOdo, self).end() def next(self):", "POINTS = [0, 45, 90, 135, 180, 225, 270, 315]", "state.radar.radar[225] = SQRT2 * 12 # attitude.calculate(state) # printWalls() state.radar.radar[180]", "< 150: distance = 150 elif -150 < distance <", "int(state.right_front_distance_of_wall), int(steer_speed), int(distance_speed), int(distance), int(angle), int(state.heading.heading), float(state.wheel_orientations.orientations['fl']) )) def getActionName(self):", "self.right_wall, -PIhalf, self.left_wall) updateUndefinedWall(self.back_wall, self.right_wall, PIhalf, self.left_wall) updateUndefinedWall(self.right_wall, self.front_wall, PIhalf,", "ANGLE_TOLLERANCE = 1.075 @staticmethod def normAngle(a): if a > PIhalf:", "diagonal_distance > 800: log(LOG_LEVEL_INFO, \"Found end ofchicane - leaging, rfd={:", "try: angle = math.asin(distance_error / distance_speed) except BaseException as ex:", "+ str(self.speed)) log(LOG_LEVEL_INFO, \"Going forward for \" + str(self.time) +", "self.wall_point_kind self.setAngle(mline.angle, distances) else: if dlong1 < dlong2 and plong1", "MazeAttitude.ANGLE_TOLLERANCE >= lline.angle >= lmline.angle / MazeAttitude.ANGLE_TOLLERANCE: points[self.mid_point_index] = points[lline.long_point_index]", "belogs to {:s}\".format(a, angle, wall)) def printWall(w): if w.angle is", "to correct value! 
speed_steer_fudge_factor = 5 # 5-7 speed_distance_fudge_factor =", "= 180 L225_270 = 225 L270_315 = 270 L315_0 =", "= abs(int(math.sin(angle) * distance)) else: self.distance = abs(int(math.cos(angle) * distance))", "1), self.speed, self) # self.right_corner_action = MazeTurnAroundCornerAction(self.odo, self.radar, self.heading, self.RIGHT,", "550: expected_diagonal_distance = 0 if state.left_wall_angle < 0: expected_diagonal_distance =", "return self.right_corner_action if state.right_front_distance_of_wall > 100 and state.left_front_distance_of_wall > 100", "in self.required_odo: self.required_odo[wheel] = WheelOdos.normaliseOdo(state.wheel_odos[wheel] + self.required_odo[wheel]) log(LOG_LEVEL_DEBUG, \"Reset odo", ")) def getActionName(self): return \"Corridor\" class MazeTurnAroundCornerAction(MazeAction): def __init__(self, agent,", "- not calculated\".format(a)) else: angle = int(attitude.lines[a].angle * 180 /", "min_angle: distance = 1000000000 else: distance = steer_speed / state.left_wall_angle", "getPointDistance(state, angle): distance = state.radar.radar[angle] status = state.radar.status[angle] if status", "__init__(self, line_index, long_point_index, short_point_index, factor, adjust): self.line_index = line_index self.short_point_index", "+ self.next_action.getActionName()) else: log(LOG_LEVEL_INFO, \"Finishing turning - no next action", "- distance not calculated\".format(w.ds_angle, int(w.angle * 180 / math.pi))) else:", "lh={:07.3f} dh={:07.3f} rh={:07.3f} e={:07.3f}\" .format(self.speed, heading, last_heading, angleDiference(heading, last_heading), self.requested_heading,", "17 # state.radar.radar[270] = SQRT2 * 13 # state.radar.radar[225] =", "wall\" elif point == MazeAttitude.BACK_WALL: wall = \"back wall\" else:", "self.Line(self.L270_315, 315, 270, 1, -PIhalf)} self.right_wall = self.Wall(90, 2, self.RIGHT_WALL,", "180 / math.pi), int(state.right_front_distance_of_wall), int(steer_speed), int(distance_speed), int(distance), int(angle), 
int(state.heading.heading), float(state.wheel_orientations.orientations['fl'])", "# mm/second - TODO use odo to update to correct", "= self.wall_point_kind self.setAngle(mline.angle, distances) def __init__(self): self.lines = {self.L315_0: self.Line(self.L315_0,", "distance_error > 0 and distance_error > distance_speed: angle = math.pi", "elif point == MazeAttitude.RIGHT_WALL: wall = \"right wall\" elif point", "tryFindingWall(self, distances, lines, points): lmline = lines[self.left_mid_point_index] lline = lines[self.left_point_index]", "= 0 def start(self): super(MazeTurnAroundCornerAction, self).start() state = self.rover.getRoverState() self.start_heading", "-math.pi), self.L225_270: self.Line(self.L225_270, 225, 270, -1, PIhalf), self.L270_315: self.Line(self.L270_315, 315,", "> 100 and state.left_front_distance_of_wall > 100 and front_distance < 700:", "int(w.angle * 180 / math.pi), w.distance)) def printWalls(): for p", "log(LOG_LEVEL_INFO, \"Finishing turning - no next action spectified.\") return self.next_action", "and self.error > 0: return self elif self.left_or_right == self.RIGHT", "wall.tryFindingWall(self.distances, self.lines, self.points) updateUndefinedWall(self.front_wall, self.right_wall, -PIhalf, self.left_wall) updateUndefinedWall(self.back_wall, self.right_wall, PIhalf,", "next_action self.error = 0 def start(self): super(MazeTurnAroundCornerAction, self).start() state =", "= -1000000000 distance_from_wall = state.radar.radar[90] distance_error = distance_from_wall - self.distance", "= 135 L180_225 = 180 L225_270 = 225 L270_315 =", "distances[self.short_point_index] if long_distance is not None and short_distance is not", "def next(self): state = self.rover.getRoverState() do_stop = False log(LOG_LEVEL_DEBUG, \"Driving", "angleDiference(heading, last_heading), self.requested_heading, self.error)) def getActionName(self): return \"Turn-Around-Corner\" class DriverForwardForTimeAction(Action):", "UNKNOWN = 0 LEFT_WALL = 1 RIGHT_WALL = 2 
FRONT_WALL", "-math.pi), self.L45_90: self.Line(self.L45_90, 45, 90, -1, PIhalf), self.L90_135: self.Line(self.L90_135, 135,", "angle = MazeAttitude.normAngle(mline.angle - PIhalf) distance = distances[self.right_point_index] * abs(math.sin(mline.angle)", "int(distance_from_wall), int(distance_error), int(distance), int(speed))) else: wall_angle = state.left_wall_angle if -min_angle", "4d}\".format(int(state.left_front_distance_of_wall), int(front_distance), int(left_diagonal_distance), int(expected_diagonal_distance))) self.been_in_chicane = True return ChicaneAction(self, self.LEFT,", "short_distance) * self.factor + self.adjust) else: self.angle = None class", "= self.wall_point_kind points[self.right_point_index] = self.wall_point_kind self.setAngle(mline.angle, distances) def __init__(self): self.lines", "def getActionName(self): return \"Chicane \" + (\"L\" if self.left_or_right ==", "self).end() def next(self): heading = state.heading.heading self.error = self.pid.process(self.requested_heading, heading)", "10, self.speed, None)) def start(self): super(ChicaneAction, self).start() def end(self): super(ChicaneAction,", "\" + str(self.speed) + \" \" + str(angle)) wheel_orientations =", "angle, wall)) def printWall(w): if w.angle is None: print(\"Wall {:3d}", "super(MazeCorridorAction, self).__init__(agent) self.left_or_right = left_or_right self.distance = distance self.speed =", "not calculated\".format(w.ds_angle, int(w.angle * 180 / math.pi))) else: print(\"Wall {:3d}", "0, 315: 0} attitude = MazeAttitude() radar = Radar(0, radar_values,", "distances[self.right_point_index] * abs(math.sin(mline.angle) / SQRT2) self.setAngleAndDistance(angle, distance) elif dlong1 >=", "mline.angle is not None: if lline.angle * MazeAttitude.ANGLE_TOLLERANCE >= mline.angle", "int(left_diagonal_distance), int(expected_diagonal_distance))) return self.left_corner_action if front_distance < 550 and state.radar.radar_deltas[0]", "time, speed, next_action): 
super(DriverForwardForTimeAction, self).__init__(agent) self.time = time self.speed =", "90 self.a3 = 135 else: self.a1 = 315 self.a2 =", "check_next_action_conditions(self): return self class ChicaneAction(MazeAction): def __init__(self, agent, left_or_right, distance,", "self).start() def end(self): super(ChicaneAction, self).end() def next(self): if self.left_or_right ==", "and front_distance < 550: expected_diagonal_distance = 0 if state.left_wall_angle <", "+ \" ticks.\") def end(self): pass def next(self): if self.time", "and distance_error < -distance_speed: angle = math.pi / 4 if", "* 1), self.speed, self) self.right_corner_action = MazeTurnAroundCornerAction(self, self.RIGHT, int(self.distance *", "self.requested_heading = 0 self.pid = None self.next_action = next_action self.error", "< min_angle: distance = 1000000000 else: distance = steer_speed /", "d={: 4d} s={: 3d}\".format(int(wall_angle), int(distance_from_wall), int(distance_error), int(distance), int(speed))) else: wall_angle", "mm/second - TODO use odo to update to correct value!", "1, -PIhalf)} self.right_wall = self.Wall(90, 2, self.RIGHT_WALL, 0, 45, 90,", "log(LOG_LEVEL_INFO, \"Found second part of chicane, rfd={: 4d}\".format(int(diagonal_distance))) self.left_or_right =", "distance = 150 elif -150 < distance < 0: distance", "= 5 # 5-7 speed_distance_fudge_factor = 4 # 4 min_angle", "4 elif distance_error < 0 and distance_error < -distance_speed: angle", "for p in attitude.points: printWallLines(p) for w in attitude.walls: printWall(w)", "def start(self): super(MoveForwardOnOdo, self).start() state = self.rover.getRoverState() for wheel in", "* 2 * math.cos(math.pi / 4 + state.left_wall_angle) else: expected_diagonal_distance", "180, 225, 270, 315) self.front_wall = self.Wall(0, 0, self.FRONT_WALL, 270,", "that worked speed=150, steer=5-7, dist=4 # self.speed = 150 #", "-150 distance = -distance distance_from_wall = state.right_wall_distance distance_error = distance_from_wall", "PIhalf) 
distance = distances[self.left_point_index] * abs(math.sin(mline.angle) / SQRT2) self.setAngleAndDistance(angle, distance)", "math.pi), w.distance)) def printWalls(): for p in attitude.points: printWallLines(p) for", "45deg else: try: angle = math.asin(distance_error / distance_speed) except BaseException", "= False def start(self): super(MazeCorridorAction, self).start() self.been_in_chicane = False def", "def __init__(self, agent, time, speed, next_action): super(DriverForwardForTimeAction, self).__init__(agent) self.time =", "self.is_front_or_back = self.ds_angle == 0 or self.ds_angle == 180 self.selected_line", "self.distances[wall.mid_point_index]) self.points[wall.ds_angle] = wall.wall_point_kind self.distances = {p: getPointDistance(state, p) for", "else: print(\"Wall {:3d} -> has angle {:3d} and is at", "def next(self): if self.time > 0: self.time -= 1 log(LOG_LEVEL_INFO,", "2 FRONT_WALL = 4 BACK_WALL = 8 NO_GAP = 0", "short_point_index, factor, adjust): self.line_index = line_index self.short_point_index = short_point_index self.long_point_index", "in self.POINTS} for line in self.lines: self.lines[line].calcAngle(self.distances) wls = [self.walls[w_ds_angle]", "0, 180: 0, 225: 0, 270: 0, 315: 0} self.distances", "MazeAttitude.normAngle(mline.angle + PIhalf) distance = distances[self.left_point_index] * abs(math.sin(mline.angle) / SQRT2)", "plong2 == MazeAttitude.UNKNOWN: points[self.mid_point_index] = self.wall_point_kind points[self.right_point_index] = self.wall_point_kind self.setAngle(mline.angle,", "distance_speed: angle = math.pi / 4 elif distance_error < 0", "MazeAttitude.UNKNOWN and lmline.angle * MazeAttitude.ANGLE_TOLLERANCE >= lline.angle >= lmline.angle /", "= int(distance) angle = int(angle * 180 / math.pi) self.rover.command(pyroslib.publish,", "= MazeTurnAroundCornerAction(self, self.RIGHT, self.distance, self.speed, DriverForwardForTimeAction(self, 10, self.speed, None)) def", "for \" + str(self.time) + \" ticks.\") def end(self): pass", 
"by 180 -> 450/10 - 45deg elif distance_error < 0", "* 180 / math.pi) self.rover.command(pyroslib.publish, self.speed, angle, distance) # pyroslib.publish(\"move/steer\",", "MazeAttitude.normAngle(math.atan2(lsqrt2, lsqrt2 - short_distance) * self.factor + self.adjust) else: self.angle", "not None and plong1 == MazeAttitude.UNKNOWN and pmid == MazeAttitude.UNKNOWN:", "try: angle = -math.asin(distance_error / distance_speed) except BaseException as ex:", "math.sqrt(2) PIhalf = math.pi / 2 class MazeAttitude: UNKNOWN =", "distance_error = distance_from_wall - self.distance angle = 0 if abs(distance_error)", "/ MazeAttitude.ANGLE_TOLLERANCE: points[self.mid_point_index] = points[rline.long_point_index] angle = MazeAttitude.normAngle(mline.angle + PIhalf)", "state.radar.radar[45] = SQRT2 * 5 * 0.9 # state.radar.radar[315] =", "calculated\".format(w.ds_angle)) else: if w.distance is None: print(\"Wall {:3d} -> has", "value! speed_steer_fudge_factor = 5 # 5-7 speed_distance_fudge_factor = 4 #", "self.RIGHT and diagonal_distance > 800: log(LOG_LEVEL_INFO, \"Found end ofchicane -", "self.required_odo[wheel_name] = distance def start(self): super(MoveForwardOnOdo, self).start() state = self.rover.getRoverState()", "distance = steer_speed / state.right_wall_angle if 0 <= distance <", "log(LOG_LEVEL_INFO, \"Turning speed={:04d} h={:07.3f} lh={:07.3f} dh={:07.3f} rh={:07.3f} e={:07.3f}\" .format(self.speed, heading,", "updateUndefinedWall(self.front_wall, self.right_wall, -PIhalf, self.left_wall) updateUndefinedWall(self.back_wall, self.right_wall, PIhalf, self.left_wall) updateUndefinedWall(self.right_wall, self.front_wall,", "self.RIGHT_WALL, 0, 45, 90, 135) self.left_wall = self.Wall(270, 6, self.LEFT_WALL,", "dlong1 < dlong2 and plong1 == MazeAttitude.UNKNOWN and pmid ==", "self.pid.process(self.requested_heading, self.start_heading) log(LOG_LEVEL_INFO, \"Starting to turn around corner at distance", "the corner - invoking next action \" + self.next_action.getActionName()) 
else:", "state.wheel_odos.odos # log(LOG_LEVEL_INFO, \"{:16.3f}: dist_f={: 4d} wa={: 3d} dist_w={: 4d}", "1.0 or state.radar.radar[45] < 1.0: do_stop = True if do_stop:", "angle {:3d} but is too far - distance not calculated\".format(w.ds_angle,", "+ (\"L\" if self.left_or_right == self.LEFT else \"R\") class MazeCorridorAction(MazeAction):", "s={:2d} delta={:4d}\".format(front_distance, state.radar.status[0], state.radar.radar_deltas[0])) else: if state.left_front_distance_of_wall > 100 and", "0} def setRequiredOdo(self, distance): for wheel_name in WHEEL_NAMES: self.required_odo[wheel_name] =", "self.rover.command(pyroslib.publish, self.speed, angle, distance) # pyroslib.publish(\"move/steer\", str(distance) + \" \"", "turning to finish, rfd={: 4d} fd={: 4d} \".format(int(state.right_front_distance_of_wall), int(front_distance))) return", "time from pyroslib.logging import log, LOG_LEVEL_ALWAYS, LOG_LEVEL_INFO, LOG_LEVEL_DEBUG from rover", "printWall(w) print(\"----------------------------------------------------------\") # attitude.calculate(state) # printWalls() # # state.radar.radar[0] =", "def next(self): heading = state.heading.heading self.error = self.pid.process(self.requested_heading, heading) if", "if __name__ == \"__main__\": from rover import Radar, RoverState radar_values", "+ 80 * -(1 if self.left_or_right == self.RIGHT else -1))", "-150 < distance < 0: distance = -150 distance_from_wall =", "spectified.\") return self.next_action def execute(self): state = self.rover.getRoverState() heading =", "else: try: angle = math.asin(distance_error / distance_speed) except BaseException as", "and abs(state.radar.radar_deltas[0]) > 100: log(LOG_LEVEL_INFO, \"Front distance not correct: d={:4d}", "normaiseAngle(self.start_heading + 80 * -(1 if self.left_or_right == self.RIGHT else", "points[self.mid_point_index] plong2 = points[self.right_point_index] if dlong1 < dlong2 and plong1", "or state.radar.radar[315] < 1.0 or state.radar.radar[45] < 1.0: do_stop =", 
"= steer_speed / state.right_wall_angle if 0 <= distance < 150:", "-1 RIGHT = 1 def __init__(self, agent): super(MazeAction, self).__init__(agent) def", "self.Line(self.L90_135, 135, 90, 1, -PIhalf), self.L135_180: self.Line(self.L135_180, 135, 180, -1,", "angle -= math.pi * (450 - front_distance) / 1800 #", "= SQRT2 * 5 * 0.9 # state.radar.radar[315] = SQRT2", "distance_speed: angle = -math.pi / 4 if front_distance < 450:", "\"{:16.3f}: dist_f={: 4d} wa={: 3d} dist_w={: 4d} dist_err={: 3d} la={:", "135 L180_225 = 180 L225_270 = 225 L270_315 = 270", "= None self.distance = None def setAngle(self, angle, distances): self.angle", "1 SIDE_GAP = 2 POINTS = [0, 45, 90, 135,", "{:3d} but is too far - distance not calculated\".format(w.ds_angle, int(w.angle", "__init__(self, agent, left_or_right, distance, speed, next_action=None): super(MazeTurnAroundCornerAction, self).__init__(agent) self.left_or_right =", "315: 0} self.distances = {0: 0, 45: 0, 90: 0,", "[L0_45, L45_90, L90_135, L135_180, L180_225, L225_270, L270_315, L315_0] ANGLE_TOLLERANCE =", "int(distance) angle = int(angle * 180 / math.pi) self.rover.command(pyroslib.publish, self.speed,", "distance_error > 0 and distance_error > distance_speed: angle = -math.pi", "None self.next_action = next_action self.error = 0 def start(self): super(MazeTurnAroundCornerAction,", "wall = \"no wall\" print(\"{:3d} -> line at {:3d} angle", "heading = state.heading.heading self.error = self.pid.process(self.requested_heading, heading) if self.left_or_right ==", "0, -1, math.pi), self.L0_45: self.Line(self.L0_45, 45, 0, 1, -math.pi), self.L45_90:", "90, 135, 180, 225, 270, 315] WALLS = [90, 270,", "135, 90, 1, -PIhalf), self.L135_180: self.Line(self.L135_180, 135, 180, -1, math.pi),", "- no next action spectified.\") return self.next_action def execute(self): state", "distance_from_wall = state.left_wall_distance distance_error = distance_from_wall - self.distance angle =", "else: if point == MazeAttitude.LEFT_WALL: wall = 
\"left wall\" elif", "= 0 LEFT_WALL = 1 RIGHT_WALL = 2 FRONT_WALL =", "state.radar.radar[315] < 1.0 or state.radar.radar[45] < 1.0: do_stop = True", "135: SQRT2 * 10, 180: 10, 225: SQRT2 * 10,", "= self.rover.getRoverState() do_stop = False log(LOG_LEVEL_DEBUG, \"Driving to \" +", "if dlong1 < dlong2 and plong1 == MazeAttitude.UNKNOWN and pmid", "left_or_right, distance, speed, next_action=None): super(ChicaneAction, self).__init__(agent) self.left_or_right = left_or_right self.distance", "4d}\".format(int(state.left_front_distance_of_wall), int(front_distance), int(left_diagonal_distance), int(expected_diagonal_distance))) return self.left_corner_action if front_distance < 550", "-math.pi / 4 elif distance_error < 0 and distance_error <", "= next_action if self.left_or_right == MazeAction.RIGHT: self.a1 = 45 self.a2", "return \"Turn-Around-Corner\" class DriverForwardForTimeAction(Action): def __init__(self, agent, time, speed, next_action):", "agent, left_or_right, distance, speed, next_action=None): super(MazeTurnAroundCornerAction, self).__init__(agent) self.left_or_right = left_or_right", "self.stop_action else: return self def execute(self): pass def getActionName(self): return", "== MazeAttitude.BACK_WALL: wall = \"back wall\" else: wall = \"no", "self.LEFT, self.distance, self.speed, self) self.right_corner_action = MazeTurnAroundCornerAction(self, self.RIGHT, self.distance, self.speed,", "wheel_name in WHEEL_NAMES: if state.wheel_odos[wheel_name] >= self.required_odo[wheel_name]: do_stop = True", "= speed * speed_steer_fudge_factor distance_speed = speed * speed_distance_fudge_factor if", "mline = lines[self.mid_point_index] rline = lines[self.right_point_index] dlong1 = distances[lline.long_point_index] dmid", "is not None: wall.setAngleAndDistance(self.normAngle(preferable_wall.angle + wall_adjust), self.distances[wall.mid_point_index]) else: wall.setAngleAndDistance(self.normAngle(second_wall.angle -", "distance = -distance distance_from_wall = 
state.right_wall_distance distance_error = distance_from_wall -", "self.requested_heading = normaiseAngle(self.start_heading + 80 * -(1 if self.left_or_right ==", "else: wall.setAngleAndDistance(self.normAngle(second_wall.angle - wall_adjust), self.distances[wall.mid_point_index]) self.points[wall.ds_angle] = wall.wall_point_kind self.distances =", "0, diff_method=angleDiference) self.pid.process(self.requested_heading, self.start_heading) log(LOG_LEVEL_INFO, \"Starting to turn around corner", "distance, speed, next_action=None): super(MazeCorridorAction, self).__init__(agent) self.left_or_right = left_or_right self.distance =", "pmid = points[self.mid_point_index] plong2 = points[self.right_point_index] if dlong1 < dlong2", "lfd={: 4d} fd={: 4d} dd={: 4d} ed={: 4d}\".format(int(state.left_front_distance_of_wall), int(front_distance), int(left_diagonal_distance),", "str(self.time) + \" ticks.\") return self return self.next_action if __name__", "\" + str(self.required_odo) + \"; starting...\") self.rover.command(pyroslib.publish, 300, 120) #", "BACK_WALL = 8 NO_GAP = 0 FORWARD_GAP = 1 SIDE_GAP", "str(angle)) wheel_orientations = state.wheel_odos.odos # log(LOG_LEVEL_INFO, \"{:16.3f}: dist_f={: 4d} wa={:", "self.left_gap = self.NO_GAP self.right_gap = self.NO_GAP self.walls = {self.right_wall.ds_angle: self.right_wall,", "start(self): self.rover.command(pyroslib.publish, self.speed, 0) # pyroslib.publish(\"move/drive\", \"0 \" + str(self.speed))", "0: left_distances = state.radar.radar[270] + state.radar.radar[315] right_distances = state.radar.radar[90] +", "/ SQRT2) self.setAngleAndDistance(angle, distance) elif dlong1 >= dlong2 and plong2", "= state.radar.radar[270] + state.radar.radar[315] right_distances = state.radar.radar[90] + state.radar.radar[45] if", "for w_ds_angle in self.WALLS if self.distances[w_ds_angle] is not None] wall_processing_order", "= heading log(LOG_LEVEL_INFO, \"Turning speed={:04d} h={:07.3f} lh={:07.3f} dh={:07.3f} rh={:07.3f} 
e={:07.3f}\"", "in self.WALLS if self.distances[w_ds_angle] is not None] wall_processing_order = sorted(wls,", "+ \" ticks.\") return self return self.next_action if __name__ ==", "self.time = time self.speed = speed self.next_action = next_action def", "finish, rfd={: 4d} fd={: 4d} \".format(int(state.right_front_distance_of_wall), int(front_distance))) return self.right_corner_action return", "> PIhalf: a = a - math.pi elif a <=", "rover import WheelOdos, WHEEL_NAMES from rover import normaiseAngle, angleDiference from", "self.left_wall.ds_angle: self.left_wall, self.front_wall.ds_angle: self.front_wall, self.back_wall.ds_angle: self.back_wall} self.points = {0: 0,", "= adjust self.angle = None def calcAngle(self, distances): long_distance =", "distances[lline.long_point_index] dmid = distances[mline.short_point_index] dlong2 = distances[mline.long_point_index] plong1 = points[self.left_point_index]", "super(MazeTurnAroundCornerAction, self).start() state = self.rover.getRoverState() self.start_heading = state.heading.heading self.requested_heading =", "printWallLines(a): if attitude.lines[a].angle is None: print(\"{:3d} -> point too far", "-math.asin(distance_error / distance_speed) except BaseException as ex: log(LOG_LEVEL_ALWAYS, \"Domain error", "L270_315, L315_0] ANGLE_TOLLERANCE = 1.075 @staticmethod def normAngle(a): if a", "None def calcAngle(self, distances): long_distance = distances[self.long_point_index] short_distance = distances[self.short_point_index]", "to update to correct value! 
speed_steer_fudge_factor = 5 # 5-7", "state.radar.radar[45] < 1.0: do_stop = True if do_stop: return self.stop_action", "too far - not calculated\".format(w.ds_angle)) else: if w.distance is None:", "* speed_steer_fudge_factor distance_speed = speed * speed_distance_fudge_factor if self.left_or_right ==", "(\"L\" if self.left_or_right == self.LEFT else \"R\") class MazeCorridorAction(MazeAction): def", "450/10 - 45deg else: try: angle = math.asin(distance_error / distance_speed)", "4d} de={: 4d} d={: 4d} s={: 3d}\".format(int(0), int(distance_from_wall), int(distance_error), int(distance),", "self).start() self.been_in_chicane = False def end(self): super(MazeCorridorAction, self).end() def next(self):", "int(state.right_wall_angle * 180 / math.pi), int(state.right_front_distance_of_wall), int(steer_speed), int(distance_speed), int(distance), int(angle),", "dlong2 = distances[mline.long_point_index] plong1 = points[self.left_point_index] pmid = points[self.mid_point_index] plong2", "right_distances = state.radar.radar[90] + state.radar.radar[45] if left_distances > right_distances: log(LOG_LEVEL_INFO,", "heading={: 3d} odo={:7.2f}\".format( float(time.time()), int(front_distance), int(wall_angle * 180 / math.pi),", "self.angle = angle distance = distances[self.mid_point_index] if distance < 1:", "import WheelOdos, WHEEL_NAMES from rover import normaiseAngle, angleDiference from challenge_utils", "= {0: 0, 45: 0, 90: 0, 135: 0, 180:", "/ 4 else: try: angle = -math.asin(distance_error / distance_speed) except", "= self.last_heading self.last_heading = heading log(LOG_LEVEL_INFO, \"Turning speed={:04d} h={:07.3f} lh={:07.3f}", "= self.NO_GAP self.walls = {self.right_wall.ds_angle: self.right_wall, self.left_wall.ds_angle: self.left_wall, self.front_wall.ds_angle: self.front_wall,", "if 0 <= distance < 150: distance = 150 elif", "return self def execute(self): state = self.rover.getRoverState() front_distance = state.radar.radar[0]", "= angle distance = 
distances[self.mid_point_index] if distance < 1: self.distance", "self.Line(self.L0_45, 45, 0, 1, -math.pi), self.L45_90: self.Line(self.L45_90, 45, 90, -1,", "return ChicaneAction(self, self.LEFT, self.distance, self.speed, next_action=self) else: log(LOG_LEVEL_INFO, \"Found corner" ]
[ "['Metric'], 'spaceone.monitoring.interface.grpc.v1.project_alert_config': ['ProjectAlertConfig'], 'spaceone.monitoring.interface.grpc.v1.escalation_policy': ['EscalationPolicy'], 'spaceone.monitoring.interface.grpc.v1.event_rule': ['EventRule'], 'spaceone.monitoring.interface.grpc.v1.webhook': ['Webhook'], 'spaceone.monitoring.interface.grpc.v1.maintenance_window':", "['DataSource'], 'spaceone.monitoring.interface.grpc.v1.metric': ['Metric'], 'spaceone.monitoring.interface.grpc.v1.project_alert_config': ['ProjectAlertConfig'], 'spaceone.monitoring.interface.grpc.v1.escalation_policy': ['EscalationPolicy'], 'spaceone.monitoring.interface.grpc.v1.event_rule': ['EventRule'], 'spaceone.monitoring.interface.grpc.v1.webhook':", "= { 'spaceone.monitoring.interface.grpc.v1.data_source': ['DataSource'], 'spaceone.monitoring.interface.grpc.v1.metric': ['Metric'], 'spaceone.monitoring.interface.grpc.v1.project_alert_config': ['ProjectAlertConfig'], 'spaceone.monitoring.interface.grpc.v1.escalation_policy': ['EscalationPolicy'],", "'spaceone.monitoring.interface.grpc.v1.data_source': ['DataSource'], 'spaceone.monitoring.interface.grpc.v1.metric': ['Metric'], 'spaceone.monitoring.interface.grpc.v1.project_alert_config': ['ProjectAlertConfig'], 'spaceone.monitoring.interface.grpc.v1.escalation_policy': ['EscalationPolicy'], 'spaceone.monitoring.interface.grpc.v1.event_rule': ['EventRule'],", "PROTO = { 'spaceone.monitoring.interface.grpc.v1.data_source': ['DataSource'], 'spaceone.monitoring.interface.grpc.v1.metric': ['Metric'], 'spaceone.monitoring.interface.grpc.v1.project_alert_config': ['ProjectAlertConfig'], 'spaceone.monitoring.interface.grpc.v1.escalation_policy':", "['EventRule'], 'spaceone.monitoring.interface.grpc.v1.webhook': ['Webhook'], 'spaceone.monitoring.interface.grpc.v1.maintenance_window': ['MaintenanceWindow'], 'spaceone.monitoring.interface.grpc.v1.alert': ['Alert'], 'spaceone.monitoring.interface.grpc.v1.note': ['Note'], 
'spaceone.monitoring.interface.grpc.v1.event':", "'spaceone.monitoring.interface.grpc.v1.event_rule': ['EventRule'], 'spaceone.monitoring.interface.grpc.v1.webhook': ['Webhook'], 'spaceone.monitoring.interface.grpc.v1.maintenance_window': ['MaintenanceWindow'], 'spaceone.monitoring.interface.grpc.v1.alert': ['Alert'], 'spaceone.monitoring.interface.grpc.v1.note': ['Note'],", "'spaceone.monitoring.interface.grpc.v1.project_alert_config': ['ProjectAlertConfig'], 'spaceone.monitoring.interface.grpc.v1.escalation_policy': ['EscalationPolicy'], 'spaceone.monitoring.interface.grpc.v1.event_rule': ['EventRule'], 'spaceone.monitoring.interface.grpc.v1.webhook': ['Webhook'], 'spaceone.monitoring.interface.grpc.v1.maintenance_window': ['MaintenanceWindow'],", "{ 'spaceone.monitoring.interface.grpc.v1.data_source': ['DataSource'], 'spaceone.monitoring.interface.grpc.v1.metric': ['Metric'], 'spaceone.monitoring.interface.grpc.v1.project_alert_config': ['ProjectAlertConfig'], 'spaceone.monitoring.interface.grpc.v1.escalation_policy': ['EscalationPolicy'], 'spaceone.monitoring.interface.grpc.v1.event_rule':", "'spaceone.monitoring.interface.grpc.v1.metric': ['Metric'], 'spaceone.monitoring.interface.grpc.v1.project_alert_config': ['ProjectAlertConfig'], 'spaceone.monitoring.interface.grpc.v1.escalation_policy': ['EscalationPolicy'], 'spaceone.monitoring.interface.grpc.v1.event_rule': ['EventRule'], 'spaceone.monitoring.interface.grpc.v1.webhook': ['Webhook'],", "['EscalationPolicy'], 'spaceone.monitoring.interface.grpc.v1.event_rule': ['EventRule'], 'spaceone.monitoring.interface.grpc.v1.webhook': ['Webhook'], 'spaceone.monitoring.interface.grpc.v1.maintenance_window': ['MaintenanceWindow'], 'spaceone.monitoring.interface.grpc.v1.alert': ['Alert'], 'spaceone.monitoring.interface.grpc.v1.note':", "'spaceone.monitoring.interface.grpc.v1.webhook': ['Webhook'], 'spaceone.monitoring.interface.grpc.v1.maintenance_window': ['MaintenanceWindow'], 
'spaceone.monitoring.interface.grpc.v1.alert': ['Alert'], 'spaceone.monitoring.interface.grpc.v1.note': ['Note'], 'spaceone.monitoring.interface.grpc.v1.event': ['Event'],", "['ProjectAlertConfig'], 'spaceone.monitoring.interface.grpc.v1.escalation_policy': ['EscalationPolicy'], 'spaceone.monitoring.interface.grpc.v1.event_rule': ['EventRule'], 'spaceone.monitoring.interface.grpc.v1.webhook': ['Webhook'], 'spaceone.monitoring.interface.grpc.v1.maintenance_window': ['MaintenanceWindow'], 'spaceone.monitoring.interface.grpc.v1.alert':", "'spaceone.monitoring.interface.grpc.v1.escalation_policy': ['EscalationPolicy'], 'spaceone.monitoring.interface.grpc.v1.event_rule': ['EventRule'], 'spaceone.monitoring.interface.grpc.v1.webhook': ['Webhook'], 'spaceone.monitoring.interface.grpc.v1.maintenance_window': ['MaintenanceWindow'], 'spaceone.monitoring.interface.grpc.v1.alert': ['Alert'],", "['Webhook'], 'spaceone.monitoring.interface.grpc.v1.maintenance_window': ['MaintenanceWindow'], 'spaceone.monitoring.interface.grpc.v1.alert': ['Alert'], 'spaceone.monitoring.interface.grpc.v1.note': ['Note'], 'spaceone.monitoring.interface.grpc.v1.event': ['Event'], }" ]
[ "= models.ForeignKey(OrgUnit) class House(models.Model): address = models.CharField(max_length=32) class OrderedPerson(models.Model): name", "= models.TextField() class Contact(models.Model): label = models.CharField(max_length=100) class Email(Contact): email_address", "models.CharField(max_length=25) awards = GenericRelation(Award) class Book(models.Model): pagecount = models.IntegerField() class", "models.PositiveIntegerField() content_type = models.ForeignKey(ContentType) content_object = GenericForeignKey() class AwardNote(models.Model): award", "Policy(models.Model): policy_number = models.CharField(max_length=10) class Version(models.Model): policy = models.ForeignKey(Policy) class", "blank=True, null=True) class Item(models.Model): version = models.ForeignKey(Version) location = models.ForeignKey(Location,", "= models.ForeignKey(Version) location = models.ForeignKey(Location, blank=True, null=True) # Models for", "class Food(models.Model): name = models.CharField(max_length=20, unique=True) class Eaten(models.Model): food =", "Login(models.Model): description = models.CharField(max_length=32) orgunit = models.ForeignKey(OrgUnit) class House(models.Model): address", "content_type = models.ForeignKey(ContentType) content_object = GenericForeignKey() class AwardNote(models.Model): award =", "= models.CharField(max_length=32) class OrderedPerson(models.Model): name = models.CharField(max_length=32) lives_in = models.ForeignKey(House)", "= models.CharField(max_length=25) object_id = models.PositiveIntegerField() content_type = models.ForeignKey(ContentType) content_object =", "models.ForeignKey(Food, to_field=\"name\") meal = models.CharField(max_length=20) # Models for #15776 class", "FooImage(models.Model): my_image = models.ForeignKey(Image) class FooFile(models.Model): my_file = models.ForeignKey(File) class", "class Meta: proxy = True class FooImage(models.Model): my_image = models.ForeignKey(Image)", "Book(models.Model): pagecount = models.IntegerField() 
class Toy(models.Model): name = models.CharField(max_length=50) class", "= models.CharField(max_length=50) toys = models.ManyToManyField(Toy, through='PlayedWith') class PlayedWith(models.Model): child =", "models.ManyToManyField(Contact, related_name=\"research_contacts\") class Food(models.Model): name = models.CharField(max_length=20, unique=True) class Eaten(models.Model):", "models.CharField(max_length=50) class Child(models.Model): name = models.CharField(max_length=50) toys = models.ManyToManyField(Toy, through='PlayedWith')", "class Toy(models.Model): name = models.CharField(max_length=50) class Child(models.Model): name = models.CharField(max_length=50)", "models.CharField(max_length=32) class OrderedPerson(models.Model): name = models.CharField(max_length=32) lives_in = models.ForeignKey(House) class", "= models.ForeignKey(PlayedWith) note = models.TextField() class Contact(models.Model): label = models.CharField(max_length=100)", "= models.CharField(max_length=100) class Person(models.Model): name = models.CharField(max_length=25) awards = GenericRelation(Award)", "for #16128 class File(models.Model): pass class Image(File): class Meta: proxy", "Meta: proxy = True class Photo(Image): class Meta: proxy =", "class Eaten(models.Model): food = models.ForeignKey(Food, to_field=\"name\") meal = models.CharField(max_length=20) #", "class Award(models.Model): name = models.CharField(max_length=25) object_id = models.PositiveIntegerField() content_type =", "= models.ForeignKey(Award) note = models.CharField(max_length=100) class Person(models.Model): name = models.CharField(max_length=25)", "class FooPhoto(models.Model): my_photo = models.ForeignKey(Photo) class FooFileProxy(FooFile): class Meta: proxy", "import models class Award(models.Model): name = models.CharField(max_length=25) object_id = models.PositiveIntegerField()", "class Email(Contact): email_address = models.EmailField(max_length=100) class Researcher(models.Model): contacts = 
models.ManyToManyField(Contact,", "= models.ForeignKey(Policy) class Location(models.Model): version = models.ForeignKey(Version, blank=True, null=True) class", "= models.CharField(max_length=20) # Models for #15776 class Policy(models.Model): policy_number =", "note = models.CharField(max_length=100) class Person(models.Model): name = models.CharField(max_length=25) awards =", "= models.ForeignKey(File) class FooPhoto(models.Model): my_photo = models.ForeignKey(Photo) class FooFileProxy(FooFile): class", "Version(models.Model): policy = models.ForeignKey(Policy) class Location(models.Model): version = models.ForeignKey(Version, blank=True,", "class Researcher(models.Model): contacts = models.ManyToManyField(Contact, related_name=\"research_contacts\") class Food(models.Model): name =", "class Person(models.Model): name = models.CharField(max_length=25) awards = GenericRelation(Award) class Book(models.Model):", "models.CharField(max_length=10) class Version(models.Model): policy = models.ForeignKey(Policy) class Location(models.Model): version =", "Person(models.Model): name = models.CharField(max_length=25) awards = GenericRelation(Award) class Book(models.Model): pagecount", "class Meta: proxy = True class Photo(Image): class Meta: proxy", "name = models.CharField(max_length=50) toys = models.ManyToManyField(Toy, through='PlayedWith') class PlayedWith(models.Model): child", "orgunit = models.ForeignKey(OrgUnit) class House(models.Model): address = models.CharField(max_length=32) class OrderedPerson(models.Model):", "Location(models.Model): version = models.ForeignKey(Version, blank=True, null=True) class Item(models.Model): version =", "name = models.CharField(max_length=25) object_id = models.PositiveIntegerField() content_type = models.ForeignKey(ContentType) content_object", "models.CharField(max_length=25) object_id = models.PositiveIntegerField() content_type = models.ForeignKey(ContentType) content_object = GenericForeignKey()", "django.db import models 
class Award(models.Model): name = models.CharField(max_length=25) object_id =", "= models.ManyToManyField(Contact, related_name=\"research_contacts\") class Food(models.Model): name = models.CharField(max_length=20, unique=True) class", "proxy = True class Photo(Image): class Meta: proxy = True", "models.ForeignKey(OrgUnit) class House(models.Model): address = models.CharField(max_length=32) class OrderedPerson(models.Model): name =", "class Book(models.Model): pagecount = models.IntegerField() class Toy(models.Model): name = models.CharField(max_length=50)", "class FooFile(models.Model): my_file = models.ForeignKey(File) class FooPhoto(models.Model): my_photo = models.ForeignKey(Photo)", "class Image(File): class Meta: proxy = True class Photo(Image): class", "<gh_stars>1-10 from django.contrib.contenttypes.fields import ( GenericForeignKey, GenericRelation ) from django.contrib.contenttypes.models", "GenericForeignKey, GenericRelation ) from django.contrib.contenttypes.models import ContentType from django.db import", "for #15776 class Policy(models.Model): policy_number = models.CharField(max_length=10) class Version(models.Model): policy", "null=True) class Item(models.Model): version = models.ForeignKey(Version) location = models.ForeignKey(Location, blank=True,", "address = models.CharField(max_length=32) class OrderedPerson(models.Model): name = models.CharField(max_length=32) lives_in =", "models.ForeignKey(PlayedWith) note = models.TextField() class Contact(models.Model): label = models.CharField(max_length=100) class", "class Child(models.Model): name = models.CharField(max_length=50) toys = models.ManyToManyField(Toy, through='PlayedWith') class", "models.CharField(max_length=50) toys = models.ManyToManyField(Toy, through='PlayedWith') class PlayedWith(models.Model): child = models.ForeignKey(Child)", "= models.CharField(max_length=10) class Version(models.Model): policy = models.ForeignKey(Policy) class Location(models.Model): version", "True class 
FooImage(models.Model): my_image = models.ForeignKey(Image) class FooFile(models.Model): my_file =", "from django.db import models class Award(models.Model): name = models.CharField(max_length=25) object_id", "toys = models.ManyToManyField(Toy, through='PlayedWith') class PlayedWith(models.Model): child = models.ForeignKey(Child) toy", "= GenericRelation(Award) class Book(models.Model): pagecount = models.IntegerField() class Toy(models.Model): name", "models.DateField(db_column='date_col') class PlayedWithNote(models.Model): played = models.ForeignKey(PlayedWith) note = models.TextField() class", "models.ForeignKey(Image) class FooFile(models.Model): my_file = models.ForeignKey(File) class FooPhoto(models.Model): my_photo =", ") from django.contrib.contenttypes.models import ContentType from django.db import models class", "FooFile(models.Model): my_file = models.ForeignKey(File) class FooPhoto(models.Model): my_photo = models.ForeignKey(Photo) class", "models.ForeignKey(File) class FooPhoto(models.Model): my_photo = models.ForeignKey(Photo) class FooFileProxy(FooFile): class Meta:", "food = models.ForeignKey(Food, to_field=\"name\") meal = models.CharField(max_length=20) # Models for", "AwardNote(models.Model): award = models.ForeignKey(Award) note = models.CharField(max_length=100) class Person(models.Model): name", "Researcher(models.Model): contacts = models.ManyToManyField(Contact, related_name=\"research_contacts\") class Food(models.Model): name = models.CharField(max_length=20,", "models.ForeignKey(Policy) class Location(models.Model): version = models.ForeignKey(Version, blank=True, null=True) class Item(models.Model):", "= models.DateField(db_column='date_col') class PlayedWithNote(models.Model): played = models.ForeignKey(PlayedWith) note = models.TextField()", "#16128 class File(models.Model): pass class Image(File): class Meta: proxy =", "email_address = models.EmailField(max_length=100) class Researcher(models.Model): contacts = 
models.ManyToManyField(Contact, related_name=\"research_contacts\") class", "name = models.CharField(max_length=32) lives_in = models.ForeignKey(House) class Meta: ordering =", "class FooFileProxy(FooFile): class Meta: proxy = True class OrgUnit(models.Model): name", "# Models for #15776 class Policy(models.Model): policy_number = models.CharField(max_length=10) class", "class File(models.Model): pass class Image(File): class Meta: proxy = True", "House(models.Model): address = models.CharField(max_length=32) class OrderedPerson(models.Model): name = models.CharField(max_length=32) lives_in", "class PlayedWithNote(models.Model): played = models.ForeignKey(PlayedWith) note = models.TextField() class Contact(models.Model):", "models.IntegerField() class Toy(models.Model): name = models.CharField(max_length=50) class Child(models.Model): name =", "True class Photo(Image): class Meta: proxy = True class FooImage(models.Model):", "class Item(models.Model): version = models.ForeignKey(Version) location = models.ForeignKey(Location, blank=True, null=True)", "= models.CharField(max_length=32) orgunit = models.ForeignKey(OrgUnit) class House(models.Model): address = models.CharField(max_length=32)", "= models.ForeignKey(Image) class FooFile(models.Model): my_file = models.ForeignKey(File) class FooPhoto(models.Model): my_photo", "= models.EmailField(max_length=100) class Researcher(models.Model): contacts = models.ManyToManyField(Contact, related_name=\"research_contacts\") class Food(models.Model):", "class Policy(models.Model): policy_number = models.CharField(max_length=10) class Version(models.Model): policy = models.ForeignKey(Policy)", "#15776 class Policy(models.Model): policy_number = models.CharField(max_length=10) class Version(models.Model): policy =", "related_name=\"research_contacts\") class Food(models.Model): name = models.CharField(max_length=20, unique=True) class Eaten(models.Model): food", "models.ForeignKey(Location, blank=True, null=True) # Models for 
#16128 class File(models.Model): pass", "toy = models.ForeignKey(Toy) date = models.DateField(db_column='date_col') class PlayedWithNote(models.Model): played =", "pagecount = models.IntegerField() class Toy(models.Model): name = models.CharField(max_length=50) class Child(models.Model):", "awards = GenericRelation(Award) class Book(models.Model): pagecount = models.IntegerField() class Toy(models.Model):", "class OrgUnit(models.Model): name = models.CharField(max_length=64, unique=True) class Login(models.Model): description =", "Email(Contact): email_address = models.EmailField(max_length=100) class Researcher(models.Model): contacts = models.ManyToManyField(Contact, related_name=\"research_contacts\")", "version = models.ForeignKey(Version, blank=True, null=True) class Item(models.Model): version = models.ForeignKey(Version)", "GenericForeignKey() class AwardNote(models.Model): award = models.ForeignKey(Award) note = models.CharField(max_length=100) class", "to_field=\"name\") meal = models.CharField(max_length=20) # Models for #15776 class Policy(models.Model):", "# Models for #16128 class File(models.Model): pass class Image(File): class", "Photo(Image): class Meta: proxy = True class FooImage(models.Model): my_image =", "contacts = models.ManyToManyField(Contact, related_name=\"research_contacts\") class Food(models.Model): name = models.CharField(max_length=20, unique=True)", "models.CharField(max_length=100) class Person(models.Model): name = models.CharField(max_length=25) awards = GenericRelation(Award) class", "my_file = models.ForeignKey(File) class FooPhoto(models.Model): my_photo = models.ForeignKey(Photo) class FooFileProxy(FooFile):", "unique=True) class Eaten(models.Model): food = models.ForeignKey(Food, to_field=\"name\") meal = models.CharField(max_length=20)", "GenericRelation(Award) class Book(models.Model): pagecount = models.IntegerField() class Toy(models.Model): name =", "( GenericForeignKey, GenericRelation ) from 
django.contrib.contenttypes.models import ContentType from django.db", "class FooImage(models.Model): my_image = models.ForeignKey(Image) class FooFile(models.Model): my_file = models.ForeignKey(File)", "True class OrgUnit(models.Model): name = models.CharField(max_length=64, unique=True) class Login(models.Model): description", "OrderedPerson(models.Model): name = models.CharField(max_length=32) lives_in = models.ForeignKey(House) class Meta: ordering", "= True class OrgUnit(models.Model): name = models.CharField(max_length=64, unique=True) class Login(models.Model):", "date = models.DateField(db_column='date_col') class PlayedWithNote(models.Model): played = models.ForeignKey(PlayedWith) note =", "Meta: proxy = True class FooImage(models.Model): my_image = models.ForeignKey(Image) class", "= models.CharField(max_length=20, unique=True) class Eaten(models.Model): food = models.ForeignKey(Food, to_field=\"name\") meal", "note = models.TextField() class Contact(models.Model): label = models.CharField(max_length=100) class Email(Contact):", "models.CharField(max_length=64, unique=True) class Login(models.Model): description = models.CharField(max_length=32) orgunit = models.ForeignKey(OrgUnit)", "from django.contrib.contenttypes.fields import ( GenericForeignKey, GenericRelation ) from django.contrib.contenttypes.models import", "played = models.ForeignKey(PlayedWith) note = models.TextField() class Contact(models.Model): label =", "class Login(models.Model): description = models.CharField(max_length=32) orgunit = models.ForeignKey(OrgUnit) class House(models.Model):", "models.ForeignKey(ContentType) content_object = GenericForeignKey() class AwardNote(models.Model): award = models.ForeignKey(Award) note", "through='PlayedWith') class PlayedWith(models.Model): child = models.ForeignKey(Child) toy = models.ForeignKey(Toy) date", "models.TextField() class Contact(models.Model): label = models.CharField(max_length=100) class Email(Contact): email_address =", "name = 
models.CharField(max_length=20, unique=True) class Eaten(models.Model): food = models.ForeignKey(Food, to_field=\"name\")", "= models.ForeignKey(Food, to_field=\"name\") meal = models.CharField(max_length=20) # Models for #15776", "blank=True, null=True) # Models for #16128 class File(models.Model): pass class", "Image(File): class Meta: proxy = True class Photo(Image): class Meta:", "= GenericForeignKey() class AwardNote(models.Model): award = models.ForeignKey(Award) note = models.CharField(max_length=100)", "description = models.CharField(max_length=32) orgunit = models.ForeignKey(OrgUnit) class House(models.Model): address =", "Models for #15776 class Policy(models.Model): policy_number = models.CharField(max_length=10) class Version(models.Model):", "proxy = True class FooImage(models.Model): my_image = models.ForeignKey(Image) class FooFile(models.Model):", "label = models.CharField(max_length=100) class Email(Contact): email_address = models.EmailField(max_length=100) class Researcher(models.Model):", "= models.ForeignKey(ContentType) content_object = GenericForeignKey() class AwardNote(models.Model): award = models.ForeignKey(Award)", "ContentType from django.db import models class Award(models.Model): name = models.CharField(max_length=25)", "class Version(models.Model): policy = models.ForeignKey(Policy) class Location(models.Model): version = models.ForeignKey(Version,", "Child(models.Model): name = models.CharField(max_length=50) toys = models.ManyToManyField(Toy, through='PlayedWith') class PlayedWith(models.Model):", "unique=True) class Login(models.Model): description = models.CharField(max_length=32) orgunit = models.ForeignKey(OrgUnit) class", "class Location(models.Model): version = models.ForeignKey(Version, blank=True, null=True) class Item(models.Model): version", "class PlayedWith(models.Model): child = models.ForeignKey(Child) toy = models.ForeignKey(Toy) date =", "name = models.CharField(max_length=25) awards = GenericRelation(Award) class 
Book(models.Model): pagecount =", "PlayedWithNote(models.Model): played = models.ForeignKey(PlayedWith) note = models.TextField() class Contact(models.Model): label", "class Contact(models.Model): label = models.CharField(max_length=100) class Email(Contact): email_address = models.EmailField(max_length=100)", "models.ForeignKey(Toy) date = models.DateField(db_column='date_col') class PlayedWithNote(models.Model): played = models.ForeignKey(PlayedWith) note", "class OrderedPerson(models.Model): name = models.CharField(max_length=32) lives_in = models.ForeignKey(House) class Meta:", "meal = models.CharField(max_length=20) # Models for #15776 class Policy(models.Model): policy_number", "models.ForeignKey(Version, blank=True, null=True) class Item(models.Model): version = models.ForeignKey(Version) location =", "class AwardNote(models.Model): award = models.ForeignKey(Award) note = models.CharField(max_length=100) class Person(models.Model):", "models.CharField(max_length=100) class Email(Contact): email_address = models.EmailField(max_length=100) class Researcher(models.Model): contacts =", "models class Award(models.Model): name = models.CharField(max_length=25) object_id = models.PositiveIntegerField() content_type", "Food(models.Model): name = models.CharField(max_length=20, unique=True) class Eaten(models.Model): food = models.ForeignKey(Food,", "name = models.CharField(max_length=50) class Child(models.Model): name = models.CharField(max_length=50) toys =", "= models.CharField(max_length=100) class Email(Contact): email_address = models.EmailField(max_length=100) class Researcher(models.Model): contacts", "= models.CharField(max_length=25) awards = GenericRelation(Award) class Book(models.Model): pagecount = models.IntegerField()", "my_photo = models.ForeignKey(Photo) class FooFileProxy(FooFile): class Meta: proxy = True", "OrgUnit(models.Model): name = models.CharField(max_length=64, unique=True) class Login(models.Model): description = 
models.CharField(max_length=32)", "version = models.ForeignKey(Version) location = models.ForeignKey(Location, blank=True, null=True) # Models", "= models.PositiveIntegerField() content_type = models.ForeignKey(ContentType) content_object = GenericForeignKey() class AwardNote(models.Model):", "award = models.ForeignKey(Award) note = models.CharField(max_length=100) class Person(models.Model): name =", "models.ForeignKey(Photo) class FooFileProxy(FooFile): class Meta: proxy = True class OrgUnit(models.Model):", "class Meta: proxy = True class OrgUnit(models.Model): name = models.CharField(max_length=64,", "child = models.ForeignKey(Child) toy = models.ForeignKey(Toy) date = models.DateField(db_column='date_col') class", "Award(models.Model): name = models.CharField(max_length=25) object_id = models.PositiveIntegerField() content_type = models.ForeignKey(ContentType)", "Item(models.Model): version = models.ForeignKey(Version) location = models.ForeignKey(Location, blank=True, null=True) #", "= models.CharField(max_length=50) class Child(models.Model): name = models.CharField(max_length=50) toys = models.ManyToManyField(Toy,", "= models.ForeignKey(Location, blank=True, null=True) # Models for #16128 class File(models.Model):", "null=True) # Models for #16128 class File(models.Model): pass class Image(File):", "models.ManyToManyField(Toy, through='PlayedWith') class PlayedWith(models.Model): child = models.ForeignKey(Child) toy = models.ForeignKey(Toy)", "import ContentType from django.db import models class Award(models.Model): name =", "class Photo(Image): class Meta: proxy = True class FooImage(models.Model): my_image", "my_image = models.ForeignKey(Image) class FooFile(models.Model): my_file = models.ForeignKey(File) class FooPhoto(models.Model):", "models.CharField(max_length=32) orgunit = models.ForeignKey(OrgUnit) class House(models.Model): address = models.CharField(max_length=32) class", "models.CharField(max_length=20) # Models for #15776 class 
Policy(models.Model): policy_number = models.CharField(max_length=10)", "location = models.ForeignKey(Location, blank=True, null=True) # Models for #16128 class", "= models.CharField(max_length=32) lives_in = models.ForeignKey(House) class Meta: ordering = ['name']", "class House(models.Model): address = models.CharField(max_length=32) class OrderedPerson(models.Model): name = models.CharField(max_length=32)", "= models.ManyToManyField(Toy, through='PlayedWith') class PlayedWith(models.Model): child = models.ForeignKey(Child) toy =", "FooFileProxy(FooFile): class Meta: proxy = True class OrgUnit(models.Model): name =", "import ( GenericForeignKey, GenericRelation ) from django.contrib.contenttypes.models import ContentType from", "Meta: proxy = True class OrgUnit(models.Model): name = models.CharField(max_length=64, unique=True)", "= models.ForeignKey(Version, blank=True, null=True) class Item(models.Model): version = models.ForeignKey(Version) location", "= True class FooImage(models.Model): my_image = models.ForeignKey(Image) class FooFile(models.Model): my_file", "File(models.Model): pass class Image(File): class Meta: proxy = True class", "Eaten(models.Model): food = models.ForeignKey(Food, to_field=\"name\") meal = models.CharField(max_length=20) # Models", "from django.contrib.contenttypes.models import ContentType from django.db import models class Award(models.Model):", "Models for #16128 class File(models.Model): pass class Image(File): class Meta:", "django.contrib.contenttypes.fields import ( GenericForeignKey, GenericRelation ) from django.contrib.contenttypes.models import ContentType", "models.ForeignKey(Version) location = models.ForeignKey(Location, blank=True, null=True) # Models for #16128", "FooPhoto(models.Model): my_photo = models.ForeignKey(Photo) class FooFileProxy(FooFile): class Meta: proxy =", "object_id = models.PositiveIntegerField() content_type = models.ForeignKey(ContentType) content_object = GenericForeignKey() class", "name = 
models.CharField(max_length=64, unique=True) class Login(models.Model): description = models.CharField(max_length=32) orgunit", "= models.ForeignKey(Toy) date = models.DateField(db_column='date_col') class PlayedWithNote(models.Model): played = models.ForeignKey(PlayedWith)", "PlayedWith(models.Model): child = models.ForeignKey(Child) toy = models.ForeignKey(Toy) date = models.DateField(db_column='date_col')", "policy_number = models.CharField(max_length=10) class Version(models.Model): policy = models.ForeignKey(Policy) class Location(models.Model):", "django.contrib.contenttypes.models import ContentType from django.db import models class Award(models.Model): name", "= True class Photo(Image): class Meta: proxy = True class", "models.ForeignKey(Child) toy = models.ForeignKey(Toy) date = models.DateField(db_column='date_col') class PlayedWithNote(models.Model): played", "models.CharField(max_length=20, unique=True) class Eaten(models.Model): food = models.ForeignKey(Food, to_field=\"name\") meal =", "models.EmailField(max_length=100) class Researcher(models.Model): contacts = models.ManyToManyField(Contact, related_name=\"research_contacts\") class Food(models.Model): name", "models.ForeignKey(Award) note = models.CharField(max_length=100) class Person(models.Model): name = models.CharField(max_length=25) awards", "= models.ForeignKey(Photo) class FooFileProxy(FooFile): class Meta: proxy = True class", "content_object = GenericForeignKey() class AwardNote(models.Model): award = models.ForeignKey(Award) note =", "Toy(models.Model): name = models.CharField(max_length=50) class Child(models.Model): name = models.CharField(max_length=50) toys", "= models.IntegerField() class Toy(models.Model): name = models.CharField(max_length=50) class Child(models.Model): name", "= models.ForeignKey(Child) toy = models.ForeignKey(Toy) date = models.DateField(db_column='date_col') class PlayedWithNote(models.Model):", "proxy = True class OrgUnit(models.Model): name = 
models.CharField(max_length=64, unique=True) class", "= models.CharField(max_length=64, unique=True) class Login(models.Model): description = models.CharField(max_length=32) orgunit =", "pass class Image(File): class Meta: proxy = True class Photo(Image):", "GenericRelation ) from django.contrib.contenttypes.models import ContentType from django.db import models", "policy = models.ForeignKey(Policy) class Location(models.Model): version = models.ForeignKey(Version, blank=True, null=True)", "Contact(models.Model): label = models.CharField(max_length=100) class Email(Contact): email_address = models.EmailField(max_length=100) class" ]
[ "descriptors with SIFT kp1, des1 = sift.detectAndCompute(img1,None) kp2, des2 =", "# Display Images cv2.imshow(\"Objects Detected\",raw_image) cv2.waitKey(0) cv2.destroyAllWindows() return cX,cY ############################################################", "(cX2-cX) deltaY = -(CY2-cY) # Write X and Y values", "# frame captured without any errors # cv2.namedWindow(\"output\", cv2.WINDOW_NORMAL) #", "###########################MAIN############################# MIN_MATCH_COUNT = 10 e1 = cv2.getTickCount() # # initialize", "cv2.perspectiveTransform(pts,M) img2 = cv2.polylines(img2,[np.int32(dst)],True,255,3, cv2.LINE_AA) else: print(\"Not enough matches are", "= cv2.drawMatches(img1,kp1,img2,kp2,good,None,**draw_params) cv2.moveWindow('output', 150,150) # Move it to (40,30) cv2.imshow('output',img3)", "cX,cY ############################################################ ###########################MAIN############################# MIN_MATCH_COUNT = 10 e1 = cv2.getTickCount() #", "filter and helps reduce noise # The parameters used are:", "= cv2.moments(contour) # calculate x,y coordinate of center if M[\"m00\"]", "-> index of camera # s, img1 = cam.read() #", "= cv2.contourArea(contour) if ((len(approx) > 8) & (len(approx) < 23)", "cX = int(M[\"m10\"] / M[\"m00\"]) cY = int(M[\"m01\"] / M[\"m00\"])", "for any keyboard event cv2.destroyAllWindows() #cv2.destroyAllWindows() simply destroys all the", "matches as per Lowe's ratio test. 
good = [] for", "draw only inliers flags = 2) img3 = cv2.drawMatches(img1,kp1,img2,kp2,good,None,**draw_params) cv2.moveWindow('output',", "Display Images cv2.imshow(\"Objects Detected\",raw_image) cv2.waitKey(0) cv2.destroyAllWindows() return cX,cY ############################################################ ###########################MAIN#############################", "= 50) flann = cv2.FlannBasedMatcher(index_params, search_params) matches = flann.knnMatch(des1,des2,k=2) #", "as plt ###############################SIFT MATCH Function################################# def SIFTMATCH(img1,img2): # Initiate SIFT", "# The parameters used are: the image, window size for", "time = (e2 - e1)/ cv2.getTickFrequency() print('time needed to execute')", "file.write(\"%.3f \\n\" % deltaY) file.close() #Calculate time of execution e2", "in good ]).reshape(-1,1,2) M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC,5.0) matchesMask", "matches in green color singlePointColor = None, matchesMask = matchesMask,", "= np.float32([ kp1[m.queryIdx].pt for m in good ]).reshape(-1,1,2) dst_pts =", "# draw matches in green color singlePointColor = None, matchesMask", "open(\"values.txt\", \"w\") file.write(\"%.3f \\n\" % deltaX) file.write(\"%.3f \\n\" % deltaY)", "cv2.WINDOW_NORMAL) # cv2.imshow(\"cam-test\",img1) # waitKey(0) # destroyWindow(\"cam-test\") # imwrite(\"Scene.jpg\",img1) #save", "coordinate of center if M[\"m00\"] != 0: cX = int(M[\"m10\"]", "mask.ravel().tolist() h,w = img1.shape pts = np.float32([ [0,0],[0,h-1],[w-1,h-1],[w-1,0] ]).reshape(-1,1,2) dst", "(cX - 25, cY - 25),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255),", "= mask.ravel().tolist() h,w = img1.shape pts = np.float32([ [0,0],[0,h-1],[w-1,h-1],[w-1,0] ]).reshape(-1,1,2)", "imgray = cv2.imread('Scene.jpg', 0) # queryImage # Reference Piece Image", "= cv2.perspectiveTransform(pts,M) img2 = cv2.polylines(img2,[np.int32(dst)],True,255,3, cv2.LINE_AA) else: print(\"Not enough matches", "plt 
###############################SIFT MATCH Function################################# def SIFTMATCH(img1,img2): # Initiate SIFT detector", "draw matches in green color singlePointColor = None, matchesMask =", "None draw_params = dict(matchColor = (0,255,0), # draw matches in", "in the image It takes 3 parameters: image, lower threshold", "if s: # frame captured without any errors # cv2.namedWindow(\"output\",", "good ]).reshape(-1,1,2) M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC,5.0) matchesMask =", "e2 = cv2.getTickCount() time = (e2 - e1)/ cv2.getTickFrequency() print('time", "as per Lowe's ratio test. good = [] for m,n", "= None draw_params = dict(matchColor = (0,255,0), # draw matches", "filtering forms a very good way to preserve edges. It", "0 cv2.circle(raw_image, (cX, cY), 5, (255, 255, 255), -1) cv2.putText(raw_image,", "def SIFTMATCH(img1,img2): # Initiate SIFT detector sift = cv2.xfeatures2d.SIFT_create() #", "img1.shape pts = np.float32([ [0,0],[0,h-1],[w-1,h-1],[w-1,0] ]).reshape(-1,1,2) dst = cv2.perspectiveTransform(pts,M) img2", "Bilateral filtering forms a very good way to preserve edges.", "_, contours, hierarchy = cv2.findContours(edge_detected_image, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) contour_list = []", "good way to preserve edges. It is a non-linear filter", "parameters: image, lower threshold and upper threshold. 
edge_detected_image = cv2.Canny(bilateral_filtered_image,", "flann.knnMatch(des1,des2,k=2) # store all the good matches as per Lowe's", "mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC,5.0) matchesMask = mask.ravel().tolist() h,w =", "values to File file = open(\"values.txt\", \"w\") file.write(\"%.3f \\n\" %", "(area > 50000) ): contour_list.append(contour) print(\"area %.3f\"%(area)) M = cv2.moments(contour)", "MIN_MATCH_COUNT = 10 e1 = cv2.getTickCount() # # initialize the", "keyboard event cv2.destroyAllWindows() #cv2.destroyAllWindows() simply destroys all the windows we", "- %d/%d\" % (len(good),MIN_MATCH_COUNT)) matchesMask = None draw_params = dict(matchColor", "= 5) search_params = dict(checks = 50) flann = cv2.FlannBasedMatcher(index_params,", "= cv2.getTickCount() # # initialize the camera # cam =", "# Reference Piece Image img1 = cv2.imread('img3.jpg',0) # queryImage #", "img1 = cv2.imread('img3.jpg',0) # queryImage # SIFT Algorithm fore Object", "cv2.LINE_AA) else: print(\"Not enough matches are found - %d/%d\" %", "des2 = sift.detectAndCompute(img2,None) FLANN_INDEX_KDTREE = 0 index_params = dict(algorithm =", "Function################################# def SIFTMATCH(img1,img2): # Initiate SIFT detector sift = cv2.xfeatures2d.SIFT_create()", "import pyplot as plt ###############################SIFT MATCH Function################################# def SIFTMATCH(img1,img2): #", "= cv2.findContours(edge_detected_image, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) contour_list = [] for contour in", "= sift.detectAndCompute(img2,None) FLANN_INDEX_KDTREE = 0 index_params = dict(algorithm = FLANN_INDEX_KDTREE,", "= dict(checks = 50) flann = cv2.FlannBasedMatcher(index_params, search_params) matches =", "################################################################################################### #################################Function######################### def CercleDetection(img1): # Read Image raw_image = cv2.imread(img1)", "ret = 
cam.set(4,1080); # if s: # frame captured without", "CercleDetection('img3.jpg') print('cX = %.3f , cY =%.3f' % (cX, cY))", "matchesMask, # draw only inliers flags = 2) img3 =", "cv2.waitKey(0) cv2.destroyAllWindows() return cX,cY ############################################################ ###########################MAIN############################# MIN_MATCH_COUNT = 10 e1", "averaging the neighbour, sigmaColor(Sigma value in the color space. bilateral_filtered_image", "= cv2.xfeatures2d.SIFT_create() # find the keypoints and descriptors with SIFT", "circles cv2.drawContours(raw_image, contour_list, -1, (0, 255, 0), 3) # Display", "index of camera # s, img1 = cam.read() # ret", "(cX2, cY2)) deltaX = (cX2-cX) deltaY = -(CY2-cY) # Write", "np.float32([ kp2[m.trainIdx].pt for m in good ]).reshape(-1,1,2) M, mask =", "frame captured without any errors # cv2.namedWindow(\"output\", cv2.WINDOW_NORMAL) # cv2.imshow(\"cam-test\",img1)", "image, window size for averaging the neighbour, sigmaColor(Sigma value in", "for specified milliseconds for any keyboard event cv2.destroyAllWindows() #cv2.destroyAllWindows() simply", "=%.3f' % (cX2, cY2)) deltaX = (cX2-cX) deltaY = -(CY2-cY)", "Detected\",raw_image) cv2.waitKey(0) cv2.destroyAllWindows() return cX,cY ############################################################ ###########################MAIN############################# MIN_MATCH_COUNT = 10", "cv2.bilateralFilter(raw_image, 5, 175, 175) # Canny edge detector to detect", "pyplot as plt ###############################SIFT MATCH Function################################# def SIFTMATCH(img1,img2): # Initiate", "# image de reference cX, cY = CercleDetection('img3.jpg') print('cX =", "cv2.imshow(\"Objects Detected\",raw_image) cv2.waitKey(0) cv2.destroyAllWindows() return cX,cY ############################################################ ###########################MAIN############################# MIN_MATCH_COUNT =", "import * import numpy as np from matplotlib import pyplot", 
"Find Contours _, contours, hierarchy = cv2.findContours(edge_detected_image, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) contour_list", "23) & (area > 50000) ): contour_list.append(contour) print(\"area %.3f\"%(area)) M", "Read Image raw_image = cv2.imread(img1) # Bilateral filtering forms a", "5) search_params = dict(checks = 50) flann = cv2.FlannBasedMatcher(index_params, search_params)", "good matches as per Lowe's ratio test. good = []", "0 index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5) search_params", "all the windows we created ################################################################################################### #################################Function######################### def CercleDetection(img1): #", "((len(approx) > 8) & (len(approx) < 23) & (area >", "M = cv2.moments(contour) # calculate x,y coordinate of center if", "Detection SIFTMATCH(img1, imgray) # image de reference cX, cY =", "the neighbour, sigmaColor(Sigma value in the color space. bilateral_filtered_image =", "= matchesMask, # draw only inliers flags = 2) img3", "= cv2.imread('Scene.jpg', 0) # queryImage # Reference Piece Image img1", "0.5, (255, 255, 255), 2) # Draw Contours of circles", "value in the color space. bilateral_filtered_image = cv2.bilateralFilter(raw_image, 5, 175,", "and helps reduce noise # The parameters used are: the", "= None, matchesMask = matchesMask, # draw only inliers flags", "# SIFT Algorithm fore Object Detection SIFTMATCH(img1, imgray) # image", "neighbour, sigmaColor(Sigma value in the color space. 
bilateral_filtered_image = cv2.bilateralFilter(raw_image,", "event cv2.destroyAllWindows() #cv2.destroyAllWindows() simply destroys all the windows we created", "* import numpy as np from matplotlib import pyplot as", "#cv2.destroyAllWindows() simply destroys all the windows we created ################################################################################################### #################################Function#########################", "-1) cv2.putText(raw_image, \"centroid\", (cX - 25, cY - 25),cv2.FONT_HERSHEY_SIMPLEX, 0.5,", "= cv2.FlannBasedMatcher(index_params, search_params) matches = flann.knnMatch(des1,des2,k=2) # store all the", "# Scene image in Grayscale # imgray = cv2.cvtColor(img1,cv2.COLOR_BGR2GRAY) imgray", "if M[\"m00\"] != 0: cX = int(M[\"m10\"] / M[\"m00\"]) cY", "# 0 -> index of camera # s, img1 =", "cX, cY = 0, 0 cv2.circle(raw_image, (cX, cY), 5, (255,", "destroys all the windows we created ################################################################################################### #################################Function######################### def CercleDetection(img1):", "= -(CY2-cY) # Write X and Y values to File", "cv2.moveWindow('output', 150,150) # Move it to (40,30) cv2.imshow('output',img3) cv2.waitKey(0) #The", "cv2.findHomography(src_pts, dst_pts, cv2.RANSAC,5.0) matchesMask = mask.ravel().tolist() h,w = img1.shape pts", "cv2.cvtColor(img1,cv2.COLOR_BGR2GRAY) imgray = cv2.imread('Scene.jpg', 0) # queryImage # Reference Piece", "(40,30) cv2.imshow('output',img3) cv2.waitKey(0) #The function waits for specified milliseconds for", "the image, window size for averaging the neighbour, sigmaColor(Sigma value", "destroyWindow(\"cam-test\") # imwrite(\"Scene.jpg\",img1) #save image # del(cam) # Scene image", "= CercleDetection('img3.jpg') print('cX = %.3f , cY =%.3f' % (cX,", "matches are found - %d/%d\" % (len(good),MIN_MATCH_COUNT)) matchesMask = None", "as np from matplotlib import pyplot as plt 
###############################SIFT MATCH", "of center if M[\"m00\"] != 0: cX = int(M[\"m10\"] /", "matchesMask = matchesMask, # draw only inliers flags = 2)", "Scene image in Grayscale # imgray = cv2.cvtColor(img1,cv2.COLOR_BGR2GRAY) imgray =", "= FLANN_INDEX_KDTREE, trees = 5) search_params = dict(checks = 50)", "# Image Webcam cX2, cY2 = CercleDetection('img3.jpg') print('cX2 = %.3f", "good ]).reshape(-1,1,2) dst_pts = np.float32([ kp2[m.trainIdx].pt for m in good", "s, img1 = cam.read() # ret = cam.set(3,1920); # ret", "cX2, cY2 = CercleDetection('img3.jpg') print('cX2 = %.3f , cY2 =%.3f'", "of circles cv2.drawContours(raw_image, contour_list, -1, (0, 255, 0), 3) #", "area = cv2.contourArea(contour) if ((len(approx) > 8) & (len(approx) <", "contour_list = [] for contour in contours: approx = cv2.approxPolyDP(contour,0.01*cv2.arcLength(contour,True),True)", "draw_params = dict(matchColor = (0,255,0), # draw matches in green", "cv2.polylines(img2,[np.int32(dst)],True,255,3, cv2.LINE_AA) else: print(\"Not enough matches are found - %d/%d\"", "enough matches are found - %d/%d\" % (len(good),MIN_MATCH_COUNT)) matchesMask =", "simply destroys all the windows we created ################################################################################################### #################################Function######################### def", "the windows we created ################################################################################################### #################################Function######################### def CercleDetection(img1): # Read", "only inliers flags = 2) img3 = cv2.drawMatches(img1,kp1,img2,kp2,good,None,**draw_params) cv2.moveWindow('output', 150,150)", "edge detector to detect edges in the image It takes", "edge_detected_image = cv2.Canny(bilateral_filtered_image, 75, 200) # Find Contours _, contours,", "= cv2.Canny(bilateral_filtered_image, 75, 200) # Find Contours _, contours, hierarchy", "% (len(good),MIN_MATCH_COUNT)) matchesMask 
= None draw_params = dict(matchColor = (0,255,0),", "sigmaColor(Sigma value in the color space. bilateral_filtered_image = cv2.bilateralFilter(raw_image, 5,", ", cY2 =%.3f' % (cX2, cY2)) deltaX = (cX2-cX) deltaY", "cv2.getTickCount() # # initialize the camera # cam = VideoCapture(0)", "SIFTMATCH(img1, imgray) # image de reference cX, cY = CercleDetection('img3.jpg')", "& (len(approx) < 23) & (area > 50000) ): contour_list.append(contour)", "queryImage # Reference Piece Image img1 = cv2.imread('img3.jpg',0) # queryImage", "= dict(matchColor = (0,255,0), # draw matches in green color", "without any errors # cv2.namedWindow(\"output\", cv2.WINDOW_NORMAL) # cv2.imshow(\"cam-test\",img1) # waitKey(0)", "search_params = dict(checks = 50) flann = cv2.FlannBasedMatcher(index_params, search_params) matches", "-(CY2-cY) # Write X and Y values to File file", "= cv2.imread(img1) # Bilateral filtering forms a very good way", "and upper threshold. edge_detected_image = cv2.Canny(bilateral_filtered_image, 75, 200) # Find", "Write X and Y values to File file = open(\"values.txt\",", "= np.float32([ [0,0],[0,h-1],[w-1,h-1],[w-1,0] ]).reshape(-1,1,2) dst = cv2.perspectiveTransform(pts,M) img2 = cv2.polylines(img2,[np.int32(dst)],True,255,3,", "= np.float32([ kp2[m.trainIdx].pt for m in good ]).reshape(-1,1,2) M, mask", "50000) ): contour_list.append(contour) print(\"area %.3f\"%(area)) M = cv2.moments(contour) # calculate", "contour_list, -1, (0, 255, 0), 3) # Display Images cv2.imshow(\"Objects", "cam.read() # ret = cam.set(3,1920); # ret = cam.set(4,1080); #", "= [] for m,n in matches: if m.distance < 0.7*n.distance:", "150,150) # Move it to (40,30) cv2.imshow('output',img3) cv2.waitKey(0) #The function", "ratio test. good = [] for m,n in matches: if", "upper threshold. 
edge_detected_image = cv2.Canny(bilateral_filtered_image, 75, 200) # Find Contours", "(len(approx) < 23) & (area > 50000) ): contour_list.append(contour) print(\"area", "# Canny edge detector to detect edges in the image", "cY2 = CercleDetection('img3.jpg') print('cX2 = %.3f , cY2 =%.3f' %", "3 parameters: image, lower threshold and upper threshold. edge_detected_image =", "the image It takes 3 parameters: image, lower threshold and", "Piece Image img1 = cv2.imread('img3.jpg',0) # queryImage # SIFT Algorithm", "It takes 3 parameters: image, lower threshold and upper threshold.", "imgray) # image de reference cX, cY = CercleDetection('img3.jpg') print('cX", "deltaY) file.close() #Calculate time of execution e2 = cv2.getTickCount() time", "execution e2 = cv2.getTickCount() time = (e2 - e1)/ cv2.getTickFrequency()", "> 8) & (len(approx) < 23) & (area > 50000)", "a non-linear filter and helps reduce noise # The parameters", "cY = 0, 0 cv2.circle(raw_image, (cX, cY), 5, (255, 255,", "created ################################################################################################### #################################Function######################### def CercleDetection(img1): # Read Image raw_image =", "= 10 e1 = cv2.getTickCount() # # initialize the camera", "- 25, cY - 25),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)", "from cv2 import * import numpy as np from matplotlib", "# waitKey(0) # destroyWindow(\"cam-test\") # imwrite(\"Scene.jpg\",img1) #save image # del(cam)", "cv2 from cv2 import * import numpy as np from", "Canny edge detector to detect edges in the image It", "forms a very good way to preserve edges. 
It is", "255), -1) cv2.putText(raw_image, \"centroid\", (cX - 25, cY - 25),cv2.FONT_HERSHEY_SIMPLEX,", "CercleDetection(img1): # Read Image raw_image = cv2.imread(img1) # Bilateral filtering", "are: the image, window size for averaging the neighbour, sigmaColor(Sigma", "# draw only inliers flags = 2) img3 = cv2.drawMatches(img1,kp1,img2,kp2,good,None,**draw_params)", "in good ]).reshape(-1,1,2) dst_pts = np.float32([ kp2[m.trainIdx].pt for m in", "2) # Draw Contours of circles cv2.drawContours(raw_image, contour_list, -1, (0,", "# cam = VideoCapture(0) # 0 -> index of camera", "%.3f , cY2 =%.3f' % (cX2, cY2)) deltaX = (cX2-cX)", "m,n in matches: if m.distance < 0.7*n.distance: good.append(m) if len(good)>MIN_MATCH_COUNT:", "very good way to preserve edges. It is a non-linear", "VideoCapture(0) # 0 -> index of camera # s, img1", "trees = 5) search_params = dict(checks = 50) flann =", "Images cv2.imshow(\"Objects Detected\",raw_image) cv2.waitKey(0) cv2.destroyAllWindows() return cX,cY ############################################################ ###########################MAIN############################# MIN_MATCH_COUNT", "takes 3 parameters: image, lower threshold and upper threshold. edge_detected_image", "raw_image = cv2.imread(img1) # Bilateral filtering forms a very good", "threshold and upper threshold. 
edge_detected_image = cv2.Canny(bilateral_filtered_image, 75, 200) #", "%.3f\"%(area)) M = cv2.moments(contour) # calculate x,y coordinate of center", "initialize the camera # cam = VideoCapture(0) # 0 ->", "it to (40,30) cv2.imshow('output',img3) cv2.waitKey(0) #The function waits for specified", "errors # cv2.namedWindow(\"output\", cv2.WINDOW_NORMAL) # cv2.imshow(\"cam-test\",img1) # waitKey(0) # destroyWindow(\"cam-test\")", "reference cX, cY = CercleDetection('img3.jpg') print('cX = %.3f , cY", "Webcam cX2, cY2 = CercleDetection('img3.jpg') print('cX2 = %.3f , cY2", "= (cX2-cX) deltaY = -(CY2-cY) # Write X and Y", "time of execution e2 = cv2.getTickCount() time = (e2 -", "find the keypoints and descriptors with SIFT kp1, des1 =", "###############################SIFT MATCH Function################################# def SIFTMATCH(img1,img2): # Initiate SIFT detector sift", "/ M[\"m00\"]) else: cX, cY = 0, 0 cv2.circle(raw_image, (cX,", "\"w\") file.write(\"%.3f \\n\" % deltaX) file.write(\"%.3f \\n\" % deltaY) file.close()", "# ret = cam.set(4,1080); # if s: # frame captured", "Algorithm fore Object Detection SIFTMATCH(img1, imgray) # image de reference", "hierarchy = cv2.findContours(edge_detected_image, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) contour_list = [] for contour", "cv2.approxPolyDP(contour,0.01*cv2.arcLength(contour,True),True) area = cv2.contourArea(contour) if ((len(approx) > 8) & (len(approx)", "sift.detectAndCompute(img2,None) FLANN_INDEX_KDTREE = 0 index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees", "# Move it to (40,30) cv2.imshow('output',img3) cv2.waitKey(0) #The function waits", "deltaX) file.write(\"%.3f \\n\" % deltaY) file.close() #Calculate time of execution", "inliers flags = 2) img3 = cv2.drawMatches(img1,kp1,img2,kp2,good,None,**draw_params) cv2.moveWindow('output', 150,150) #", "# ret = cam.set(3,1920); # ret = cam.set(4,1080); # if", "\\n\" % deltaX) file.write(\"%.3f \\n\" % deltaY) file.close() #Calculate time", "[] for 
contour in contours: approx = cv2.approxPolyDP(contour,0.01*cv2.arcLength(contour,True),True) area =", "= cv2.getTickCount() time = (e2 - e1)/ cv2.getTickFrequency() print('time needed", "0) # queryImage # Reference Piece Image img1 = cv2.imread('img3.jpg',0)", "color singlePointColor = None, matchesMask = matchesMask, # draw only", "0, 0 cv2.circle(raw_image, (cX, cY), 5, (255, 255, 255), -1)", "file = open(\"values.txt\", \"w\") file.write(\"%.3f \\n\" % deltaX) file.write(\"%.3f \\n\"", "img2 = cv2.polylines(img2,[np.int32(dst)],True,255,3, cv2.LINE_AA) else: print(\"Not enough matches are found", "- 25),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2) # Draw Contours", "matplotlib import pyplot as plt ###############################SIFT MATCH Function################################# def SIFTMATCH(img1,img2):", "The parameters used are: the image, window size for averaging", "255, 255), 2) # Draw Contours of circles cv2.drawContours(raw_image, contour_list,", "Reference Piece Image img1 = cv2.imread('img3.jpg',0) # queryImage # SIFT", "200) # Find Contours _, contours, hierarchy = cv2.findContours(edge_detected_image, cv2.RETR_TREE,", "any errors # cv2.namedWindow(\"output\", cv2.WINDOW_NORMAL) # cv2.imshow(\"cam-test\",img1) # waitKey(0) #", "image in Grayscale # imgray = cv2.cvtColor(img1,cv2.COLOR_BGR2GRAY) imgray = cv2.imread('Scene.jpg',", "if m.distance < 0.7*n.distance: good.append(m) if len(good)>MIN_MATCH_COUNT: src_pts = np.float32([", "Initiate SIFT detector sift = cv2.xfeatures2d.SIFT_create() # find the keypoints", "matches: if m.distance < 0.7*n.distance: good.append(m) if len(good)>MIN_MATCH_COUNT: src_pts =", "# Read Image raw_image = cv2.imread(img1) # Bilateral filtering forms", "keypoints and descriptors with SIFT kp1, des1 = sift.detectAndCompute(img1,None) kp2,", "cv2.circle(raw_image, (cX, cY), 5, (255, 255, 255), -1) cv2.putText(raw_image, \"centroid\",", "detector sift = cv2.xfeatures2d.SIFT_create() # find the keypoints and 
descriptors", "% (cX2, cY2)) deltaX = (cX2-cX) deltaY = -(CY2-cY) #", "= int(M[\"m01\"] / M[\"m00\"]) else: cX, cY = 0, 0", "de reference cX, cY = CercleDetection('img3.jpg') print('cX = %.3f ,", "kp2[m.trainIdx].pt for m in good ]).reshape(-1,1,2) M, mask = cv2.findHomography(src_pts,", "# initialize the camera # cam = VideoCapture(0) # 0", "2) img3 = cv2.drawMatches(img1,kp1,img2,kp2,good,None,**draw_params) cv2.moveWindow('output', 150,150) # Move it to", "return cX,cY ############################################################ ###########################MAIN############################# MIN_MATCH_COUNT = 10 e1 = cv2.getTickCount()", "# if s: # frame captured without any errors #", "= flann.knnMatch(des1,des2,k=2) # store all the good matches as per", "< 23) & (area > 50000) ): contour_list.append(contour) print(\"area %.3f\"%(area))", "e1 = cv2.getTickCount() # # initialize the camera # cam", "to File file = open(\"values.txt\", \"w\") file.write(\"%.3f \\n\" % deltaX)", "[0,0],[0,h-1],[w-1,h-1],[w-1,0] ]).reshape(-1,1,2) dst = cv2.perspectiveTransform(pts,M) img2 = cv2.polylines(img2,[np.int32(dst)],True,255,3, cv2.LINE_AA) else:", "Contours of circles cv2.drawContours(raw_image, contour_list, -1, (0, 255, 0), 3)", "singlePointColor = None, matchesMask = matchesMask, # draw only inliers", "a very good way to preserve edges. 
It is a", "with SIFT kp1, des1 = sift.detectAndCompute(img1,None) kp2, des2 = sift.detectAndCompute(img2,None)", "sift.detectAndCompute(img1,None) kp2, des2 = sift.detectAndCompute(img2,None) FLANN_INDEX_KDTREE = 0 index_params =", "kp2, des2 = sift.detectAndCompute(img2,None) FLANN_INDEX_KDTREE = 0 index_params = dict(algorithm", "]).reshape(-1,1,2) dst_pts = np.float32([ kp2[m.trainIdx].pt for m in good ]).reshape(-1,1,2)", "= 2) img3 = cv2.drawMatches(img1,kp1,img2,kp2,good,None,**draw_params) cv2.moveWindow('output', 150,150) # Move it", "we created ################################################################################################### #################################Function######################### def CercleDetection(img1): # Read Image raw_image", "cv2.FlannBasedMatcher(index_params, search_params) matches = flann.knnMatch(des1,des2,k=2) # store all the good", "green color singlePointColor = None, matchesMask = matchesMask, # draw", "sift = cv2.xfeatures2d.SIFT_create() # find the keypoints and descriptors with", "= dict(algorithm = FLANN_INDEX_KDTREE, trees = 5) search_params = dict(checks", "M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC,5.0) matchesMask = mask.ravel().tolist() h,w", "from matplotlib import pyplot as plt ###############################SIFT MATCH Function################################# def", "store all the good matches as per Lowe's ratio test.", "cY2 =%.3f' % (cX2, cY2)) deltaX = (cX2-cX) deltaY =", "def CercleDetection(img1): # Read Image raw_image = cv2.imread(img1) # Bilateral", "print('cX = %.3f , cY =%.3f' % (cX, cY)) #", "matches = flann.knnMatch(des1,des2,k=2) # store all the good matches as", "# Write X and Y values to File file =", "%d/%d\" % (len(good),MIN_MATCH_COUNT)) matchesMask = None draw_params = dict(matchColor =", "h,w = img1.shape pts = np.float32([ [0,0],[0,h-1],[w-1,h-1],[w-1,0] ]).reshape(-1,1,2) dst =", "CercleDetection('img3.jpg') print('cX2 = %.3f , cY2 =%.3f' % (cX2, cY2))", "m in good 
]).reshape(-1,1,2) M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC,5.0)", "(0, 255, 0), 3) # Display Images cv2.imshow(\"Objects Detected\",raw_image) cv2.waitKey(0)", "np.float32([ [0,0],[0,h-1],[w-1,h-1],[w-1,0] ]).reshape(-1,1,2) dst = cv2.perspectiveTransform(pts,M) img2 = cv2.polylines(img2,[np.int32(dst)],True,255,3, cv2.LINE_AA)", "cv2.destroyAllWindows() return cX,cY ############################################################ ###########################MAIN############################# MIN_MATCH_COUNT = 10 e1 =", "SIFT Algorithm fore Object Detection SIFTMATCH(img1, imgray) # image de", "image de reference cX, cY = CercleDetection('img3.jpg') print('cX = %.3f", "else: print(\"Not enough matches are found - %d/%d\" % (len(good),MIN_MATCH_COUNT))", "x,y coordinate of center if M[\"m00\"] != 0: cX =", "Object Detection SIFTMATCH(img1, imgray) # image de reference cX, cY", "# store all the good matches as per Lowe's ratio", "index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5) search_params =", "and descriptors with SIFT kp1, des1 = sift.detectAndCompute(img1,None) kp2, des2", "import numpy as np from matplotlib import pyplot as plt", "the keypoints and descriptors with SIFT kp1, des1 = sift.detectAndCompute(img1,None)", "camera # cam = VideoCapture(0) # 0 -> index of", "per Lowe's ratio test. 
good = [] for m,n in", "255), 2) # Draw Contours of circles cv2.drawContours(raw_image, contour_list, -1,", "File file = open(\"values.txt\", \"w\") file.write(\"%.3f \\n\" % deltaX) file.write(\"%.3f", "parameters used are: the image, window size for averaging the", "cv2.destroyAllWindows() #cv2.destroyAllWindows() simply destroys all the windows we created ###################################################################################################", "Y values to File file = open(\"values.txt\", \"w\") file.write(\"%.3f \\n\"", "= cam.set(4,1080); # if s: # frame captured without any", "8) & (len(approx) < 23) & (area > 50000) ):", "% (cX, cY)) # Image Webcam cX2, cY2 = CercleDetection('img3.jpg')", "M[\"m00\"] != 0: cX = int(M[\"m10\"] / M[\"m00\"]) cY =", "dict(matchColor = (0,255,0), # draw matches in green color singlePointColor", "import cv2 from cv2 import * import numpy as np", "all the good matches as per Lowe's ratio test. good", "MATCH Function################################# def SIFTMATCH(img1,img2): # Initiate SIFT detector sift =", "for averaging the neighbour, sigmaColor(Sigma value in the color space.", "M[\"m00\"]) cY = int(M[\"m01\"] / M[\"m00\"]) else: cX, cY =", "175, 175) # Canny edge detector to detect edges in", "cv2.imread('Scene.jpg', 0) # queryImage # Reference Piece Image img1 =", "cY - 25),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2) # Draw", "search_params) matches = flann.knnMatch(des1,des2,k=2) # store all the good matches", "[] for m,n in matches: if m.distance < 0.7*n.distance: good.append(m)", "good = [] for m,n in matches: if m.distance <", "= 0, 0 cv2.circle(raw_image, (cX, cY), 5, (255, 255, 255),", "contours, hierarchy = cv2.findContours(edge_detected_image, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) contour_list = [] for", "= %.3f , cY2 =%.3f' % (cX2, cY2)) deltaX =", "kp1[m.queryIdx].pt for m in good ]).reshape(-1,1,2) dst_pts = np.float32([ kp2[m.trainIdx].pt", "cY = int(M[\"m01\"] / M[\"m00\"]) else: cX, cY = 
0,", "# find the keypoints and descriptors with SIFT kp1, des1", "# Draw Contours of circles cv2.drawContours(raw_image, contour_list, -1, (0, 255,", "flags = 2) img3 = cv2.drawMatches(img1,kp1,img2,kp2,good,None,**draw_params) cv2.moveWindow('output', 150,150) # Move", "space. bilateral_filtered_image = cv2.bilateralFilter(raw_image, 5, 175, 175) # Canny edge", "cv2.xfeatures2d.SIFT_create() # find the keypoints and descriptors with SIFT kp1,", "is a non-linear filter and helps reduce noise # The", "10 e1 = cv2.getTickCount() # # initialize the camera #", "to preserve edges. It is a non-linear filter and helps", "fore Object Detection SIFTMATCH(img1, imgray) # image de reference cX,", "are found - %d/%d\" % (len(good),MIN_MATCH_COUNT)) matchesMask = None draw_params", "SIFT kp1, des1 = sift.detectAndCompute(img1,None) kp2, des2 = sift.detectAndCompute(img2,None) FLANN_INDEX_KDTREE", "= int(M[\"m10\"] / M[\"m00\"]) cY = int(M[\"m01\"] / M[\"m00\"]) else:", "# Find Contours _, contours, hierarchy = cv2.findContours(edge_detected_image, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)", "# queryImage # Reference Piece Image img1 = cv2.imread('img3.jpg',0) #", "print('cX2 = %.3f , cY2 =%.3f' % (cX2, cY2)) deltaX", "(cX, cY)) # Image Webcam cX2, cY2 = CercleDetection('img3.jpg') print('cX2", "good.append(m) if len(good)>MIN_MATCH_COUNT: src_pts = np.float32([ kp1[m.queryIdx].pt for m in", "FLANN_INDEX_KDTREE, trees = 5) search_params = dict(checks = 50) flann", "cv2.putText(raw_image, \"centroid\", (cX - 25, cY - 25),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255,", "% deltaY) file.close() #Calculate time of execution e2 = cv2.getTickCount()", "pts = np.float32([ [0,0],[0,h-1],[w-1,h-1],[w-1,0] ]).reshape(-1,1,2) dst = cv2.perspectiveTransform(pts,M) img2 =", "s: # frame captured without any errors # cv2.namedWindow(\"output\", cv2.WINDOW_NORMAL)", "5, (255, 255, 255), -1) cv2.putText(raw_image, \"centroid\", (cX - 25,", "captured without any errors # cv2.namedWindow(\"output\", 
cv2.WINDOW_NORMAL) # cv2.imshow(\"cam-test\",img1) #", "window size for averaging the neighbour, sigmaColor(Sigma value in the", "# s, img1 = cam.read() # ret = cam.set(3,1920); #", "# destroyWindow(\"cam-test\") # imwrite(\"Scene.jpg\",img1) #save image # del(cam) # Scene", "\\n\" % deltaY) file.close() #Calculate time of execution e2 =", "SIFTMATCH(img1,img2): # Initiate SIFT detector sift = cv2.xfeatures2d.SIFT_create() # find", "50) flann = cv2.FlannBasedMatcher(index_params, search_params) matches = flann.knnMatch(des1,des2,k=2) # store", "= cv2.cvtColor(img1,cv2.COLOR_BGR2GRAY) imgray = cv2.imread('Scene.jpg', 0) # queryImage # Reference", "# cv2.namedWindow(\"output\", cv2.WINDOW_NORMAL) # cv2.imshow(\"cam-test\",img1) # waitKey(0) # destroyWindow(\"cam-test\") #", "found - %d/%d\" % (len(good),MIN_MATCH_COUNT)) matchesMask = None draw_params =", "(0,255,0), # draw matches in green color singlePointColor = None,", "used are: the image, window size for averaging the neighbour,", "= sift.detectAndCompute(img1,None) kp2, des2 = sift.detectAndCompute(img2,None) FLANN_INDEX_KDTREE = 0 index_params", "175) # Canny edge detector to detect edges in the", "\"centroid\", (cX - 25, cY - 25),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255,", "of camera # s, img1 = cam.read() # ret =", "print(\"area %.3f\"%(area)) M = cv2.moments(contour) # calculate x,y coordinate of", "of execution e2 = cv2.getTickCount() time = (e2 - e1)/", "approx = cv2.approxPolyDP(contour,0.01*cv2.arcLength(contour,True),True) area = cv2.contourArea(contour) if ((len(approx) > 8)", "image # del(cam) # Scene image in Grayscale # imgray", "int(M[\"m10\"] / M[\"m00\"]) cY = int(M[\"m01\"] / M[\"m00\"]) else: cX,", "cv2.RANSAC,5.0) matchesMask = mask.ravel().tolist() h,w = img1.shape pts = np.float32([", "cam = VideoCapture(0) # 0 -> index of camera #", "dict(checks = 50) flann = cv2.FlannBasedMatcher(index_params, search_params) matches = flann.knnMatch(des1,des2,k=2)", "Move it to (40,30) 
cv2.imshow('output',img3) cv2.waitKey(0) #The function waits for", "(255, 255, 255), 2) # Draw Contours of circles cv2.drawContours(raw_image,", "255, 0), 3) # Display Images cv2.imshow(\"Objects Detected\",raw_image) cv2.waitKey(0) cv2.destroyAllWindows()", "numpy as np from matplotlib import pyplot as plt ###############################SIFT", "np.float32([ kp1[m.queryIdx].pt for m in good ]).reshape(-1,1,2) dst_pts = np.float32([", "for m,n in matches: if m.distance < 0.7*n.distance: good.append(m) if", "(len(good),MIN_MATCH_COUNT)) matchesMask = None draw_params = dict(matchColor = (0,255,0), #", "]).reshape(-1,1,2) M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC,5.0) matchesMask = mask.ravel().tolist()", "= (e2 - e1)/ cv2.getTickFrequency() print('time needed to execute') print(time)", "cam.set(3,1920); # ret = cam.set(4,1080); # if s: # frame", "0.7*n.distance: good.append(m) if len(good)>MIN_MATCH_COUNT: src_pts = np.float32([ kp1[m.queryIdx].pt for m", "= cv2.findHomography(src_pts, dst_pts, cv2.RANSAC,5.0) matchesMask = mask.ravel().tolist() h,w = img1.shape", "= VideoCapture(0) # 0 -> index of camera # s,", "Draw Contours of circles cv2.drawContours(raw_image, contour_list, -1, (0, 255, 0),", "############################################################ ###########################MAIN############################# MIN_MATCH_COUNT = 10 e1 = cv2.getTickCount() # #", "= cv2.bilateralFilter(raw_image, 5, 175, 175) # Canny edge detector to", "in matches: if m.distance < 0.7*n.distance: good.append(m) if len(good)>MIN_MATCH_COUNT: src_pts", "= 0 index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)", "detect edges in the image It takes 3 parameters: image,", "in Grayscale # imgray = cv2.cvtColor(img1,cv2.COLOR_BGR2GRAY) imgray = cv2.imread('Scene.jpg', 0)", "Image raw_image = cv2.imread(img1) # Bilateral filtering forms a very", "0), 3) # Display Images cv2.imshow(\"Objects Detected\",raw_image) cv2.waitKey(0) cv2.destroyAllWindows() return", 
"M[\"m00\"]) else: cX, cY = 0, 0 cv2.circle(raw_image, (cX, cY),", "cv2.waitKey(0) #The function waits for specified milliseconds for any keyboard", "dst_pts, cv2.RANSAC,5.0) matchesMask = mask.ravel().tolist() h,w = img1.shape pts =", "for contour in contours: approx = cv2.approxPolyDP(contour,0.01*cv2.arcLength(contour,True),True) area = cv2.contourArea(contour)", "0: cX = int(M[\"m10\"] / M[\"m00\"]) cY = int(M[\"m01\"] /", "cam.set(4,1080); # if s: # frame captured without any errors", "cv2.imread(img1) # Bilateral filtering forms a very good way to", "in the color space. bilateral_filtered_image = cv2.bilateralFilter(raw_image, 5, 175, 175)", "25, cY - 25),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2) #", "any keyboard event cv2.destroyAllWindows() #cv2.destroyAllWindows() simply destroys all the windows", "detector to detect edges in the image It takes 3", "#The function waits for specified milliseconds for any keyboard event", "for m in good ]).reshape(-1,1,2) dst_pts = np.float32([ kp2[m.trainIdx].pt for", "# del(cam) # Scene image in Grayscale # imgray =", "the good matches as per Lowe's ratio test. 
good =", ", cY =%.3f' % (cX, cY)) # Image Webcam cX2,", "# calculate x,y coordinate of center if M[\"m00\"] != 0:", "cv2.namedWindow(\"output\", cv2.WINDOW_NORMAL) # cv2.imshow(\"cam-test\",img1) # waitKey(0) # destroyWindow(\"cam-test\") # imwrite(\"Scene.jpg\",img1)", "#Calculate time of execution e2 = cv2.getTickCount() time = (e2", "reduce noise # The parameters used are: the image, window", "cY)) # Image Webcam cX2, cY2 = CercleDetection('img3.jpg') print('cX2 =", "bilateral_filtered_image = cv2.bilateralFilter(raw_image, 5, 175, 175) # Canny edge detector", "dst_pts = np.float32([ kp2[m.trainIdx].pt for m in good ]).reshape(-1,1,2) M,", "cY = CercleDetection('img3.jpg') print('cX = %.3f , cY =%.3f' %", "= open(\"values.txt\", \"w\") file.write(\"%.3f \\n\" % deltaX) file.write(\"%.3f \\n\" %", "if ((len(approx) > 8) & (len(approx) < 23) & (area", "= CercleDetection('img3.jpg') print('cX2 = %.3f , cY2 =%.3f' % (cX2,", "imwrite(\"Scene.jpg\",img1) #save image # del(cam) # Scene image in Grayscale", "noise # The parameters used are: the image, window size", "img3 = cv2.drawMatches(img1,kp1,img2,kp2,good,None,**draw_params) cv2.moveWindow('output', 150,150) # Move it to (40,30)", "# Initiate SIFT detector sift = cv2.xfeatures2d.SIFT_create() # find the", "#save image # del(cam) # Scene image in Grayscale #", "camera # s, img1 = cam.read() # ret = cam.set(3,1920);", "specified milliseconds for any keyboard event cv2.destroyAllWindows() #cv2.destroyAllWindows() simply destroys", "%.3f , cY =%.3f' % (cX, cY)) # Image Webcam", "> 50000) ): contour_list.append(contour) print(\"area %.3f\"%(area)) M = cv2.moments(contour) #", "color space. 
bilateral_filtered_image = cv2.bilateralFilter(raw_image, 5, 175, 175) # Canny", "contour in contours: approx = cv2.approxPolyDP(contour,0.01*cv2.arcLength(contour,True),True) area = cv2.contourArea(contour) if", "image It takes 3 parameters: image, lower threshold and upper", "/ M[\"m00\"]) cY = int(M[\"m01\"] / M[\"m00\"]) else: cX, cY", "print(\"Not enough matches are found - %d/%d\" % (len(good),MIN_MATCH_COUNT)) matchesMask", "file.write(\"%.3f \\n\" % deltaX) file.write(\"%.3f \\n\" % deltaY) file.close() #Calculate", "(255, 255, 255), -1) cv2.putText(raw_image, \"centroid\", (cX - 25, cY", "3) # Display Images cv2.imshow(\"Objects Detected\",raw_image) cv2.waitKey(0) cv2.destroyAllWindows() return cX,cY", "cX, cY = CercleDetection('img3.jpg') print('cX = %.3f , cY =%.3f'", "# imwrite(\"Scene.jpg\",img1) #save image # del(cam) # Scene image in", "cv2.Canny(bilateral_filtered_image, 75, 200) # Find Contours _, contours, hierarchy =", "the color space. bilateral_filtered_image = cv2.bilateralFilter(raw_image, 5, 175, 175) #", "Grayscale # imgray = cv2.cvtColor(img1,cv2.COLOR_BGR2GRAY) imgray = cv2.imread('Scene.jpg', 0) #", "len(good)>MIN_MATCH_COUNT: src_pts = np.float32([ kp1[m.queryIdx].pt for m in good ]).reshape(-1,1,2)", "cv2.imread('img3.jpg',0) # queryImage # SIFT Algorithm fore Object Detection SIFTMATCH(img1,", "cv2.CHAIN_APPROX_SIMPLE) contour_list = [] for contour in contours: approx =", "edges. 
It is a non-linear filter and helps reduce noise", "deltaX = (cX2-cX) deltaY = -(CY2-cY) # Write X and", "cv2.contourArea(contour) if ((len(approx) > 8) & (len(approx) < 23) &", "function waits for specified milliseconds for any keyboard event cv2.destroyAllWindows()", "matchesMask = mask.ravel().tolist() h,w = img1.shape pts = np.float32([ [0,0],[0,h-1],[w-1,h-1],[w-1,0]", "= cam.read() # ret = cam.set(3,1920); # ret = cam.set(4,1080);", "% deltaX) file.write(\"%.3f \\n\" % deltaY) file.close() #Calculate time of", "cv2.getTickCount() time = (e2 - e1)/ cv2.getTickFrequency() print('time needed to", "5, 175, 175) # Canny edge detector to detect edges", "non-linear filter and helps reduce noise # The parameters used", "windows we created ################################################################################################### #################################Function######################### def CercleDetection(img1): # Read Image", "= cv2.approxPolyDP(contour,0.01*cv2.arcLength(contour,True),True) area = cv2.contourArea(contour) if ((len(approx) > 8) &", "way to preserve edges. 
It is a non-linear filter and", "deltaY = -(CY2-cY) # Write X and Y values to", "cv2.findContours(edge_detected_image, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) contour_list = [] for contour in contours:", "waits for specified milliseconds for any keyboard event cv2.destroyAllWindows() #cv2.destroyAllWindows()", "to (40,30) cv2.imshow('output',img3) cv2.waitKey(0) #The function waits for specified milliseconds", "m in good ]).reshape(-1,1,2) dst_pts = np.float32([ kp2[m.trainIdx].pt for m", "else: cX, cY = 0, 0 cv2.circle(raw_image, (cX, cY), 5,", "# cv2.imshow(\"cam-test\",img1) # waitKey(0) # destroyWindow(\"cam-test\") # imwrite(\"Scene.jpg\",img1) #save image", "75, 200) # Find Contours _, contours, hierarchy = cv2.findContours(edge_detected_image,", "center if M[\"m00\"] != 0: cX = int(M[\"m10\"] / M[\"m00\"])", "# Bilateral filtering forms a very good way to preserve", "calculate x,y coordinate of center if M[\"m00\"] != 0: cX", "if len(good)>MIN_MATCH_COUNT: src_pts = np.float32([ kp1[m.queryIdx].pt for m in good", "del(cam) # Scene image in Grayscale # imgray = cv2.cvtColor(img1,cv2.COLOR_BGR2GRAY)", "# queryImage # SIFT Algorithm fore Object Detection SIFTMATCH(img1, imgray)", "cv2.moments(contour) # calculate x,y coordinate of center if M[\"m00\"] !=", "= [] for contour in contours: approx = cv2.approxPolyDP(contour,0.01*cv2.arcLength(contour,True),True) area", "queryImage # SIFT Algorithm fore Object Detection SIFTMATCH(img1, imgray) #", "cv2.drawContours(raw_image, contour_list, -1, (0, 255, 0), 3) # Display Images", "contours: approx = cv2.approxPolyDP(contour,0.01*cv2.arcLength(contour,True),True) area = cv2.contourArea(contour) if ((len(approx) >", "milliseconds for any keyboard event cv2.destroyAllWindows() #cv2.destroyAllWindows() simply destroys all", "): contour_list.append(contour) print(\"area %.3f\"%(area)) M = cv2.moments(contour) # calculate x,y", "Image img1 = cv2.imread('img3.jpg',0) # queryImage # SIFT Algorithm fore", "!= 0: cX = 
int(M[\"m10\"] / M[\"m00\"]) cY = int(M[\"m01\"]", "SIFT detector sift = cv2.xfeatures2d.SIFT_create() # find the keypoints and", "255, 255), -1) cv2.putText(raw_image, \"centroid\", (cX - 25, cY -", "< 0.7*n.distance: good.append(m) if len(good)>MIN_MATCH_COUNT: src_pts = np.float32([ kp1[m.queryIdx].pt for", "= cv2.imread('img3.jpg',0) # queryImage # SIFT Algorithm fore Object Detection", "(cX, cY), 5, (255, 255, 255), -1) cv2.putText(raw_image, \"centroid\", (cX", "des1 = sift.detectAndCompute(img1,None) kp2, des2 = sift.detectAndCompute(img2,None) FLANN_INDEX_KDTREE = 0", "size for averaging the neighbour, sigmaColor(Sigma value in the color", "and Y values to File file = open(\"values.txt\", \"w\") file.write(\"%.3f", "np from matplotlib import pyplot as plt ###############################SIFT MATCH Function#################################", "= cam.set(3,1920); # ret = cam.set(4,1080); # if s: #", "img1 = cam.read() # ret = cam.set(3,1920); # ret =", "# imgray = cv2.cvtColor(img1,cv2.COLOR_BGR2GRAY) imgray = cv2.imread('Scene.jpg', 0) # queryImage", "= cv2.polylines(img2,[np.int32(dst)],True,255,3, cv2.LINE_AA) else: print(\"Not enough matches are found -", "flann = cv2.FlannBasedMatcher(index_params, search_params) matches = flann.knnMatch(des1,des2,k=2) # store all", "0 -> index of camera # s, img1 = cam.read()", "None, matchesMask = matchesMask, # draw only inliers flags =", "dict(algorithm = FLANN_INDEX_KDTREE, trees = 5) search_params = dict(checks =", "to detect edges in the image It takes 3 parameters:", "lower threshold and upper threshold. 
edge_detected_image = cv2.Canny(bilateral_filtered_image, 75, 200)", "src_pts = np.float32([ kp1[m.queryIdx].pt for m in good ]).reshape(-1,1,2) dst_pts", "the camera # cam = VideoCapture(0) # 0 -> index", "kp1, des1 = sift.detectAndCompute(img1,None) kp2, des2 = sift.detectAndCompute(img2,None) FLANN_INDEX_KDTREE =", "= %.3f , cY =%.3f' % (cX, cY)) # Image", "dst = cv2.perspectiveTransform(pts,M) img2 = cv2.polylines(img2,[np.int32(dst)],True,255,3, cv2.LINE_AA) else: print(\"Not enough", "edges in the image It takes 3 parameters: image, lower", "# # initialize the camera # cam = VideoCapture(0) #", "ret = cam.set(3,1920); # ret = cam.set(4,1080); # if s:", "Lowe's ratio test. good = [] for m,n in matches:", "contour_list.append(contour) print(\"area %.3f\"%(area)) M = cv2.moments(contour) # calculate x,y coordinate", "preserve edges. It is a non-linear filter and helps reduce", "]).reshape(-1,1,2) dst = cv2.perspectiveTransform(pts,M) img2 = cv2.polylines(img2,[np.int32(dst)],True,255,3, cv2.LINE_AA) else: print(\"Not", "Contours _, contours, hierarchy = cv2.findContours(edge_detected_image, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) contour_list =", "cv2 import * import numpy as np from matplotlib import", "#################################Function######################### def CercleDetection(img1): # Read Image raw_image = cv2.imread(img1) #", "waitKey(0) # destroyWindow(\"cam-test\") # imwrite(\"Scene.jpg\",img1) #save image # del(cam) #", "cY2)) deltaX = (cX2-cX) deltaY = -(CY2-cY) # Write X", "threshold. edge_detected_image = cv2.Canny(bilateral_filtered_image, 75, 200) # Find Contours _,", "cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) contour_list = [] for contour in contours: approx", "= (0,255,0), # draw matches in green color singlePointColor =", "FLANN_INDEX_KDTREE = 0 index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees =", "m.distance < 0.7*n.distance: good.append(m) if len(good)>MIN_MATCH_COUNT: src_pts = np.float32([ kp1[m.queryIdx].pt", "test. 
good = [] for m,n in matches: if m.distance", "-1, (0, 255, 0), 3) # Display Images cv2.imshow(\"Objects Detected\",raw_image)", "cY =%.3f' % (cX, cY)) # Image Webcam cX2, cY2", "in green color singlePointColor = None, matchesMask = matchesMask, #", "=%.3f' % (cX, cY)) # Image Webcam cX2, cY2 =", "imgray = cv2.cvtColor(img1,cv2.COLOR_BGR2GRAY) imgray = cv2.imread('Scene.jpg', 0) # queryImage #", "25),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2) # Draw Contours of", "= img1.shape pts = np.float32([ [0,0],[0,h-1],[w-1,h-1],[w-1,0] ]).reshape(-1,1,2) dst = cv2.perspectiveTransform(pts,M)", "Image Webcam cX2, cY2 = CercleDetection('img3.jpg') print('cX2 = %.3f ,", "X and Y values to File file = open(\"values.txt\", \"w\")", "int(M[\"m01\"] / M[\"m00\"]) else: cX, cY = 0, 0 cv2.circle(raw_image,", "image, lower threshold and upper threshold. edge_detected_image = cv2.Canny(bilateral_filtered_image, 75,", "It is a non-linear filter and helps reduce noise #", "file.close() #Calculate time of execution e2 = cv2.getTickCount() time =", "cv2.drawMatches(img1,kp1,img2,kp2,good,None,**draw_params) cv2.moveWindow('output', 150,150) # Move it to (40,30) cv2.imshow('output',img3) cv2.waitKey(0)", "helps reduce noise # The parameters used are: the image,", "for m in good ]).reshape(-1,1,2) M, mask = cv2.findHomography(src_pts, dst_pts,", "cY), 5, (255, 255, 255), -1) cv2.putText(raw_image, \"centroid\", (cX -", "cv2.imshow(\"cam-test\",img1) # waitKey(0) # destroyWindow(\"cam-test\") # imwrite(\"Scene.jpg\",img1) #save image #", "cv2.imshow('output',img3) cv2.waitKey(0) #The function waits for specified milliseconds for any", "matchesMask = None draw_params = dict(matchColor = (0,255,0), # draw", "& (area > 50000) ): contour_list.append(contour) print(\"area %.3f\"%(area)) M =", "in contours: approx = cv2.approxPolyDP(contour,0.01*cv2.arcLength(contour,True),True) area = cv2.contourArea(contour) if ((len(approx)" ]
[ "dash.dependencies import Input, Output, State import dash_bootstrap_components as dbc import", "right provides some \" \"close alternatives.\" ), className=\"card-text\", ), ]", "i in range(2, 6)], *[Output(f\"other-results-tooltip-{i}\", \"children\") for i in range(2,", "ids = [] i = 2 for index, row in", "children=[ dbc.CardHeader(\"Other great phones:\"), dbc.CardBody( id=\"other-results\", children=( [ html.P( html.Span(", "in [\"os\", \"memory\", \"ram\", \"cam\", \"cost\"] ], ) def results(*choices):", "html_for=\"ram-choice\", ), dcc.Slider( id=\"ram-choice\", min=2, max=12, step=1, value=12, included=False, marks={", ") def results(*choices): if choices[0] == \"both\": choice_data = data", "], className=\"mr-3 ml-3 mb-2 mt-2\", ), ], style={\"maxHeight\": \"560px\", \"overflow\":", "contents.append(f\"{i}. {row['Model']}\") tables.append(table_from_data_horizontal(row)) i = i + 1 return contents,", "Top card with details(?) dbc.Card( children=[ dbc.CardBody( [ html.H4( \"Researcher's", "st else False for st in data[\"OS\"]]] if choices[0] ==", "table_from_data(data, choices): # print(choices) to_compare = [\"Memory\", \"RAM\", \"Camera (MP)\",", "dbc.Label( \"Choose desired RAM capacity (GB)\", html_for=\"ram-choice\", ), dcc.Slider( id=\"ram-choice\",", "], id=\"os-choice\", value=\"both\", inline=True, # className=\"text-center mt-4\", ), ], className=\"mr-3", "table_from_data_horizontal(data): header = [html.Thead(html.Tr([html.Th(col) for col in data.index]))] body =", "i = i + 1 return contents, tables if __name__", "\"auto\"}, ), ], width={\"size\": 5, \"offset\": 1}, ), dbc.Col( children=[", "def table_from_data(data, choices): # print(choices) to_compare = [\"Memory\", \"RAM\", \"Camera", "shows the phone \" \"which matches the preferences the best.", "\"The box on bottom right provides some \" \"close alternatives.\"", "), dbc.FormGroup( children=[ dbc.Label( \"Choose desired budget (Euros)\", html_for=\"cost-choice\", ),", "\"Choose desired 
Memory capacity (GB)\", html_for=\"memory-choice\", ), dcc.Slider( id=\"memory-choice\", min=16,", "right shows the phone \" \"which matches the preferences the", "\"border-color\": \"black\", }, ) for i in range(2, 6) ]", "phone for you is:\"), dbc.CardBody(id=\"results\"), ], className=\"mb-4\", ), dbc.Card( children=[", "dash.Dash( __name__, external_stylesheets=[dbc.themes.LITERA], eager_loading=True, suppress_callback_exceptions=True, ) app.layout = html.Div( children=[", "], [ Input(f\"{attr}-choice\", \"value\") for attr in [\"os\", \"memory\", \"ram\",", "i in range(num): content.append(dbc.Tooltip(tooldict[\"tables\"][i], target=tooldict[\"ids\"][i])) return content\"\"\" def table_from_data(data, choices):", "- choices) * [1, 1, 1, -1] colors = [None,", "html.Td([html.Span(\" ▉\", style={\"color\": c,},)],), ] ) for (col, c) in", "distance = (aspirations - relevant_data) / (ideal - nadir) distance", "app = dash.Dash( __name__, external_stylesheets=[dbc.themes.LITERA], eager_loading=True, suppress_callback_exceptions=True, ) app.layout =", ">= 0 else \"red\" for x in diff] # print(np.sign(diff))", "dbc.Table(header + body) def other_options(data): contents = [] tables =", "ex import plotly.graph_objects as go import pandas as pd import", "\"7\", 8: \"8\", 9: \"9\", 10: \"10\", 11: \"11\", 12:", "total_number = len(distance_order) if total_number >= 4: others, tooltips =", "relevant_data) / (ideal - nadir) distance = distance.max(axis=1) distance_order =", "style={\"maxHeight\": \"560px\", \"overflow\": \"auto\"}, ), ], width={\"size\": 5, \"offset\": 1},", "html_for=\"os-choice\", ), dbc.RadioItems( options=[ { \"label\": \"Android\", \"value\": \"Android\", },", "[ html.P( html.Span( f\"{i}. 
\", id=f\"other-results-list-{i}\", ) ) for i", "*others, *tooltips) \"\"\"@app.callback(Output(\"tooltips\", \"children\"), [Input(\"callback-dump\", \"children\")]) def tooltips(tooldict): num =", "children=\"What is your optimal phone?\", className=\"text-center mt-4\", ) ) ]", "90: \"90\", 110: \"110\", 130: \"130\", }, className=\"text-center mt-5\", ),", "distance = distance.max(axis=1) distance_order = np.argsort(distance) best = table_from_data(card_data.loc[distance_order.values[0]], choices[1:])", "(data[to_compare].values - choices) * [1, 1, 1, -1] colors =", "if choices[0] == \"both\": choice_data = data elif choices[0] ==", "\"cam\", \"cost\"] ], ) def results(*choices): if choices[0] == \"both\":", "options=[ { \"label\": \"Android\", \"value\": \"Android\", }, {\"label\": \"iOS\", \"value\":", "3: \"3\", 4: \"4\", 5: \"5\", 6: \"6\", 7: \"7\",", "= details.loc[0] data = data.rename(columns=names) details = details.rename(columns=names) maxi =", "details.rename(columns=names) maxi = details.loc[1].astype(int) details_on_card = details.loc[2].astype(int) details_on_card = details.columns[details_on_card", "\"cost\"] ], ) def results(*choices): if choices[0] == \"both\": choice_data", "relevant_data.min().values nadir = relevant_data.max().values aspirations = choices[1:] * maxi distance", "import dash_table import plotly.express as ex import plotly.graph_objects as go", "[ dbc.FormGroup( children=[ dbc.Label( \"Choose desired operating system\", html_for=\"os-choice\", ),", "RAM capacity (GB)\", html_for=\"ram-choice\", ), dcc.Slider( id=\"ram-choice\", min=2, max=12, step=1,", "50: \"50\", 70: \"70\", 90: \"90\", 110: \"110\", 130: \"130\",", "to_compare = [\"Memory\", \"RAM\", \"Camera (MP)\", \"Price (Euros)\"] # print(data[to_compare].values)", "for i in range(2, 6) ] ), ), ], className=\"mt-4\",", "(ideal - nadir) distance = distance.max(axis=1) distance_order = np.argsort(distance) best", "\"1000\", 1200: \"1200\", 1400: \"1400\", }, 
className=\"text-center mt-5\", ), ],", "\"IOS\" in st else False for st in data[\"OS\"]]] if", "included=False, value=256, marks={ 16: \"16\", 32: \"32\", 64: \"64\", 128:", "== \"IOS\": choice_data = data[[True if \"IOS\" in st else", "tooltips = other_options( card_data.loc[distance_order.values[1:total_number]] ) others = others + [f\"{i}.", "children=( [ html.P( html.Span( f\"{i}. \", id=f\"other-results-list-{i}\", ) ) for", "), ], className=\"mt-4\", ), html.Div(id=\"tooltips\"), ], width={\"size\": 5, \"offset\": 0},", "target=tooldict[\"ids\"][i])) return content\"\"\" def table_from_data(data, choices): # print(choices) to_compare =", "\"memory\", \"ram\", \"cam\", \"cost\"] ], ) def results(*choices): if choices[0]", "= [\"Memory\", \"RAM\", \"Camera (MP)\", \"Price (Euros)\"] # print(data[to_compare].values) diff", "\"5\", 6: \"6\", 7: \"7\", 8: \"8\", 9: \"9\", 10:", "7: \"7\", 8: \"8\", 9: \"9\", 10: \"10\", 11: \"11\",", "else False for st in data[\"OS\"]]] if choices[0] == \"Android\":", "content\"\"\" def table_from_data(data, choices): # print(choices) to_compare = [\"Memory\", \"RAM\",", "data = pd.read_csv(\"./data/Phone_dataset_new.csv\", header=0) details = pd.read_csv(\"./data/Phone_details.csv\", header=0) names =", "diff] # print(np.sign(diff)) return dbc.Table( [ html.Tbody( [ html.Tr( [", "import dash_core_components as dcc import dash_html_components as html from dash.dependencies", "\"RAM\", \"Camera (MP)\", \"Price (Euros)\",] ].reset_index(drop=True) card_data = choice_data[details_on_card].reset_index(drop=True) maxi", "placement=\"right\", style={ \"maxWidth\": 700, \"background-color\": \"white\", \"color\": \"white\", \"border-style\": \"solid\",", "html.Td([str(data[col]),],), html.Td([html.Span(\" ▉\", style={\"color\": c,},)],), ] ) for (col, c)", "details_on_card = details.columns[details_on_card == 1] fitness_columns = { \"Memory\": -1,", "if \"Android\" in st else False for st in data[\"OS\"]]]", "is:\"), 
dbc.CardBody(id=\"results\"), ], className=\"mb-4\", ), dbc.Card( children=[ dbc.CardHeader(\"Other great phones:\"),", "[] tables = [] ids = [] i = 2", "\"both\": choice_data = data elif choices[0] == \"IOS\": choice_data =", "\"Android\", }, {\"label\": \"iOS\", \"value\": \"IOS\"}, { \"label\": \"No preference\",", "\"label\": \"No preference\", \"value\": \"both\", }, ], id=\"os-choice\", value=\"both\", inline=True,", "\"Price (Euros)\"] # print(data[to_compare].values) diff = (data[to_compare].values - choices) *", "\"90\", 110: \"110\", 130: \"130\", }, className=\"text-center mt-5\", ), ],", "], style={\"maxHeight\": \"560px\", \"overflow\": \"auto\"}, ), ], width={\"size\": 5, \"offset\":", "value=\"both\", inline=True, # className=\"text-center mt-4\", ), ], className=\"mr-3 ml-3 mb-2", "dbc.Label( \"Choose desired camera resolution (MP)\", html_for=\"cam-choice\", ), dcc.Slider( id=\"cam-choice\",", "6)] tooltips = tooltips + [None for i in range(len(tooltips)", "\"2\", 3: \"3\", 4: \"4\", 5: \"5\", 6: \"6\", 7:", "), html.P( ( \"This app uses decision support tools to", "\"children\") for i in range(2, 6)], ], [ Input(f\"{attr}-choice\", \"value\")", "mt-2\", ), dbc.FormGroup( children=[ dbc.Label( \"Choose desired camera resolution (MP)\",", "html.Tbody( [ html.Tr( [ html.Th(col), html.Td([str(data[col]),],), html.Td([html.Span(\" ▉\", style={\"color\": c,},)],),", "(aspirations - relevant_data) / (ideal - nadir) distance = distance.max(axis=1)", "other_options(card_data.loc[distance_order.values[1:5]]) else: others, tooltips = other_options( card_data.loc[distance_order.values[1:total_number]] ) others =", "text-center\", ), html.P( ( \"This app uses decision support tools", "as dcc import dash_html_components as html from dash.dependencies import Input,", "[f\"{i}. -\" for i in range(len(others) + 2, 6)] tooltips", ") others = others + [f\"{i}. 
-\" for i in", "+ [ dbc.Tooltip( id=f\"other-results-tooltip-{i}\", target=f\"other-results-list-{i}\", placement=\"right\", style={ \"maxWidth\": 700, \"background-color\":", "= pd.read_csv(\"./data/Phone_details.csv\", header=0) names = details.loc[0] data = data.rename(columns=names) details", "= data[[True if \"IOS\" in st else False for st", "maxi ideal = relevant_data.min().values nadir = relevant_data.max().values aspirations = choices[1:]", "dbc.CardBody( [ html.H4( \"Researcher's Night Event\", className=\"card-title text-center\", ), html.P(", "\" \"the user's desires. Input your preferences \" \"below. The", "x >= 0 else \"red\" for x in diff] #", "alternatives.\" ), className=\"card-text\", ), ] ) ], className=\"mr-3 ml-3 mb-2", "\"64\", 128: \"128\", 256: \"256\", }, # className=\"text-center mt-5\", ),", "i in range(len(tooltips) + 2, 6)] return (best, *others, *tooltips)", "for (col, c) in zip(data.index, colors) ] ) ] )", "mt-5\", ), ], className=\"mr-3 ml-3 mb-2 mt-2\", ), dbc.FormGroup( children=[", "5, \"offset\": 0}, className=\"mb-2 mt-2\", ), ] ), dbc.Row([html.Div(id=\"callback-dump\")]), ],", "].reset_index(drop=True) card_data = choice_data[details_on_card].reset_index(drop=True) maxi = np.asarray([-1, -1, -1, 1])", "np.argsort(distance) best = table_from_data(card_data.loc[distance_order.values[0]], choices[1:]) total_number = len(distance_order) if total_number", "className=\"card-title text-center\", ), html.P( ( \"This app uses decision support", "}, className=\"text-center mt-5\", ), ], className=\"mr-3 ml-3 mb-2 mt-2\", ),", "which reflect \" \"the user's desires. 
Input your preferences \"", "), dbc.FormGroup( children=[ dbc.Label( \"Choose desired Memory capacity (GB)\", html_for=\"memory-choice\",", "st else False for st in data[\"OS\"]]] relevant_data = choice_data[", "\"16\", 32: \"32\", 64: \"64\", 128: \"128\", 256: \"256\", },", "i + 1 return contents, tables if __name__ == \"__main__\":", "@app.callback( [ Output(\"results\", \"children\"), *[Output(f\"other-results-list-{i}\", \"children\") for i in range(2,", "marks={ 0: \"0\", 10: \"10\", 30: \"30\", 50: \"50\", 70:", "print(np.sign(diff)) return dbc.Table( [ html.Tbody( [ html.Tr( [ html.Th(col), html.Td([str(data[col]),],),", "dash.exceptions import PreventUpdate import dash_core_components as dcc import dash_html_components as", "mt-5\", ), ], className=\"mr-3 ml-3 mb-2 mt-2\", ), ], style={\"maxHeight\":", "], className=\"mb-4\", ), dbc.Card( children=[ dbc.CardHeader(\"Other great phones:\"), dbc.CardBody( id=\"other-results\",", "tooltips + [None for i in range(len(tooltips) + 2, 6)]", "\"RAM\", \"Camera (MP)\", \"Price (Euros)\"] # print(data[to_compare].values) diff = (data[to_compare].values", "/ (ideal - nadir) distance = distance.max(axis=1) distance_order = np.argsort(distance)", "id=\"cam-choice\", min=0, max=130, step=1, included=False, value=70, marks={ 0: \"0\", 10:", "html.Span( f\"{i}. 
\", id=f\"other-results-list-{i}\", ) ) for i in range(2,", "-1, \"Price (Euros)\": 1, } fitness_data = data[fitness_columns] * maxi[fitness_columns].values", "dbc.CardBody(id=\"results\"), ], className=\"mb-4\", ), dbc.Card( children=[ dbc.CardHeader(\"Other great phones:\"), dbc.CardBody(", "tooltips = tooltips + [None for i in range(len(tooltips) +", "children=[ dbc.Label( \"Choose desired budget (Euros)\", html_for=\"cost-choice\", ), dcc.Slider( id=\"cost-choice\",", "), dbc.FormGroup( children=[ dbc.Label( \"Choose desired camera resolution (MP)\", html_for=\"cam-choice\",", "Output, State import dash_bootstrap_components as dbc import dash_table import plotly.express", "\"Memory\": -1, \"RAM\": -1, \"Camera (MP)\": -1, \"Price (Euros)\": 1,", "in range(2, 6)], *[Output(f\"other-results-tooltip-{i}\", \"children\") for i in range(2, 6)],", "), dcc.Slider( id=\"memory-choice\", min=16, max=256, step=None, included=False, value=256, marks={ 16:", "mt-2\", ), dbc.FormGroup( children=[ dbc.Label( \"Choose desired RAM capacity (GB)\",", "style={\"color\": c,},)],), ] ) for (col, c) in zip(data.index, colors)", "desired operating system\", html_for=\"os-choice\", ), dbc.RadioItems( options=[ { \"label\": \"Android\",", "\"8\", 9: \"9\", 10: \"10\", 11: \"11\", 12: \"12\", },", "import Input, Output, State import dash_bootstrap_components as dbc import dash_table", "400: \"400\", 600: \"600\", 800: \"800\", 1000: \"1000\", 1200: \"1200\",", "(Euros)\",] ].reset_index(drop=True) card_data = choice_data[details_on_card].reset_index(drop=True) maxi = np.asarray([-1, -1, -1,", "-\" for i in range(len(others) + 2, 6)] tooltips =", "data.iterrows(): contents.append(f\"{i}. 
{row['Model']}\") tables.append(table_from_data_horizontal(row)) i = i + 1 return", "}, ], id=\"os-choice\", value=\"both\", inline=True, # className=\"text-center mt-4\", ), ],", "), dbc.RadioItems( options=[ { \"label\": \"Android\", \"value\": \"Android\", }, {\"label\":", "[] for i in range(num): content.append(dbc.Tooltip(tooldict[\"tables\"][i], target=tooldict[\"ids\"][i])) return content\"\"\" def", "scalable dbc.Row( [ dbc.Col( html.H1( children=\"What is your optimal phone?\",", "step=1, included=False, value=70, marks={ 0: \"0\", 10: \"10\", 30: \"30\",", "[ html.Tbody( [ html.Tr( [ html.Th(col), html.Td([str(data[col]),],), html.Td([html.Span(\" ▉\", style={\"color\":", "[ Input(f\"{attr}-choice\", \"value\") for attr in [\"os\", \"memory\", \"ram\", \"cam\",", "= (data[to_compare].values - choices) * [1, 1, 1, -1] colors", "mb-2 mt-2\", ), ], style={\"maxHeight\": \"560px\", \"overflow\": \"auto\"}, ), ],", "as pd import numpy as np data = pd.read_csv(\"./data/Phone_dataset_new.csv\", header=0)", "your preferences \" \"below. 
The box on top right shows", "as ex import plotly.graph_objects as go import pandas as pd", "State import dash_bootstrap_components as dbc import dash_table import plotly.express as", "-1, \"RAM\": -1, \"Camera (MP)\": -1, \"Price (Euros)\": 1, }", "min=0, max=1400, step=1, included=False, value=100, marks={ 0: \"0\", 200: \"200\",", "from dash.exceptions import PreventUpdate import dash_core_components as dcc import dash_html_components", "step=1, included=False, value=100, marks={ 0: \"0\", 200: \"200\", 400: \"400\",", "= { \"Memory\": -1, \"RAM\": -1, \"Camera (MP)\": -1, \"Price", "data[[True if \"IOS\" in st else False for st in", "*tooltips) \"\"\"@app.callback(Output(\"tooltips\", \"children\"), [Input(\"callback-dump\", \"children\")]) def tooltips(tooldict): num = len(tooldict[\"ids\"])", "= relevant_data.min().values nadir = relevant_data.max().values aspirations = choices[1:] * maxi", ") ) for i in range(2, 6) ] + [", "] ), dbc.Row( [ dbc.Col( children=[ # Top card with", "the best. 
\" \"The box on bottom right provides some", "ml-3 mb-2 mt-2\", ), dbc.FormGroup( children=[ dbc.Label( \"Choose desired budget", "], className=\"mr-3 ml-3 mb-2 mt-2\", ), dbc.Form( [ dbc.FormGroup( children=[", "className=\"mt-4\", ), html.Div(id=\"tooltips\"), ], width={\"size\": 5, \"offset\": 0}, className=\"mb-2 mt-2\",", "inline=True, # className=\"text-center mt-4\", ), ], className=\"mr-3 ml-3 mb-2 mt-2\",", "dbc.FormGroup( children=[ dbc.Label( \"Choose desired operating system\", html_for=\"os-choice\", ), dbc.RadioItems(", "support tools to \" \"quickly and easily find phones which", ") @app.callback( [ Output(\"results\", \"children\"), *[Output(f\"other-results-list-{i}\", \"children\") for i in", "\"1400\", }, className=\"text-center mt-5\", ), ], className=\"mr-3 ml-3 mb-2 mt-2\",", "table_from_data(card_data.loc[distance_order.values[0]], choices[1:]) total_number = len(distance_order) if total_number >= 4: others,", "\"offset\": 0}, className=\"mb-2 mt-2\", ), ] ), dbc.Row([html.Div(id=\"callback-dump\")]), ], )", "preference\", \"value\": \"both\", }, ], id=\"os-choice\", value=\"both\", inline=True, # className=\"text-center", "desired RAM capacity (GB)\", html_for=\"ram-choice\", ), dcc.Slider( id=\"ram-choice\", min=2, max=12,", "= table_from_data(card_data.loc[distance_order.values[0]], choices[1:]) total_number = len(distance_order) if total_number >= 4:", "col in data.index]))] body = [html.Tbody([html.Tr([html.Td(data[col]) for col in data.index])])]", "id=\"ram-choice\", min=2, max=12, step=1, value=12, included=False, marks={ 2: \"2\", 3:", "nadir) distance = distance.max(axis=1) distance_order = np.argsort(distance) best = table_from_data(card_data.loc[distance_order.values[0]],", "choice_data = data elif choices[0] == \"IOS\": choice_data = data[[True", "system\", html_for=\"os-choice\", ), dbc.RadioItems( options=[ { \"label\": \"Android\", \"value\": \"Android\",", "256: \"256\", }, # className=\"text-center mt-5\", ), ], 
className=\"mr-3 ml-3", "], width={\"size\": 5, \"offset\": 1}, ), dbc.Col( children=[ dbc.Card( children=[", "dcc.Slider( id=\"ram-choice\", min=2, max=12, step=1, value=12, included=False, marks={ 2: \"2\",", "colors = [None, None, None] + [\"green\" if x >=", "= [] tables = [] ids = [] i =", "dash from dash.exceptions import PreventUpdate import dash_core_components as dcc import", "as html from dash.dependencies import Input, Output, State import dash_bootstrap_components", "* maxi distance = (aspirations - relevant_data) / (ideal -", "in diff] # print(np.sign(diff)) return dbc.Table( [ html.Tbody( [ html.Tr(", "bottom right provides some \" \"close alternatives.\" ), className=\"card-text\", ),", "className=\"text-center mt-5\", ), ], className=\"mr-3 ml-3 mb-2 mt-2\", ), ],", "mb-2 mt-2\", ), dbc.FormGroup( children=[ dbc.Label( \"Choose desired budget (Euros)\",", "= [] i = 2 for index, row in data.iterrows():", "max=130, step=1, included=False, value=70, marks={ 0: \"0\", 10: \"10\", 30:", "i in range(2, 6)], ], [ Input(f\"{attr}-choice\", \"value\") for attr", "app.layout = html.Div( children=[ # .container class is fixed, .container.scalable", "= dash.Dash( __name__, external_stylesheets=[dbc.themes.LITERA], eager_loading=True, suppress_callback_exceptions=True, ) app.layout = html.Div(", "html.H4( \"Researcher's Night Event\", className=\"card-title text-center\", ), html.P( ( \"This", "*[Output(f\"other-results-tooltip-{i}\", \"children\") for i in range(2, 6)], ], [ Input(f\"{attr}-choice\",", "\"red\" for x in diff] # print(np.sign(diff)) return dbc.Table( [", "\"130\", }, className=\"text-center mt-5\", ), ], className=\"mr-3 ml-3 mb-2 mt-2\",", "aspirations = choices[1:] * maxi distance = (aspirations - relevant_data)", "\"0\", 200: \"200\", 400: \"400\", 600: \"600\", 800: \"800\", 1000:", "class is fixed, .container.scalable is scalable dbc.Row( [ dbc.Col( html.H1(", "ml-3 mb-2 mt-2\", ), dbc.FormGroup( children=[ dbc.Label( \"Choose desired 
Memory", "\"6\", 7: \"7\", 8: \"8\", 9: \"9\", 10: \"10\", 11:", "dbc.Row( [ dbc.Col( html.H1( children=\"What is your optimal phone?\", className=\"text-center", "eager_loading=True, suppress_callback_exceptions=True, ) app.layout = html.Div( children=[ # .container class", ".container.scalable is scalable dbc.Row( [ dbc.Col( html.H1( children=\"What is your", "The box on top right shows the phone \" \"which", "= np.argsort(distance) best = table_from_data(card_data.loc[distance_order.values[0]], choices[1:]) total_number = len(distance_order) if", "mb-2 mt-2\", ), dbc.Form( [ dbc.FormGroup( children=[ dbc.Label( \"Choose desired", "len(distance_order) if total_number >= 4: others, tooltips = other_options(card_data.loc[distance_order.values[1:5]]) else:", "\"which matches the preferences the best. \" \"The box on", "= choice_data[ [\"Memory\", \"RAM\", \"Camera (MP)\", \"Price (Euros)\",] ].reset_index(drop=True) card_data", "external_stylesheets=[dbc.themes.LITERA], eager_loading=True, suppress_callback_exceptions=True, ) app.layout = html.Div( children=[ # .container", "\"children\") for i in range(2, 6)], *[Output(f\"other-results-tooltip-{i}\", \"children\") for i", "import plotly.graph_objects as go import pandas as pd import numpy", "id=\"cost-choice\", min=0, max=1400, step=1, included=False, value=100, marks={ 0: \"0\", 200:", "\" \"The box on bottom right provides some \" \"close", "card with details(?) 
dbc.Card( children=[ dbc.CardBody( [ html.H4( \"Researcher's Night", "className=\"mb-2 mt-2\", ), ] ), dbc.Row([html.Div(id=\"callback-dump\")]), ], ) @app.callback( [", "(col, c) in zip(data.index, colors) ] ) ] ) def", "\"close alternatives.\" ), className=\"card-text\", ), ] ) ], className=\"mr-3 ml-3", "dbc.Row([html.Div(id=\"callback-dump\")]), ], ) @app.callback( [ Output(\"results\", \"children\"), *[Output(f\"other-results-list-{i}\", \"children\") for", "st in data[\"OS\"]]] if choices[0] == \"Android\": choice_data = data[[True", "resolution (MP)\", html_for=\"cam-choice\", ), dcc.Slider( id=\"cam-choice\", min=0, max=130, step=1, included=False,", "10: \"10\", 11: \"11\", 12: \"12\", }, className=\"text-center mt-5\", ),", ">= 4: others, tooltips = other_options(card_data.loc[distance_order.values[1:5]]) else: others, tooltips =", "width={\"size\": 5, \"offset\": 0}, className=\"mb-2 mt-2\", ), ] ), dbc.Row([html.Div(id=\"callback-dump\")]),", "others + [f\"{i}. -\" for i in range(len(others) + 2,", "children=[ dbc.Label( \"Choose desired RAM capacity (GB)\", html_for=\"ram-choice\", ), dcc.Slider(", "], ) def results(*choices): if choices[0] == \"both\": choice_data =", "box on top right shows the phone \" \"which matches", "def results(*choices): if choices[0] == \"both\": choice_data = data elif", "data[[True if \"Android\" in st else False for st in", "choice_data = data[[True if \"Android\" in st else False for", "tables = [] ids = [] i = 2 for", "value=256, marks={ 16: \"16\", 32: \"32\", 64: \"64\", 128: \"128\",", "go import pandas as pd import numpy as np data", "choices) * [1, 1, 1, -1] colors = [None, None,", "on bottom right provides some \" \"close alternatives.\" ), className=\"card-text\",", "= details.loc[1].astype(int) details_on_card = details.loc[2].astype(int) details_on_card = details.columns[details_on_card == 1]", "header=0) names = details.loc[0] data = data.rename(columns=names) details = details.rename(columns=names)", "Memory 
capacity (GB)\", html_for=\"memory-choice\", ), dcc.Slider( id=\"memory-choice\", min=16, max=256, step=None,", "step=None, included=False, value=256, marks={ 16: \"16\", 32: \"32\", 64: \"64\",", "style={ \"maxWidth\": 700, \"background-color\": \"white\", \"color\": \"white\", \"border-style\": \"solid\", \"border-color\":", "preferences the best. \" \"The box on bottom right provides", "children=[ # Top card with details(?) dbc.Card( children=[ dbc.CardBody( [", "choice_data = data[[True if \"IOS\" in st else False for", "app uses decision support tools to \" \"quickly and easily", "\"Choose desired camera resolution (MP)\", html_for=\"cam-choice\", ), dcc.Slider( id=\"cam-choice\", min=0,", "in range(num): content.append(dbc.Tooltip(tooldict[\"tables\"][i], target=tooldict[\"ids\"][i])) return content\"\"\" def table_from_data(data, choices): #", "html.Div( children=[ # .container class is fixed, .container.scalable is scalable", "600: \"600\", 800: \"800\", 1000: \"1000\", 1200: \"1200\", 1400: \"1400\",", "\"border-style\": \"solid\", \"border-color\": \"black\", }, ) for i in range(2,", "+ body) def other_options(data): contents = [] tables = []", "other_options( card_data.loc[distance_order.values[1:total_number]] ) others = others + [f\"{i}. -\" for", "capacity (GB)\", html_for=\"memory-choice\", ), dcc.Slider( id=\"memory-choice\", min=16, max=256, step=None, included=False,", "preferences \" \"below. 
The box on top right shows the", "\"children\"), [Input(\"callback-dump\", \"children\")]) def tooltips(tooldict): num = len(tooldict[\"ids\"]) content =", "+ 2, 6)] tooltips = tooltips + [None for i", "your optimal phone?\", className=\"text-center mt-4\", ) ) ] ), dbc.Row(", "(Euros)\", html_for=\"cost-choice\", ), dcc.Slider( id=\"cost-choice\", min=0, max=1400, step=1, included=False, value=100,", "c,},)],), ] ) for (col, c) in zip(data.index, colors) ]", "import dash_html_components as html from dash.dependencies import Input, Output, State", "in data[\"OS\"]]] if choices[0] == \"Android\": choice_data = data[[True if", "className=\"text-center mt-4\", ) ) ] ), dbc.Row( [ dbc.Col( children=[", "numpy as np data = pd.read_csv(\"./data/Phone_dataset_new.csv\", header=0) details = pd.read_csv(\"./data/Phone_details.csv\",", "\"Camera (MP)\": -1, \"Price (Euros)\": 1, } fitness_data = data[fitness_columns]", "best phone for you is:\"), dbc.CardBody(id=\"results\"), ], className=\"mb-4\", ), dbc.Card(", "phones:\"), dbc.CardBody( id=\"other-results\", children=( [ html.P( html.Span( f\"{i}. 
\", id=f\"other-results-list-{i}\",", "nadir = relevant_data.max().values aspirations = choices[1:] * maxi distance =", "* [1, 1, 1, -1] colors = [None, None, None]", "\"Android\" in st else False for st in data[\"OS\"]]] relevant_data", "\"110\", 130: \"130\", }, className=\"text-center mt-5\", ), ], className=\"mr-3 ml-3", "dash_table import plotly.express as ex import plotly.graph_objects as go import", "6) ] ), ), ], className=\"mt-4\", ), html.Div(id=\"tooltips\"), ], width={\"size\":", "2, 6)] tooltips = tooltips + [None for i in", "6)], ], [ Input(f\"{attr}-choice\", \"value\") for attr in [\"os\", \"memory\",", "= details.columns[details_on_card == 1] fitness_columns = { \"Memory\": -1, \"RAM\":", "1400: \"1400\", }, className=\"text-center mt-5\", ), ], className=\"mr-3 ml-3 mb-2", "desired budget (Euros)\", html_for=\"cost-choice\", ), dcc.Slider( id=\"cost-choice\", min=0, max=1400, step=1,", "dbc.FormGroup( children=[ dbc.Label( \"Choose desired camera resolution (MP)\", html_for=\"cam-choice\", ),", "\"value\") for attr in [\"os\", \"memory\", \"ram\", \"cam\", \"cost\"] ],", "(GB)\", html_for=\"memory-choice\", ), dcc.Slider( id=\"memory-choice\", min=16, max=256, step=None, included=False, value=256,", "html_for=\"cost-choice\", ), dcc.Slider( id=\"cost-choice\", min=0, max=1400, step=1, included=False, value=100, marks={", "Output(\"results\", \"children\"), *[Output(f\"other-results-list-{i}\", \"children\") for i in range(2, 6)], *[Output(f\"other-results-tooltip-{i}\",", "= relevant_data * maxi ideal = relevant_data.min().values nadir = relevant_data.max().values", "distance_order = np.argsort(distance) best = table_from_data(card_data.loc[distance_order.values[0]], choices[1:]) total_number = len(distance_order)", "results(*choices): if choices[0] == \"both\": choice_data = data elif choices[0]", "= other_options( card_data.loc[distance_order.values[1:total_number]] ) others = others + [f\"{i}. 
-\"", "} fitness_data = data[fitness_columns] * maxi[fitness_columns].values external_stylesheets = [\"https://codepen.io/chriddyp/pen/bWLwgP.css\"] app", "), ), ], className=\"mt-4\", ), html.Div(id=\"tooltips\"), ], width={\"size\": 5, \"offset\":", "className=\"mr-3 ml-3 mb-2 mt-2\", ), dbc.FormGroup( children=[ dbc.Label( \"Choose desired", "value=70, marks={ 0: \"0\", 10: \"10\", 30: \"30\", 50: \"50\",", "dbc.Col( children=[ # Top card with details(?) dbc.Card( children=[ dbc.CardBody(", "maxi[fitness_columns].values external_stylesheets = [\"https://codepen.io/chriddyp/pen/bWLwgP.css\"] app = dash.Dash( __name__, external_stylesheets=[dbc.themes.LITERA], eager_loading=True,", "is scalable dbc.Row( [ dbc.Col( html.H1( children=\"What is your optimal", "None] + [\"green\" if x >= 0 else \"red\" for", "+ 2, 6)] return (best, *others, *tooltips) \"\"\"@app.callback(Output(\"tooltips\", \"children\"), [Input(\"callback-dump\",", "1, -1] colors = [None, None, None] + [\"green\" if", "easily find phones which reflect \" \"the user's desires. 
Input", "), ] ), dbc.Row([html.Div(id=\"callback-dump\")]), ], ) @app.callback( [ Output(\"results\", \"children\"),", "\"maxWidth\": 700, \"background-color\": \"white\", \"color\": \"white\", \"border-style\": \"solid\", \"border-color\": \"black\",", "\"256\", }, # className=\"text-center mt-5\", ), ], className=\"mr-3 ml-3 mb-2", "value=12, included=False, marks={ 2: \"2\", 3: \"3\", 4: \"4\", 5:", "import dash from dash.exceptions import PreventUpdate import dash_core_components as dcc", "), ], className=\"mr-3 ml-3 mb-2 mt-2\", ), dbc.FormGroup( children=[ dbc.Label(", "for i in range(2, 6)], *[Output(f\"other-results-tooltip-{i}\", \"children\") for i in", "*[Output(f\"other-results-list-{i}\", \"children\") for i in range(2, 6)], *[Output(f\"other-results-tooltip-{i}\", \"children\") for", "for x in diff] # print(np.sign(diff)) return dbc.Table( [ html.Tbody(", ") ] ), dbc.Row( [ dbc.Col( children=[ # Top card", "\"32\", 64: \"64\", 128: \"128\", 256: \"256\", }, # className=\"text-center", "data.index])])] return dbc.Table(header + body) def other_options(data): contents = []", "for i in range(len(others) + 2, 6)] tooltips = tooltips", "= (aspirations - relevant_data) / (ideal - nadir) distance =", "\"Choose desired RAM capacity (GB)\", html_for=\"ram-choice\", ), dcc.Slider( id=\"ram-choice\", min=2,", "= [] ids = [] i = 2 for index,", "html.P( ( \"This app uses decision support tools to \"", "\"12\", }, className=\"text-center mt-5\", ), ], className=\"mr-3 ml-3 mb-2 mt-2\",", "id=\"other-results\", children=( [ html.P( html.Span( f\"{i}. 
\", id=f\"other-results-list-{i}\", ) )", "html.H1( children=\"What is your optimal phone?\", className=\"text-center mt-4\", ) )", "\"overflow\": \"auto\"}, ), ], width={\"size\": 5, \"offset\": 1}, ), dbc.Col(", "== \"Android\": choice_data = data[[True if \"Android\" in st else", "= other_options(card_data.loc[distance_order.values[1:5]]) else: others, tooltips = other_options( card_data.loc[distance_order.values[1:total_number]] ) others", "\"RAM\": -1, \"Camera (MP)\": -1, \"Price (Euros)\": 1, } fitness_data", "id=f\"other-results-list-{i}\", ) ) for i in range(2, 6) ] +", "dash_bootstrap_components as dbc import dash_table import plotly.express as ex import", "11: \"11\", 12: \"12\", }, className=\"text-center mt-5\", ), ], className=\"mr-3", "children=[ dbc.CardBody( [ html.H4( \"Researcher's Night Event\", className=\"card-title text-center\", ),", "dash_core_components as dcc import dash_html_components as html from dash.dependencies import", "\"10\", 30: \"30\", 50: \"50\", 70: \"70\", 90: \"90\", 110:", "700, \"background-color\": \"white\", \"color\": \"white\", \"border-style\": \"solid\", \"border-color\": \"black\", },", "mt-2\", ), dbc.FormGroup( children=[ dbc.Label( \"Choose desired Memory capacity (GB)\",", "return dbc.Table(header + body) def other_options(data): contents = [] tables", "np.asarray([-1, -1, -1, 1]) relevant_data = relevant_data * maxi ideal", "marks={ 2: \"2\", 3: \"3\", 4: \"4\", 5: \"5\", 6:", "plotly.graph_objects as go import pandas as pd import numpy as", "= tooltips + [None for i in range(len(tooltips) + 2,", "= [None, None, None] + [\"green\" if x >= 0", ") ) ] ), dbc.Row( [ dbc.Col( children=[ # Top", "mb-2 mt-2\", ), dbc.FormGroup( children=[ dbc.Label( \"Choose desired RAM capacity", "] ) def table_from_data_horizontal(data): header = [html.Thead(html.Tr([html.Th(col) for col in", "Event\", className=\"card-title text-center\", ), html.P( ( \"This app uses decision", "c) in zip(data.index, colors) ] ) ] ) def 
table_from_data_horizontal(data):", "header = [html.Thead(html.Tr([html.Th(col) for col in data.index]))] body = [html.Tbody([html.Tr([html.Td(data[col])", "\"quickly and easily find phones which reflect \" \"the user's", "+ [f\"{i}. -\" for i in range(len(others) + 2, 6)]", "[Input(\"callback-dump\", \"children\")]) def tooltips(tooldict): num = len(tooldict[\"ids\"]) content = []", "if total_number >= 4: others, tooltips = other_options(card_data.loc[distance_order.values[1:5]]) else: others,", "1]) relevant_data = relevant_data * maxi ideal = relevant_data.min().values nadir", "= choices[1:] * maxi distance = (aspirations - relevant_data) /", "\"70\", 90: \"90\", 110: \"110\", 130: \"130\", }, className=\"text-center mt-5\",", "\"3\", 4: \"4\", 5: \"5\", 6: \"6\", 7: \"7\", 8:", "(Euros)\": 1, } fitness_data = data[fitness_columns] * maxi[fitness_columns].values external_stylesheets =", "(MP)\", \"Price (Euros)\"] # print(data[to_compare].values) diff = (data[to_compare].values - choices)", "html.Th(col), html.Td([str(data[col]),],), html.Td([html.Span(\" ▉\", style={\"color\": c,},)],), ] ) for (col,", "choices): # print(choices) to_compare = [\"Memory\", \"RAM\", \"Camera (MP)\", \"Price", "}, # className=\"text-center mt-5\", ), ], className=\"mr-3 ml-3 mb-2 mt-2\",", "* maxi ideal = relevant_data.min().values nadir = relevant_data.max().values aspirations =", "className=\"text-center mt-5\", ), ], className=\"mr-3 ml-3 mb-2 mt-2\", ), dbc.FormGroup(", "= others + [f\"{i}. 
-\" for i in range(len(others) +", "[ dbc.Col( html.H1( children=\"What is your optimal phone?\", className=\"text-center mt-4\",", "return content\"\"\" def table_from_data(data, choices): # print(choices) to_compare = [\"Memory\",", "\"1200\", 1400: \"1400\", }, className=\"text-center mt-5\", ), ], className=\"mr-3 ml-3", "), dcc.Slider( id=\"cam-choice\", min=0, max=130, step=1, included=False, value=70, marks={ 0:", "value=100, marks={ 0: \"0\", 200: \"200\", 400: \"400\", 600: \"600\",", "5: \"5\", 6: \"6\", 7: \"7\", 8: \"8\", 9: \"9\",", "dcc.Slider( id=\"cam-choice\", min=0, max=130, step=1, included=False, value=70, marks={ 0: \"0\",", "desires. Input your preferences \" \"below. The box on top", "if choices[0] == \"Android\": choice_data = data[[True if \"Android\" in", "* maxi[fitness_columns].values external_stylesheets = [\"https://codepen.io/chriddyp/pen/bWLwgP.css\"] app = dash.Dash( __name__, external_stylesheets=[dbc.themes.LITERA],", "1000: \"1000\", 1200: \"1200\", 1400: \"1400\", }, className=\"text-center mt-5\", ),", "external_stylesheets = [\"https://codepen.io/chriddyp/pen/bWLwgP.css\"] app = dash.Dash( __name__, external_stylesheets=[dbc.themes.LITERA], eager_loading=True, suppress_callback_exceptions=True,", "\"0\", 10: \"10\", 30: \"30\", 50: \"50\", 70: \"70\", 90:", "] ) ], className=\"mr-3 ml-3 mb-2 mt-2\", ), dbc.Form( [", "max=1400, step=1, included=False, value=100, marks={ 0: \"0\", 200: \"200\", 400:", "1, } fitness_data = data[fitness_columns] * maxi[fitness_columns].values external_stylesheets = [\"https://codepen.io/chriddyp/pen/bWLwgP.css\"]", "provides some \" \"close alternatives.\" ), className=\"card-text\", ), ] )", "= data[[True if \"Android\" in st else False for st", "else: others, tooltips = other_options( card_data.loc[distance_order.values[1:total_number]] ) others = others", "header=0) details = pd.read_csv(\"./data/Phone_details.csv\", header=0) names = details.loc[0] data =", 
"tables.append(table_from_data_horizontal(row)) i = i + 1 return contents, tables if", "), dbc.FormGroup( children=[ dbc.Label( \"Choose desired RAM capacity (GB)\", html_for=\"ram-choice\",", "dbc import dash_table import plotly.express as ex import plotly.graph_objects as", "for i in range(num): content.append(dbc.Tooltip(tooldict[\"tables\"][i], target=tooldict[\"ids\"][i])) return content\"\"\" def table_from_data(data,", "pd.read_csv(\"./data/Phone_dataset_new.csv\", header=0) details = pd.read_csv(\"./data/Phone_details.csv\", header=0) names = details.loc[0] data", "className=\"text-center mt-4\", ), ], className=\"mr-3 ml-3 mb-2 mt-2\", ), dbc.FormGroup(", "1200: \"1200\", 1400: \"1400\", }, className=\"text-center mt-5\", ), ], className=\"mr-3", "[ Output(\"results\", \"children\"), *[Output(f\"other-results-list-{i}\", \"children\") for i in range(2, 6)],", "def table_from_data_horizontal(data): header = [html.Thead(html.Tr([html.Th(col) for col in data.index]))] body", "\"Camera (MP)\", \"Price (Euros)\",] ].reset_index(drop=True) card_data = choice_data[details_on_card].reset_index(drop=True) maxi =", "\", id=f\"other-results-list-{i}\", ) ) for i in range(2, 6) ]", "range(2, 6) ] + [ dbc.Tooltip( id=f\"other-results-tooltip-{i}\", target=f\"other-results-list-{i}\", placement=\"right\", style={", "html.Div(id=\"tooltips\"), ], width={\"size\": 5, \"offset\": 0}, className=\"mb-2 mt-2\", ), ]", "(Euros)\"] # print(data[to_compare].values) diff = (data[to_compare].values - choices) * [1,", "= [html.Tbody([html.Tr([html.Td(data[col]) for col in data.index])])] return dbc.Table(header + body)", "details(?) 
dbc.Card( children=[ dbc.CardBody( [ html.H4( \"Researcher's Night Event\", className=\"card-title", "mt-2\", ), ], style={\"maxHeight\": \"560px\", \"overflow\": \"auto\"}, ), ], width={\"size\":", "attr in [\"os\", \"memory\", \"ram\", \"cam\", \"cost\"] ], ) def", "pandas as pd import numpy as np data = pd.read_csv(\"./data/Phone_dataset_new.csv\",", "colors) ] ) ] ) def table_from_data_horizontal(data): header = [html.Thead(html.Tr([html.Th(col)", "[\"Memory\", \"RAM\", \"Camera (MP)\", \"Price (Euros)\",] ].reset_index(drop=True) card_data = choice_data[details_on_card].reset_index(drop=True)", "PreventUpdate import dash_core_components as dcc import dash_html_components as html from", "if \"IOS\" in st else False for st in data[\"OS\"]]]", "), ], width={\"size\": 5, \"offset\": 1}, ), dbc.Col( children=[ dbc.Card(", "▉\", style={\"color\": c,},)],), ] ) for (col, c) in zip(data.index,", "choices[1:]) total_number = len(distance_order) if total_number >= 4: others, tooltips", "import plotly.express as ex import plotly.graph_objects as go import pandas", "\"200\", 400: \"400\", 600: \"600\", 800: \"800\", 1000: \"1000\", 1200:", "decision support tools to \" \"quickly and easily find phones", "max=12, step=1, value=12, included=False, marks={ 2: \"2\", 3: \"3\", 4:", "range(2, 6)], ], [ Input(f\"{attr}-choice\", \"value\") for attr in [\"os\",", "[None, None, None] + [\"green\" if x >= 0 else", "dash_html_components as html from dash.dependencies import Input, Output, State import", "operating system\", html_for=\"os-choice\", ), dbc.RadioItems( options=[ { \"label\": \"Android\", \"value\":", "id=f\"other-results-tooltip-{i}\", target=f\"other-results-list-{i}\", placement=\"right\", style={ \"maxWidth\": 700, \"background-color\": \"white\", \"color\": \"white\",", "ideal = relevant_data.min().values nadir = relevant_data.max().values aspirations = choices[1:] *", "# print(np.sign(diff)) return dbc.Table( [ html.Tbody( [ html.Tr( [ html.Th(col),", "data = 
data.rename(columns=names) details = details.rename(columns=names) maxi = details.loc[1].astype(int) details_on_card", "best. \" \"The box on bottom right provides some \"", "import dash_bootstrap_components as dbc import dash_table import plotly.express as ex", "dcc.Slider( id=\"memory-choice\", min=16, max=256, step=None, included=False, value=256, marks={ 16: \"16\",", "Input(f\"{attr}-choice\", \"value\") for attr in [\"os\", \"memory\", \"ram\", \"cam\", \"cost\"]", "\"children\"), *[Output(f\"other-results-list-{i}\", \"children\") for i in range(2, 6)], *[Output(f\"other-results-tooltip-{i}\", \"children\")", "data[\"OS\"]]] if choices[0] == \"Android\": choice_data = data[[True if \"Android\"", "user's desires. Input your preferences \" \"below. The box on", "html.P( html.Span( f\"{i}. \", id=f\"other-results-list-{i}\", ) ) for i in", "find phones which reflect \" \"the user's desires. Input your", "data[\"OS\"]]] relevant_data = choice_data[ [\"Memory\", \"RAM\", \"Camera (MP)\", \"Price (Euros)\",]", "card_data.loc[distance_order.values[1:total_number]] ) others = others + [f\"{i}. 
-\" for i", "[\"Memory\", \"RAM\", \"Camera (MP)\", \"Price (Euros)\"] # print(data[to_compare].values) diff =", "\"128\", 256: \"256\", }, # className=\"text-center mt-5\", ), ], className=\"mr-3", "\"Price (Euros)\": 1, } fitness_data = data[fitness_columns] * maxi[fitness_columns].values external_stylesheets", "st in data[\"OS\"]]] relevant_data = choice_data[ [\"Memory\", \"RAM\", \"Camera (MP)\",", "# print(data[to_compare].values) diff = (data[to_compare].values - choices) * [1, 1,", "dcc import dash_html_components as html from dash.dependencies import Input, Output,", ") for i in range(2, 6) ] ), ), ],", "), dbc.Card( children=[ dbc.CardHeader(\"Other great phones:\"), dbc.CardBody( id=\"other-results\", children=( [", "= len(distance_order) if total_number >= 4: others, tooltips = other_options(card_data.loc[distance_order.values[1:5]])", "dbc.Card( children=[ dbc.CardHeader(\"Other great phones:\"), dbc.CardBody( id=\"other-results\", children=( [ html.P(", "= choice_data[details_on_card].reset_index(drop=True) maxi = np.asarray([-1, -1, -1, 1]) relevant_data =", "print(data[to_compare].values) diff = (data[to_compare].values - choices) * [1, 1, 1,", "len(tooldict[\"ids\"]) content = [] for i in range(num): content.append(dbc.Tooltip(tooldict[\"tables\"][i], target=tooldict[\"ids\"][i]))", "as np data = pd.read_csv(\"./data/Phone_dataset_new.csv\", header=0) details = pd.read_csv(\"./data/Phone_details.csv\", header=0)", "12: \"12\", }, className=\"text-center mt-5\", ), ], className=\"mr-3 ml-3 mb-2", "for index, row in data.iterrows(): contents.append(f\"{i}. 
{row['Model']}\") tables.append(table_from_data_horizontal(row)) i =", "names = details.loc[0] data = data.rename(columns=names) details = details.rename(columns=names) maxi", "= data.rename(columns=names) details = details.rename(columns=names) maxi = details.loc[1].astype(int) details_on_card =", "dbc.Card( children=[ dbc.CardHeader(\"The best phone for you is:\"), dbc.CardBody(id=\"results\"), ],", "for st in data[\"OS\"]]] relevant_data = choice_data[ [\"Memory\", \"RAM\", \"Camera", "x in diff] # print(np.sign(diff)) return dbc.Table( [ html.Tbody( [", "4: \"4\", 5: \"5\", 6: \"6\", 7: \"7\", 8: \"8\",", "id=\"memory-choice\", min=16, max=256, step=None, included=False, value=256, marks={ 16: \"16\", 32:", "800: \"800\", 1000: \"1000\", 1200: \"1200\", 1400: \"1400\", }, className=\"text-center", "dbc.Col( children=[ dbc.Card( children=[ dbc.CardHeader(\"The best phone for you is:\"),", "\"children\")]) def tooltips(tooldict): num = len(tooldict[\"ids\"]) content = [] for", "\"Price (Euros)\",] ].reset_index(drop=True) card_data = choice_data[details_on_card].reset_index(drop=True) maxi = np.asarray([-1, -1,", "None, None] + [\"green\" if x >= 0 else \"red\"", "dbc.CardBody( id=\"other-results\", children=( [ html.P( html.Span( f\"{i}. 
\", id=f\"other-results-list-{i}\", )", "contents = [] tables = [] ids = [] i", "relevant_data * maxi ideal = relevant_data.min().values nadir = relevant_data.max().values aspirations", "), ] ) ], className=\"mr-3 ml-3 mb-2 mt-2\", ), dbc.Form(", "min=16, max=256, step=None, included=False, value=256, marks={ 16: \"16\", 32: \"32\",", "fitness_columns = { \"Memory\": -1, \"RAM\": -1, \"Camera (MP)\": -1,", "html_for=\"cam-choice\", ), dcc.Slider( id=\"cam-choice\", min=0, max=130, step=1, included=False, value=70, marks={", "else False for st in data[\"OS\"]]] relevant_data = choice_data[ [\"Memory\",", "), dbc.Row( [ dbc.Col( children=[ # Top card with details(?)", "mt-2\", ), dbc.FormGroup( children=[ dbc.Label( \"Choose desired budget (Euros)\", html_for=\"cost-choice\",", "{ \"label\": \"No preference\", \"value\": \"both\", }, ], id=\"os-choice\", value=\"both\",", "False for st in data[\"OS\"]]] relevant_data = choice_data[ [\"Memory\", \"RAM\",", "[html.Tbody([html.Tr([html.Td(data[col]) for col in data.index])])] return dbc.Table(header + body) def", "dbc.CardHeader(\"Other great phones:\"), dbc.CardBody( id=\"other-results\", children=( [ html.P( html.Span( f\"{i}.", "dbc.Form( [ dbc.FormGroup( children=[ dbc.Label( \"Choose desired operating system\", html_for=\"os-choice\",", "step=1, value=12, included=False, marks={ 2: \"2\", 3: \"3\", 4: \"4\",", "details.loc[1].astype(int) details_on_card = details.loc[2].astype(int) details_on_card = details.columns[details_on_card == 1] fitness_columns", "+ [None for i in range(len(tooltips) + 2, 6)] return", "for col in data.index])])] return dbc.Table(header + body) def other_options(data):", "\"white\", \"border-style\": \"solid\", \"border-color\": \"black\", }, ) for i in", "] ), dbc.Row([html.Div(id=\"callback-dump\")]), ], ) @app.callback( [ Output(\"results\", \"children\"), *[Output(f\"other-results-list-{i}\",", "dbc.Col( html.H1( children=\"What is your optimal phone?\", className=\"text-center mt-4\", 
)", "def tooltips(tooldict): num = len(tooldict[\"ids\"]) content = [] for i", "200: \"200\", 400: \"400\", 600: \"600\", 800: \"800\", 1000: \"1000\",", "+ 1 return contents, tables if __name__ == \"__main__\": app.run_server(debug=False)", "\"\"\"@app.callback(Output(\"tooltips\", \"children\"), [Input(\"callback-dump\", \"children\")]) def tooltips(tooldict): num = len(tooldict[\"ids\"]) content", "= [\"https://codepen.io/chriddyp/pen/bWLwgP.css\"] app = dash.Dash( __name__, external_stylesheets=[dbc.themes.LITERA], eager_loading=True, suppress_callback_exceptions=True, )", "children=[ dbc.Label( \"Choose desired operating system\", html_for=\"os-choice\", ), dbc.RadioItems( options=[", "[\"https://codepen.io/chriddyp/pen/bWLwgP.css\"] app = dash.Dash( __name__, external_stylesheets=[dbc.themes.LITERA], eager_loading=True, suppress_callback_exceptions=True, ) app.layout", "\"label\": \"Android\", \"value\": \"Android\", }, {\"label\": \"iOS\", \"value\": \"IOS\"}, {", "details.loc[2].astype(int) details_on_card = details.columns[details_on_card == 1] fitness_columns = { \"Memory\":", "64: \"64\", 128: \"128\", 256: \"256\", }, # className=\"text-center mt-5\",", "6) ] + [ dbc.Tooltip( id=f\"other-results-tooltip-{i}\", target=f\"other-results-list-{i}\", placement=\"right\", style={ \"maxWidth\":", "1}, ), dbc.Col( children=[ dbc.Card( children=[ dbc.CardHeader(\"The best phone for", "Night Event\", className=\"card-title text-center\", ), html.P( ( \"This app uses", "in st else False for st in data[\"OS\"]]] if choices[0]", "dbc.CardHeader(\"The best phone for you is:\"), dbc.CardBody(id=\"results\"), ], className=\"mb-4\", ),", "\"offset\": 1}, ), dbc.Col( children=[ dbc.Card( children=[ dbc.CardHeader(\"The best phone", "] ), ), ], className=\"mt-4\", ), html.Div(id=\"tooltips\"), ], width={\"size\": 5,", "[ html.Th(col), html.Td([str(data[col]),],), html.Td([html.Span(\" ▉\", style={\"color\": c,},)],), ] ) for", "range(num): 
content.append(dbc.Tooltip(tooldict[\"tables\"][i], target=tooldict[\"ids\"][i])) return content\"\"\" def table_from_data(data, choices): # print(choices)", "= details.loc[2].astype(int) details_on_card = details.columns[details_on_card == 1] fitness_columns = {", "] ) for (col, c) in zip(data.index, colors) ] )", "phone \" \"which matches the preferences the best. \" \"The", "range(2, 6)], *[Output(f\"other-results-tooltip-{i}\", \"children\") for i in range(2, 6)], ],", "# className=\"text-center mt-5\", ), ], className=\"mr-3 ml-3 mb-2 mt-2\", ),", "for i in range(2, 6)], ], [ Input(f\"{attr}-choice\", \"value\") for", "= relevant_data.max().values aspirations = choices[1:] * maxi distance = (aspirations", "), dcc.Slider( id=\"ram-choice\", min=2, max=12, step=1, value=12, included=False, marks={ 2:", "id=\"os-choice\", value=\"both\", inline=True, # className=\"text-center mt-4\", ), ], className=\"mr-3 ml-3", "else \"red\" for x in diff] # print(np.sign(diff)) return dbc.Table(", "= data[fitness_columns] * maxi[fitness_columns].values external_stylesheets = [\"https://codepen.io/chriddyp/pen/bWLwgP.css\"] app = dash.Dash(", "4: others, tooltips = other_options(card_data.loc[distance_order.values[1:5]]) else: others, tooltips = other_options(", "8: \"8\", 9: \"9\", 10: \"10\", 11: \"11\", 12: \"12\",", "target=f\"other-results-list-{i}\", placement=\"right\", style={ \"maxWidth\": 700, \"background-color\": \"white\", \"color\": \"white\", \"border-style\":", "diff = (data[to_compare].values - choices) * [1, 1, 1, -1]", "phones which reflect \" \"the user's desires. 
Input your preferences", "tooltips = other_options(card_data.loc[distance_order.values[1:5]]) else: others, tooltips = other_options( card_data.loc[distance_order.values[1:total_number]] )", "0}, className=\"mb-2 mt-2\", ), ] ), dbc.Row([html.Div(id=\"callback-dump\")]), ], ) @app.callback(", "width={\"size\": 5, \"offset\": 1}, ), dbc.Col( children=[ dbc.Card( children=[ dbc.CardHeader(\"The", "\" \"quickly and easily find phones which reflect \" \"the", ") def table_from_data_horizontal(data): header = [html.Thead(html.Tr([html.Th(col) for col in data.index]))]", "0: \"0\", 200: \"200\", 400: \"400\", 600: \"600\", 800: \"800\",", ") for i in range(2, 6) ] + [ dbc.Tooltip(", "fitness_data = data[fitness_columns] * maxi[fitness_columns].values external_stylesheets = [\"https://codepen.io/chriddyp/pen/bWLwgP.css\"] app =", "dbc.FormGroup( children=[ dbc.Label( \"Choose desired Memory capacity (GB)\", html_for=\"memory-choice\", ),", "), html.Div(id=\"tooltips\"), ], width={\"size\": 5, \"offset\": 0}, className=\"mb-2 mt-2\", ),", "col in data.index])])] return dbc.Table(header + body) def other_options(data): contents", "the preferences the best. 
\" \"The box on bottom right", "dbc.Label( \"Choose desired Memory capacity (GB)\", html_for=\"memory-choice\", ), dcc.Slider( id=\"memory-choice\",", "= pd.read_csv(\"./data/Phone_dataset_new.csv\", header=0) details = pd.read_csv(\"./data/Phone_details.csv\", header=0) names = details.loc[0]", "0: \"0\", 10: \"10\", 30: \"30\", 50: \"50\", 70: \"70\",", "pd.read_csv(\"./data/Phone_details.csv\", header=0) names = details.loc[0] data = data.rename(columns=names) details =", "], className=\"mt-4\", ), html.Div(id=\"tooltips\"), ], width={\"size\": 5, \"offset\": 0}, className=\"mb-2", "i in range(2, 6) ] + [ dbc.Tooltip( id=f\"other-results-tooltip-{i}\", target=f\"other-results-list-{i}\",", "best = table_from_data(card_data.loc[distance_order.values[0]], choices[1:]) total_number = len(distance_order) if total_number >=", "tools to \" \"quickly and easily find phones which reflect", "\"560px\", \"overflow\": \"auto\"}, ), ], width={\"size\": 5, \"offset\": 1}, ),", "top right shows the phone \" \"which matches the preferences", "is your optimal phone?\", className=\"text-center mt-4\", ) ) ] ),", "on top right shows the phone \" \"which matches the", "dbc.Label( \"Choose desired budget (Euros)\", html_for=\"cost-choice\", ), dcc.Slider( id=\"cost-choice\", min=0,", "in range(len(others) + 2, 6)] tooltips = tooltips + [None", "row in data.iterrows(): contents.append(f\"{i}. 
{row['Model']}\") tables.append(table_from_data_horizontal(row)) i = i +", "total_number >= 4: others, tooltips = other_options(card_data.loc[distance_order.values[1:5]]) else: others, tooltips", "\" \"close alternatives.\" ), className=\"card-text\", ), ] ) ], className=\"mr-3", "10: \"10\", 30: \"30\", 50: \"50\", 70: \"70\", 90: \"90\",", "__name__, external_stylesheets=[dbc.themes.LITERA], eager_loading=True, suppress_callback_exceptions=True, ) app.layout = html.Div( children=[ #", "details.columns[details_on_card == 1] fitness_columns = { \"Memory\": -1, \"RAM\": -1,", "data[fitness_columns] * maxi[fitness_columns].values external_stylesheets = [\"https://codepen.io/chriddyp/pen/bWLwgP.css\"] app = dash.Dash( __name__,", "html_for=\"memory-choice\", ), dcc.Slider( id=\"memory-choice\", min=16, max=256, step=None, included=False, value=256, marks={", "\"9\", 10: \"10\", 11: \"11\", 12: \"12\", }, className=\"text-center mt-5\",", "], ) @app.callback( [ Output(\"results\", \"children\"), *[Output(f\"other-results-list-{i}\", \"children\") for i", "distance.max(axis=1) distance_order = np.argsort(distance) best = table_from_data(card_data.loc[distance_order.values[0]], choices[1:]) total_number =", "dbc.Row( [ dbc.Col( children=[ # Top card with details(?) 
dbc.Card(", "\"Camera (MP)\", \"Price (Euros)\"] # print(data[to_compare].values) diff = (data[to_compare].values -", "1, 1, -1] colors = [None, None, None] + [\"green\"", "-1] colors = [None, None, None] + [\"green\" if x", "return (best, *others, *tooltips) \"\"\"@app.callback(Output(\"tooltips\", \"children\"), [Input(\"callback-dump\", \"children\")]) def tooltips(tooldict):", "i in range(2, 6) ] ), ), ], className=\"mt-4\", ),", "box on bottom right provides some \" \"close alternatives.\" ),", "is fixed, .container.scalable is scalable dbc.Row( [ dbc.Col( html.H1( children=\"What", ") ], className=\"mr-3 ml-3 mb-2 mt-2\", ), dbc.Form( [ dbc.FormGroup(", "mt-2\", ), ] ), dbc.Row([html.Div(id=\"callback-dump\")]), ], ) @app.callback( [ Output(\"results\",", "budget (Euros)\", html_for=\"cost-choice\", ), dcc.Slider( id=\"cost-choice\", min=0, max=1400, step=1, included=False,", "\" \"which matches the preferences the best. \" \"The box", "desired camera resolution (MP)\", html_for=\"cam-choice\", ), dcc.Slider( id=\"cam-choice\", min=0, max=130,", "uses decision support tools to \" \"quickly and easily find", "2: \"2\", 3: \"3\", 4: \"4\", 5: \"5\", 6: \"6\",", "# print(choices) to_compare = [\"Memory\", \"RAM\", \"Camera (MP)\", \"Price (Euros)\"]", "choices[0] == \"Android\": choice_data = data[[True if \"Android\" in st", "\"Android\": choice_data = data[[True if \"Android\" in st else False", "choice_data[details_on_card].reset_index(drop=True) maxi = np.asarray([-1, -1, -1, 1]) relevant_data = relevant_data", "\"30\", 50: \"50\", 70: \"70\", 90: \"90\", 110: \"110\", 130:", "= html.Div( children=[ # .container class is fixed, .container.scalable is", "return dbc.Table( [ html.Tbody( [ html.Tr( [ html.Th(col), html.Td([str(data[col]),],), html.Td([html.Span(\"", "\"Choose desired operating system\", html_for=\"os-choice\", ), dbc.RadioItems( options=[ { \"label\":", "as dbc import dash_table import plotly.express as ex import plotly.graph_objects", 
"\"white\", \"color\": \"white\", \"border-style\": \"solid\", \"border-color\": \"black\", }, ) for", "\"value\": \"both\", }, ], id=\"os-choice\", value=\"both\", inline=True, # className=\"text-center mt-4\",", "= [html.Thead(html.Tr([html.Th(col) for col in data.index]))] body = [html.Tbody([html.Tr([html.Td(data[col]) for", "maxi = np.asarray([-1, -1, -1, 1]) relevant_data = relevant_data *", "dbc.FormGroup( children=[ dbc.Label( \"Choose desired budget (Euros)\", html_for=\"cost-choice\", ), dcc.Slider(", ") app.layout = html.Div( children=[ # .container class is fixed,", "included=False, value=70, marks={ 0: \"0\", 10: \"10\", 30: \"30\", 50:", "[ dbc.Tooltip( id=f\"other-results-tooltip-{i}\", target=f\"other-results-list-{i}\", placement=\"right\", style={ \"maxWidth\": 700, \"background-color\": \"white\",", "[] i = 2 for index, row in data.iterrows(): contents.append(f\"{i}.", "fixed, .container.scalable is scalable dbc.Row( [ dbc.Col( html.H1( children=\"What is", "dbc.Label( \"Choose desired operating system\", html_for=\"os-choice\", ), dbc.RadioItems( options=[ {", "(MP)\", html_for=\"cam-choice\", ), dcc.Slider( id=\"cam-choice\", min=0, max=130, step=1, included=False, value=70,", "marks={ 16: \"16\", 32: \"32\", 64: \"64\", 128: \"128\", 256:", "in range(2, 6)], ], [ Input(f\"{attr}-choice\", \"value\") for attr in", "data.rename(columns=names) details = details.rename(columns=names) maxi = details.loc[1].astype(int) details_on_card = details.loc[2].astype(int)", "className=\"mb-4\", ), dbc.Card( children=[ dbc.CardHeader(\"Other great phones:\"), dbc.CardBody( id=\"other-results\", children=(", "for attr in [\"os\", \"memory\", \"ram\", \"cam\", \"cost\"] ], )", "children=[ dbc.Label( \"Choose desired Memory capacity (GB)\", html_for=\"memory-choice\", ), dcc.Slider(", "\"800\", 1000: \"1000\", 1200: \"1200\", 1400: \"1400\", }, className=\"text-center mt-5\",", "\"IOS\": choice_data = data[[True if \"IOS\" in st else False", "(MP)\": -1, \"Price 
(Euros)\": 1, } fitness_data = data[fitness_columns] *", "maxi = details.loc[1].astype(int) details_on_card = details.loc[2].astype(int) details_on_card = details.columns[details_on_card ==", "9: \"9\", 10: \"10\", 11: \"11\", 12: \"12\", }, className=\"text-center", "6)], *[Output(f\"other-results-tooltip-{i}\", \"children\") for i in range(2, 6)], ], [", "details_on_card = details.loc[2].astype(int) details_on_card = details.columns[details_on_card == 1] fitness_columns =", ".container class is fixed, .container.scalable is scalable dbc.Row( [ dbc.Col(", "included=False, value=100, marks={ 0: \"0\", 200: \"200\", 400: \"400\", 600:", "relevant_data.max().values aspirations = choices[1:] * maxi distance = (aspirations -", "{ \"label\": \"Android\", \"value\": \"Android\", }, {\"label\": \"iOS\", \"value\": \"IOS\"},", "[\"os\", \"memory\", \"ram\", \"cam\", \"cost\"] ], ) def results(*choices): if", "html.Tr( [ html.Th(col), html.Td([str(data[col]),],), html.Td([html.Span(\" ▉\", style={\"color\": c,},)],), ] )", "mt-4\", ), ], className=\"mr-3 ml-3 mb-2 mt-2\", ), dbc.FormGroup( children=[", "others, tooltips = other_options( card_data.loc[distance_order.values[1:total_number]] ) others = others +", "tooltips(tooldict): num = len(tooldict[\"ids\"]) content = [] for i in", "\"the user's desires. Input your preferences \" \"below. 
The box", "content = [] for i in range(num): content.append(dbc.Tooltip(tooldict[\"tables\"][i], target=tooldict[\"ids\"][i])) return", "= distance.max(axis=1) distance_order = np.argsort(distance) best = table_from_data(card_data.loc[distance_order.values[0]], choices[1:]) total_number", "128: \"128\", 256: \"256\", }, # className=\"text-center mt-5\", ), ],", "body) def other_options(data): contents = [] tables = [] ids", "\"50\", 70: \"70\", 90: \"90\", 110: \"110\", 130: \"130\", },", "for i in range(2, 6) ] + [ dbc.Tooltip( id=f\"other-results-tooltip-{i}\",", "desired Memory capacity (GB)\", html_for=\"memory-choice\", ), dcc.Slider( id=\"memory-choice\", min=16, max=256,", "Input, Output, State import dash_bootstrap_components as dbc import dash_table import", "30: \"30\", 50: \"50\", 70: \"70\", 90: \"90\", 110: \"110\",", "mb-2 mt-2\", ), dbc.FormGroup( children=[ dbc.Label( \"Choose desired camera resolution", "content.append(dbc.Tooltip(tooldict[\"tables\"][i], target=tooldict[\"ids\"][i])) return content\"\"\" def table_from_data(data, choices): # print(choices) to_compare", "dbc.Tooltip( id=f\"other-results-tooltip-{i}\", target=f\"other-results-list-{i}\", placement=\"right\", style={ \"maxWidth\": 700, \"background-color\": \"white\", \"color\":", "= [] for i in range(num): content.append(dbc.Tooltip(tooldict[\"tables\"][i], target=tooldict[\"ids\"][i])) return content\"\"\"", "}, {\"label\": \"iOS\", \"value\": \"IOS\"}, { \"label\": \"No preference\", \"value\":", "for st in data[\"OS\"]]] if choices[0] == \"Android\": choice_data =", "), className=\"card-text\", ), ] ) ], className=\"mr-3 ml-3 mb-2 mt-2\",", "card_data = choice_data[details_on_card].reset_index(drop=True) maxi = np.asarray([-1, -1, -1, 1]) relevant_data", "i in range(len(others) + 2, 6)] tooltips = tooltips +", "import numpy as np data = pd.read_csv(\"./data/Phone_dataset_new.csv\", header=0) details =", "min=2, max=12, step=1, value=12, included=False, marks={ 2: \"2\", 3: 
\"3\",", "to \" \"quickly and easily find phones which reflect \"", "phone?\", className=\"text-center mt-4\", ) ) ] ), dbc.Row( [ dbc.Col(", "( \"This app uses decision support tools to \" \"quickly", "\" \"below. The box on top right shows the phone", "\"both\", }, ], id=\"os-choice\", value=\"both\", inline=True, # className=\"text-center mt-4\", ),", "dcc.Slider( id=\"cost-choice\", min=0, max=1400, step=1, included=False, value=100, marks={ 0: \"0\",", "mb-2 mt-2\", ), dbc.FormGroup( children=[ dbc.Label( \"Choose desired Memory capacity", "16: \"16\", 32: \"32\", 64: \"64\", 128: \"128\", 256: \"256\",", "for col in data.index]))] body = [html.Tbody([html.Tr([html.Td(data[col]) for col in", "\"black\", }, ) for i in range(2, 6) ] ),", "className=\"mr-3 ml-3 mb-2 mt-2\", ), ], style={\"maxHeight\": \"560px\", \"overflow\": \"auto\"},", "np data = pd.read_csv(\"./data/Phone_dataset_new.csv\", header=0) details = pd.read_csv(\"./data/Phone_details.csv\", header=0) names", "= 2 for index, row in data.iterrows(): contents.append(f\"{i}. {row['Model']}\") tables.append(table_from_data_horizontal(row))", "choices[1:] * maxi distance = (aspirations - relevant_data) / (ideal", "\"below. The box on top right shows the phone \"", "for you is:\"), dbc.CardBody(id=\"results\"), ], className=\"mb-4\", ), dbc.Card( children=[ dbc.CardHeader(\"Other", "= len(tooldict[\"ids\"]) content = [] for i in range(num): content.append(dbc.Tooltip(tooldict[\"tables\"][i],", "you is:\"), dbc.CardBody(id=\"results\"), ], className=\"mb-4\", ), dbc.Card( children=[ dbc.CardHeader(\"Other great", "reflect \" \"the user's desires. Input your preferences \" \"below.", "(best, *others, *tooltips) \"\"\"@app.callback(Output(\"tooltips\", \"children\"), [Input(\"callback-dump\", \"children\")]) def tooltips(tooldict): num", "f\"{i}. 
\", id=f\"other-results-list-{i}\", ) ) for i in range(2, 6)", "in range(2, 6) ] + [ dbc.Tooltip( id=f\"other-results-tooltip-{i}\", target=f\"other-results-list-{i}\", placement=\"right\",", "others = others + [f\"{i}. -\" for i in range(len(others)", "dbc.RadioItems( options=[ { \"label\": \"Android\", \"value\": \"Android\", }, {\"label\": \"iOS\",", "\"11\", 12: \"12\", }, className=\"text-center mt-5\", ), ], className=\"mr-3 ml-3", "# Top card with details(?) dbc.Card( children=[ dbc.CardBody( [ html.H4(", "== 1] fitness_columns = { \"Memory\": -1, \"RAM\": -1, \"Camera", "= data elif choices[0] == \"IOS\": choice_data = data[[True if", "relevant_data = relevant_data * maxi ideal = relevant_data.min().values nadir =", "range(len(tooltips) + 2, 6)] return (best, *others, *tooltips) \"\"\"@app.callback(Output(\"tooltips\", \"children\"),", "[1, 1, 1, -1] colors = [None, None, None] +", "children=[ dbc.Card( children=[ dbc.CardHeader(\"The best phone for you is:\"), dbc.CardBody(id=\"results\"),", "num = len(tooldict[\"ids\"]) content = [] for i in range(num):", "], width={\"size\": 5, \"offset\": 0}, className=\"mb-2 mt-2\", ), ] ),", "zip(data.index, colors) ] ) ] ) def table_from_data_horizontal(data): header =", "- relevant_data) / (ideal - nadir) distance = distance.max(axis=1) distance_order", "i = 2 for index, row in data.iterrows(): contents.append(f\"{i}. 
{row['Model']}\")", "range(len(others) + 2, 6)] tooltips = tooltips + [None for", "[] ids = [] i = 2 for index, row", "in data[\"OS\"]]] relevant_data = choice_data[ [\"Memory\", \"RAM\", \"Camera (MP)\", \"Price", "choices[0] == \"both\": choice_data = data elif choices[0] == \"IOS\":", "others, tooltips = other_options(card_data.loc[distance_order.values[1:5]]) else: others, tooltips = other_options( card_data.loc[distance_order.values[1:total_number]]", "\"value\": \"Android\", }, {\"label\": \"iOS\", \"value\": \"IOS\"}, { \"label\": \"No", "6)] return (best, *others, *tooltips) \"\"\"@app.callback(Output(\"tooltips\", \"children\"), [Input(\"callback-dump\", \"children\")]) def", "] ) ] ) def table_from_data_horizontal(data): header = [html.Thead(html.Tr([html.Th(col) for", "className=\"mr-3 ml-3 mb-2 mt-2\", ), dbc.Form( [ dbc.FormGroup( children=[ dbc.Label(", "for i in range(len(tooltips) + 2, 6)] return (best, *others,", "capacity (GB)\", html_for=\"ram-choice\", ), dcc.Slider( id=\"ram-choice\", min=2, max=12, step=1, value=12,", "camera resolution (MP)\", html_for=\"cam-choice\", ), dcc.Slider( id=\"cam-choice\", min=0, max=130, step=1,", "\"600\", 800: \"800\", 1000: \"1000\", 1200: \"1200\", 1400: \"1400\", },", "# className=\"text-center mt-4\", ), ], className=\"mr-3 ml-3 mb-2 mt-2\", ),", ") ] ) def table_from_data_horizontal(data): header = [html.Thead(html.Tr([html.Th(col) for col", "[ html.H4( \"Researcher's Night Event\", className=\"card-title text-center\", ), html.P( (", "- nadir) distance = distance.max(axis=1) distance_order = np.argsort(distance) best =", "130: \"130\", }, className=\"text-center mt-5\", ), ], className=\"mr-3 ml-3 mb-2", "] + [ dbc.Tooltip( id=f\"other-results-tooltip-{i}\", target=f\"other-results-list-{i}\", placement=\"right\", style={ \"maxWidth\": 700,", "2, 6)] return (best, *others, *tooltips) \"\"\"@app.callback(Output(\"tooltips\", \"children\"), [Input(\"callback-dump\", \"children\")])", "matches the 
preferences the best. \" \"The box on bottom", "maxi distance = (aspirations - relevant_data) / (ideal - nadir)", "className=\"card-text\", ), ] ) ], className=\"mr-3 ml-3 mb-2 mt-2\", ),", "\"This app uses decision support tools to \" \"quickly and", "included=False, marks={ 2: \"2\", 3: \"3\", 4: \"4\", 5: \"5\",", "= details.rename(columns=names) maxi = details.loc[1].astype(int) details_on_card = details.loc[2].astype(int) details_on_card =", "), dbc.Form( [ dbc.FormGroup( children=[ dbc.Label( \"Choose desired operating system\",", "\"10\", 11: \"11\", 12: \"12\", }, className=\"text-center mt-5\", ), ],", "ml-3 mb-2 mt-2\", ), dbc.FormGroup( children=[ dbc.Label( \"Choose desired camera", "= np.asarray([-1, -1, -1, 1]) relevant_data = relevant_data * maxi", "), dbc.Row([html.Div(id=\"callback-dump\")]), ], ) @app.callback( [ Output(\"results\", \"children\"), *[Output(f\"other-results-list-{i}\", \"children\")", "children=[ # .container class is fixed, .container.scalable is scalable dbc.Row(", "in range(2, 6) ] ), ), ], className=\"mt-4\", ), html.Div(id=\"tooltips\"),", "optimal phone?\", className=\"text-center mt-4\", ) ) ] ), dbc.Row( [", "choices[0] == \"IOS\": choice_data = data[[True if \"IOS\" in st", "body = [html.Tbody([html.Tr([html.Td(data[col]) for col in data.index])])] return dbc.Table(header +", "[ html.Tr( [ html.Th(col), html.Td([str(data[col]),],), html.Td([html.Span(\" ▉\", style={\"color\": c,},)],), ]", "+ [\"green\" if x >= 0 else \"red\" for x", "children=[ dbc.CardHeader(\"The best phone for you is:\"), dbc.CardBody(id=\"results\"), ], className=\"mb-4\",", "\"background-color\": \"white\", \"color\": \"white\", \"border-style\": \"solid\", \"border-color\": \"black\", }, )", "\"400\", 600: \"600\", 800: \"800\", 1000: \"1000\", 1200: \"1200\", 1400:", "= i + 1 return contents, tables if __name__ ==", "{\"label\": \"iOS\", \"value\": \"IOS\"}, { \"label\": \"No preference\", \"value\": \"both\",", "6: \"6\", 7: \"7\", 8: \"8\", 9: 
\"9\", 10: \"10\",", "), dbc.Col( children=[ dbc.Card( children=[ dbc.CardHeader(\"The best phone for you", "details = pd.read_csv(\"./data/Phone_details.csv\", header=0) names = details.loc[0] data = data.rename(columns=names)", "\"value\": \"IOS\"}, { \"label\": \"No preference\", \"value\": \"both\", }, ],", "[html.Thead(html.Tr([html.Th(col) for col in data.index]))] body = [html.Tbody([html.Tr([html.Td(data[col]) for col", "), ], style={\"maxHeight\": \"560px\", \"overflow\": \"auto\"}, ), ], width={\"size\": 5,", "mt-2\", ), dbc.Form( [ dbc.FormGroup( children=[ dbc.Label( \"Choose desired operating", "{ \"Memory\": -1, \"RAM\": -1, \"Camera (MP)\": -1, \"Price (Euros)\":", "\"Choose desired budget (Euros)\", html_for=\"cost-choice\", ), dcc.Slider( id=\"cost-choice\", min=0, max=1400,", "[\"green\" if x >= 0 else \"red\" for x in", "in zip(data.index, colors) ] ) ] ) def table_from_data_horizontal(data): header", "\"iOS\", \"value\": \"IOS\"}, { \"label\": \"No preference\", \"value\": \"both\", },", "and easily find phones which reflect \" \"the user's desires.", "), dcc.Slider( id=\"cost-choice\", min=0, max=1400, step=1, included=False, value=100, marks={ 0:", "\"solid\", \"border-color\": \"black\", }, ) for i in range(2, 6)", "\"IOS\"}, { \"label\": \"No preference\", \"value\": \"both\", }, ], id=\"os-choice\",", "2 for index, row in data.iterrows(): contents.append(f\"{i}. 
{row['Model']}\") tables.append(table_from_data_horizontal(row)) i", "ml-3 mb-2 mt-2\", ), dbc.Form( [ dbc.FormGroup( children=[ dbc.Label( \"Choose", "(MP)\", \"Price (Euros)\",] ].reset_index(drop=True) card_data = choice_data[details_on_card].reset_index(drop=True) maxi = np.asarray([-1,", "dbc.Card( children=[ dbc.CardBody( [ html.H4( \"Researcher's Night Event\", className=\"card-title text-center\",", "some \" \"close alternatives.\" ), className=\"card-text\", ), ] ) ],", "ml-3 mb-2 mt-2\", ), dbc.FormGroup( children=[ dbc.Label( \"Choose desired RAM", "\"4\", 5: \"5\", 6: \"6\", 7: \"7\", 8: \"8\", 9:", "great phones:\"), dbc.CardBody( id=\"other-results\", children=( [ html.P( html.Span( f\"{i}. \",", "relevant_data = choice_data[ [\"Memory\", \"RAM\", \"Camera (MP)\", \"Price (Euros)\",] ].reset_index(drop=True)", "the phone \" \"which matches the preferences the best. \"", "print(choices) to_compare = [\"Memory\", \"RAM\", \"Camera (MP)\", \"Price (Euros)\"] #", "import PreventUpdate import dash_core_components as dcc import dash_html_components as html", "\"Researcher's Night Event\", className=\"card-title text-center\", ), html.P( ( \"This app", "False for st in data[\"OS\"]]] if choices[0] == \"Android\": choice_data", "other_options(data): contents = [] tables = [] ids = []", "], className=\"mr-3 ml-3 mb-2 mt-2\", ), dbc.FormGroup( children=[ dbc.Label( \"Choose", "def other_options(data): contents = [] tables = [] ids =", "[None for i in range(len(tooltips) + 2, 6)] return (best,", "details.loc[0] data = data.rename(columns=names) details = details.rename(columns=names) maxi = details.loc[1].astype(int)", "), ], className=\"mr-3 ml-3 mb-2 mt-2\", ), ], style={\"maxHeight\": \"560px\",", "1] fitness_columns = { \"Memory\": -1, \"RAM\": -1, \"Camera (MP)\":", "children=[ dbc.Label( \"Choose desired camera resolution (MP)\", html_for=\"cam-choice\", ), dcc.Slider(", "min=0, max=130, step=1, included=False, value=70, marks={ 0: \"0\", 10: \"10\",", 
"70: \"70\", 90: \"90\", 110: \"110\", 130: \"130\", }, className=\"text-center", "== \"both\": choice_data = data elif choices[0] == \"IOS\": choice_data", "data.index]))] body = [html.Tbody([html.Tr([html.Td(data[col]) for col in data.index])])] return dbc.Table(header", "marks={ 0: \"0\", 200: \"200\", 400: \"400\", 600: \"600\", 800:", "in data.index])])] return dbc.Table(header + body) def other_options(data): contents =", "plotly.express as ex import plotly.graph_objects as go import pandas as", "-1, \"Camera (MP)\": -1, \"Price (Euros)\": 1, } fitness_data =", "in st else False for st in data[\"OS\"]]] relevant_data =", "dbc.Table( [ html.Tbody( [ html.Tr( [ html.Th(col), html.Td([str(data[col]),],), html.Td([html.Span(\" ▉\",", "5, \"offset\": 1}, ), dbc.Col( children=[ dbc.Card( children=[ dbc.CardHeader(\"The best", "ml-3 mb-2 mt-2\", ), ], style={\"maxHeight\": \"560px\", \"overflow\": \"auto\"}, ),", "suppress_callback_exceptions=True, ) app.layout = html.Div( children=[ # .container class is", "(GB)\", html_for=\"ram-choice\", ), dcc.Slider( id=\"ram-choice\", min=2, max=12, step=1, value=12, included=False,", "{row['Model']}\") tables.append(table_from_data_horizontal(row)) i = i + 1 return contents, tables", "as go import pandas as pd import numpy as np", "-1, 1]) relevant_data = relevant_data * maxi ideal = relevant_data.min().values", ") for (col, c) in zip(data.index, colors) ] ) ]", "index, row in data.iterrows(): contents.append(f\"{i}. {row['Model']}\") tables.append(table_from_data_horizontal(row)) i = i", "with details(?) 
dbc.Card( children=[ dbc.CardBody( [ html.H4( \"Researcher's Night Event\",", "max=256, step=None, included=False, value=256, marks={ 16: \"16\", 32: \"32\", 64:", "\"Android\", \"value\": \"Android\", }, {\"label\": \"iOS\", \"value\": \"IOS\"}, { \"label\":", "-1, -1, 1]) relevant_data = relevant_data * maxi ideal =", "32: \"32\", 64: \"64\", 128: \"128\", 256: \"256\", }, #", "[ dbc.Col( children=[ # Top card with details(?) dbc.Card( children=[", "elif choices[0] == \"IOS\": choice_data = data[[True if \"IOS\" in", "html from dash.dependencies import Input, Output, State import dash_bootstrap_components as", "\"color\": \"white\", \"border-style\": \"solid\", \"border-color\": \"black\", }, ) for i", "data elif choices[0] == \"IOS\": choice_data = data[[True if \"IOS\"", "in range(len(tooltips) + 2, 6)] return (best, *others, *tooltips) \"\"\"@app.callback(Output(\"tooltips\",", "mt-4\", ) ) ] ), dbc.Row( [ dbc.Col( children=[ #", "}, ) for i in range(2, 6) ] ), ),", "pd import numpy as np data = pd.read_csv(\"./data/Phone_dataset_new.csv\", header=0) details", "\"No preference\", \"value\": \"both\", }, ], id=\"os-choice\", value=\"both\", inline=True, #", "0 else \"red\" for x in diff] # print(np.sign(diff)) return", "dbc.FormGroup( children=[ dbc.Label( \"Choose desired RAM capacity (GB)\", html_for=\"ram-choice\", ),", "details = details.rename(columns=names) maxi = details.loc[1].astype(int) details_on_card = details.loc[2].astype(int) details_on_card", "choice_data[ [\"Memory\", \"RAM\", \"Camera (MP)\", \"Price (Euros)\",] ].reset_index(drop=True) card_data =", "# .container class is fixed, .container.scalable is scalable dbc.Row( [", "Input your preferences \" \"below. 
The box on top right", "import pandas as pd import numpy as np data =", "in data.index]))] body = [html.Tbody([html.Tr([html.Td(data[col]) for col in data.index])])] return", "\"ram\", \"cam\", \"cost\"] ], ) def results(*choices): if choices[0] ==", "range(2, 6) ] ), ), ], className=\"mt-4\", ), html.Div(id=\"tooltips\"), ],", "in data.iterrows(): contents.append(f\"{i}. {row['Model']}\") tables.append(table_from_data_horizontal(row)) i = i + 1", "if x >= 0 else \"red\" for x in diff]", "110: \"110\", 130: \"130\", }, className=\"text-center mt-5\", ), ], className=\"mr-3", "from dash.dependencies import Input, Output, State import dash_bootstrap_components as dbc" ]
[ "a closure turning objectify into a single argument function. This", "properties for a given class. Recursively calls up to parent", "an instance of the class created using the JSON data.", "obj.__class__ # Create empty data data = {} sprops,cprops =", "a given class. Recursively calls up to parent classes that", "that applies those functions over all of the keys and", "parent_cprops = _get_registered_props(parent_cls) sprops = list(set(sprops).union(set(parent_sprops))) cprops2 = parent_cprops.copy() cprops2.update(cprops)", "inv_fn)} if cls in pd.conc_to_abstract: # {ConcreteClass: (AbstractClass, _)} parent_cls", "= concrete_cls() sprops,cprops = _get_registered_props(cls) # Add simple properties from", "need to access some of the module-level parameters. \"\"\" #", "objectify(d, cls) def objectify(data, cls): \"\"\" Function takes JSON data", "function applied to all keys (default identity) vfun: function applied", "some operation is needed on either the keys or values,", "Function takes JSON data and a target class as arguments", "identity) (k -> k') -> (v -> v') -> ((k,", "the >>> @sprop.b # unobjectify function what parameter need >>>", "\"\"\" sprops = pd.class_sprops.get(cls,{}) # [name] cprops = pd.class_cprops.get(cls,{}) #", "a >>> self.b = b >>> self.c = c >>>", "\"\"\" # Create empty class concrete_cls = pd.conc2(data, cls) obj", "transformed version of the map. kfun: function applied to all", "the class created using the JSON data. 
I'm not sure", "cls) return data def _get_registered_props(cls): \"\"\" Returns all of the", "x): \"\"\" Function that takes two functions as arguments and", "data[p]=getattr(obj,p) # Add calculated data for p in cprops: f2", "Identity function is needed when performing transformations on maps where", "(must be a class instance) into the corresponding JSON data.", "unobjectify(baz) { 'a':1, 'b':2, 'c':'three' } \"\"\" cls = obj.__class__", "are needed to tell the >>> @sprop.b # unobjectify function", "cls) def objectify(data, cls): \"\"\" Function takes JSON data and", "sprops: data[p]=getattr(obj,p) # Add calculated data for p in cprops:", "for a given class. Recursively calls up to parent classes", "cprops = pd.class_cprops.get(cls,{}) # {name:(fn, inv_fn)} if cls in pd.conc_to_abstract:", "out. >>> class Baz(object): pass >>> def __init__(self, a, b,", "from data for p in sprops: setattr(obj, p, data[p]) #", "annotations by removing the need to write lambda functions. \"\"\"", "# Add calculated properties from data for p in cprops:", "{name:(fn, inv_fn)} if cls in pd.conc_to_abstract: # {ConcreteClass: (AbstractClass, _)}", "objectify into a single argument function. This cuts down the", "they need to access some of the module-level parameters. \"\"\"", "= _get_registered_props(cls) # Add simple properties from data for p", "class created using the JSON data. 
I'm not sure whether", "def _get_registered_props(cls): \"\"\" Returns all of the registered properties for", "(default identity) (k -> k') -> (v -> v') ->", "data for p in sprops: setattr(obj, p, data[p]) # Add", "def __init__(self, a, b, c): >>> self.a = a >>>", "as arguments and returns a function that applies those functions", "return obj def transform_map(kfun=lambda x: x, vfun=lambda x: x): \"\"\"", "<reponame>k-j-m/Pyxon<filename>pyxon/utils.py<gh_stars>0 import pyxon.decode as pd def unobjectify(obj): \"\"\" Turns a", "return lambda dct: dict([(kfun(k),vfun(v)) for k,v in dct.items()]) def transform_list(item_decoder=lambda", "data[p]) # Add calculated properties from data for p in", "version of the map. kfun: function applied to all keys", "either the keys or values, but not both. \"\"\" return", "in pd.conc_to_abstract: # {ConcreteClass: (AbstractClass, _)} parent_cls = pd.conc_to_abstract[cls][0] parent_sprops,", "python object (must be a class instance) into the corresponding", "a single argument function. This cuts down the amount of", "cprops[p][1] data[p]=f2(getattr(obj,p)) data = pd.add_type_property(data, cls) return data def _get_registered_props(cls):", "data data = {} sprops,cprops = _get_registered_props(cls) # Add simple", "= pd.class_cprops.get(cls,{}) # {name:(fn, inv_fn)} if cls in pd.conc_to_abstract: #", "p in sprops: data[p]=getattr(obj,p) # Add calculated data for p", "-> (k', v')) \"\"\" return lambda dct: dict([(kfun(k),vfun(v)) for k,v", "# Add simple properties for p in sprops: data[p]=getattr(obj,p) #", "JSON data and a target class as arguments and returns", "cls in pd.conc_to_abstract: # {ConcreteClass: (AbstractClass, _)} parent_cls = pd.conc_to_abstract[cls][0]", "self.c = c >>> >>> baz = Baz(a=1, b=2, c='three')", "to write lambda functions. 
\"\"\" return lambda d: objectify(d, cls)", "a function that applies those functions over all of the", "identity(x): \"\"\" Identity function is needed when performing transformations on", "class instance) into the corresponding JSON data. Example: >>> @sprop.a", "great idea to keep (un)objectify separate from the decode module,", "need to write lambda functions. \"\"\" return lambda d: objectify(d,", "be a class instance) into the corresponding JSON data. Example:", "return data def _get_registered_props(cls): \"\"\" Returns all of the registered", "def identity(x): \"\"\" Identity function is needed when performing transformations", "cuts down the amount of code needed in class annotations", "argument function. This cuts down the amount of code needed", "if cls in pd.conc_to_abstract: # {ConcreteClass: (AbstractClass, _)} parent_cls =", "classes that are inherited from. \"\"\" sprops = pd.class_sprops.get(cls,{}) #", "transform_map(kfun=lambda x: x, vfun=lambda x: x): \"\"\" Function that takes", "using the JSON data. I'm not sure whether it is", ">>> class Baz(object): pass >>> def __init__(self, a, b, c):", "all of the keys and values in a map and", "data. I'm not sure whether it is a great idea", "turning objectify into a single argument function. This cuts down", "those functions over all of the keys and values in", "simple properties from data for p in sprops: setattr(obj, p,", "data and a target class as arguments and returns an", "is needed when performing transformations on maps where some operation", "Recursively calls up to parent classes that are inherited from.", "= parent_cprops.copy() cprops2.update(cprops) cprops = cprops2 return sprops,cprops def obj(cls):", "return lambda lst: map(item_decoder, lst) def identity(x): \"\"\" Identity function", "= a >>> self.b = b >>> self.c = c", "lambda functions. 
\"\"\" return lambda d: objectify(d, cls) def objectify(data,", "function is needed when performing transformations on maps where some", "some of the module-level parameters. \"\"\" # Create empty class", "c='three') >>> unobjectify(baz) { 'a':1, 'b':2, 'c':'three' } \"\"\" cls", "class. Recursively calls up to parent classes that are inherited", "amount of code needed in class annotations by removing the", "@sprop.c # to be written out. >>> class Baz(object): pass", "= {} sprops,cprops = _get_registered_props(cls) # Add simple properties for", "_get_registered_props(parent_cls) sprops = list(set(sprops).union(set(parent_sprops))) cprops2 = parent_cprops.copy() cprops2.update(cprops) cprops =", "list(set(sprops).union(set(parent_sprops))) cprops2 = parent_cprops.copy() cprops2.update(cprops) cprops = cprops2 return sprops,cprops", "f1(data[p])) return obj def transform_map(kfun=lambda x: x, vfun=lambda x: x):", "functions over all of the keys and values in a", "= c >>> >>> baz = Baz(a=1, b=2, c='three') >>>", "simple properties for p in sprops: data[p]=getattr(obj,p) # Add calculated", "the amount of code needed in class annotations by removing", "a class instance) into the corresponding JSON data. Example: >>>", "cls = obj.__class__ # Create empty data data = {}", "\"\"\" Function takes JSON data and a target class as", "all values (default identity) (k -> k') -> (v ->", "{ 'a':1, 'b':2, 'c':'three' } \"\"\" cls = obj.__class__ #", "for p in cprops: f2 = cprops[p][1] data[p]=f2(getattr(obj,p)) data =", "cprops2 = parent_cprops.copy() cprops2.update(cprops) cprops = cprops2 return sprops,cprops def", "calculated data for p in cprops: f2 = cprops[p][1] data[p]=f2(getattr(obj,p))", "to parent classes that are inherited from. 
\"\"\" sprops =", "in sprops: data[p]=getattr(obj,p) # Add calculated data for p in", "a, b, c): >>> self.a = a >>> self.b =", "def obj(cls): \"\"\" Helper function returns a closure turning objectify", "pd.add_type_property(data, cls) return data def _get_registered_props(cls): \"\"\" Returns all of", "self.b = b >>> self.c = c >>> >>> baz", "returns a function that applies those functions over all of", "into the corresponding JSON data. Example: >>> @sprop.a # sprop", "= Baz(a=1, b=2, c='three') >>> unobjectify(baz) { 'a':1, 'b':2, 'c':'three'", "module, since they need to access some of the module-level", "cls): \"\"\" Function takes JSON data and a target class", "function. This cuts down the amount of code needed in", "is a great idea to keep (un)objectify separate from the", "to all values (default identity) (k -> k') -> (v", "'a':1, 'b':2, 'c':'three' } \"\"\" cls = obj.__class__ # Create", "removing the need to write lambda functions. \"\"\" return lambda", "def transform_map(kfun=lambda x: x, vfun=lambda x: x): \"\"\" Function that", "target class as arguments and returns an instance of the", "# unobjectify function what parameter need >>> @sprop.c # to", "since they need to access some of the module-level parameters.", "# {name:(fn, inv_fn)} if cls in pd.conc_to_abstract: # {ConcreteClass: (AbstractClass,", "# Create empty class concrete_cls = pd.conc2(data, cls) obj =", "is needed on either the keys or values, but not", "of the module-level parameters. \"\"\" # Create empty class concrete_cls", "_get_registered_props(cls) # Add simple properties from data for p in", "as arguments and returns an instance of the class created", "pd.conc_to_abstract[cls][0] parent_sprops, parent_cprops = _get_registered_props(parent_cls) sprops = list(set(sprops).union(set(parent_sprops))) cprops2 =", "registered properties for a given class. 
Recursively calls up to", "object (must be a class instance) into the corresponding JSON", "v) -> (k', v')) \"\"\" return lambda dct: dict([(kfun(k),vfun(v)) for", "c): >>> self.a = a >>> self.b = b >>>", "data for p in cprops: f2 = cprops[p][1] data[p]=f2(getattr(obj,p)) data", "where some operation is needed on either the keys or", "in dct.items()]) def transform_list(item_decoder=lambda x: x): return lambda lst: map(item_decoder,", "properties from data for p in cprops: f1 = cprops[p][0]", "be written out. >>> class Baz(object): pass >>> def __init__(self,", "'c':'three' } \"\"\" cls = obj.__class__ # Create empty data", "pyxon.decode as pd def unobjectify(obj): \"\"\" Turns a python object", "_get_registered_props(cls): \"\"\" Returns all of the registered properties for a", "pd.class_sprops.get(cls,{}) # [name] cprops = pd.class_cprops.get(cls,{}) # {name:(fn, inv_fn)} if", "the decode module, since they need to access some of", "and values in a map and returns the transformed version", "= _get_registered_props(parent_cls) sprops = list(set(sprops).union(set(parent_sprops))) cprops2 = parent_cprops.copy() cprops2.update(cprops) cprops", "import pyxon.decode as pd def unobjectify(obj): \"\"\" Turns a python", "in class annotations by removing the need to write lambda", "single argument function. This cuts down the amount of code", "= cprops2 return sprops,cprops def obj(cls): \"\"\" Helper function returns", "# to be written out. >>> class Baz(object): pass >>>", "all keys (default identity) vfun: function applied to all values", "corresponding JSON data. Example: >>> @sprop.a # sprop annotations are", "cprops: f2 = cprops[p][1] data[p]=f2(getattr(obj,p)) data = pd.add_type_property(data, cls) return", "} \"\"\" cls = obj.__class__ # Create empty data data", "module-level parameters. 
\"\"\" # Create empty class concrete_cls = pd.conc2(data,", "{ConcreteClass: (AbstractClass, _)} parent_cls = pd.conc_to_abstract[cls][0] parent_sprops, parent_cprops = _get_registered_props(parent_cls)", "data = {} sprops,cprops = _get_registered_props(cls) # Add simple properties", ">>> def __init__(self, a, b, c): >>> self.a = a", "values (default identity) (k -> k') -> (v -> v')", "the corresponding JSON data. Example: >>> @sprop.a # sprop annotations", "class concrete_cls = pd.conc2(data, cls) obj = concrete_cls() sprops,cprops =", "need >>> @sprop.c # to be written out. >>> class", "identity) vfun: function applied to all values (default identity) (k", "returns an instance of the class created using the JSON", "x: x): return lambda lst: map(item_decoder, lst) def identity(x): \"\"\"", "on maps where some operation is needed on either the", "p in cprops: f2 = cprops[p][1] data[p]=f2(getattr(obj,p)) data = pd.add_type_property(data,", "p in sprops: setattr(obj, p, data[p]) # Add calculated properties", "properties for p in sprops: data[p]=getattr(obj,p) # Add calculated data", "idea to keep (un)objectify separate from the decode module, since", "Example: >>> @sprop.a # sprop annotations are needed to tell", ">>> self.b = b >>> self.c = c >>> >>>", "\"\"\" Helper function returns a closure turning objectify into a", "_)} parent_cls = pd.conc_to_abstract[cls][0] parent_sprops, parent_cprops = _get_registered_props(parent_cls) sprops =", "Baz(object): pass >>> def __init__(self, a, b, c): >>> self.a", "= _get_registered_props(cls) # Add simple properties for p in sprops:", "lst: map(item_decoder, lst) def identity(x): \"\"\" Identity function is needed", "v')) \"\"\" return lambda dct: dict([(kfun(k),vfun(v)) for k,v in dct.items()])", "in sprops: setattr(obj, p, data[p]) # Add calculated properties from", "p, f1(data[p])) return obj def transform_map(kfun=lambda x: x, vfun=lambda x:", "for p in sprops: data[p]=getattr(obj,p) # Add calculated data for", "the map. 
kfun: function applied to all keys (default identity)", "empty data data = {} sprops,cprops = _get_registered_props(cls) # Add", "whether it is a great idea to keep (un)objectify separate", "performing transformations on maps where some operation is needed on", "into a single argument function. This cuts down the amount", "arguments and returns a function that applies those functions over", "baz = Baz(a=1, b=2, c='three') >>> unobjectify(baz) { 'a':1, 'b':2,", "\"\"\" return lambda d: objectify(d, cls) def objectify(data, cls): \"\"\"", "cprops[p][0] setattr(obj, p, f1(data[p])) return obj def transform_map(kfun=lambda x: x,", "the keys or values, but not both. \"\"\" return x", "returns the transformed version of the map. kfun: function applied", "from the decode module, since they need to access some", "-> k') -> (v -> v') -> ((k, v) ->", "to keep (un)objectify separate from the decode module, since they", "a great idea to keep (un)objectify separate from the decode", "it is a great idea to keep (un)objectify separate from", "as pd def unobjectify(obj): \"\"\" Turns a python object (must", "transformations on maps where some operation is needed on either", "that takes two functions as arguments and returns a function", "x: x, vfun=lambda x: x): \"\"\" Function that takes two", "and returns a function that applies those functions over all", "parameter need >>> @sprop.c # to be written out. >>>", "transform_list(item_decoder=lambda x: x): return lambda lst: map(item_decoder, lst) def identity(x):", "when performing transformations on maps where some operation is needed", "concrete_cls() sprops,cprops = _get_registered_props(cls) # Add simple properties from data", "Returns all of the registered properties for a given class.", "instance) into the corresponding JSON data. 
Example: >>> @sprop.a #", ">>> >>> baz = Baz(a=1, b=2, c='three') >>> unobjectify(baz) {", "= pd.class_sprops.get(cls,{}) # [name] cprops = pd.class_cprops.get(cls,{}) # {name:(fn, inv_fn)}", "keys (default identity) vfun: function applied to all values (default", "map. kfun: function applied to all keys (default identity) vfun:", "the module-level parameters. \"\"\" # Create empty class concrete_cls =", "(un)objectify separate from the decode module, since they need to", "@sprop.b # unobjectify function what parameter need >>> @sprop.c #", "to tell the >>> @sprop.b # unobjectify function what parameter", "((k, v) -> (k', v')) \"\"\" return lambda dct: dict([(kfun(k),vfun(v))", "b, c): >>> self.a = a >>> self.b = b", "setattr(obj, p, f1(data[p])) return obj def transform_map(kfun=lambda x: x, vfun=lambda", "__init__(self, a, b, c): >>> self.a = a >>> self.b", "applied to all keys (default identity) vfun: function applied to", "applied to all values (default identity) (k -> k') ->", "maps where some operation is needed on either the keys", "tell the >>> @sprop.b # unobjectify function what parameter need", "map and returns the transformed version of the map. 
kfun:", "keep (un)objectify separate from the decode module, since they need", "of code needed in class annotations by removing the need", "sprops: setattr(obj, p, data[p]) # Add calculated properties from data", "# {ConcreteClass: (AbstractClass, _)} parent_cls = pd.conc_to_abstract[cls][0] parent_sprops, parent_cprops =", "_get_registered_props(cls) # Add simple properties for p in sprops: data[p]=getattr(obj,p)", "\"\"\" cls = obj.__class__ # Create empty data data =", "needed to tell the >>> @sprop.b # unobjectify function what", "(k', v')) \"\"\" return lambda dct: dict([(kfun(k),vfun(v)) for k,v in", "self.a = a >>> self.b = b >>> self.c =", "concrete_cls = pd.conc2(data, cls) obj = concrete_cls() sprops,cprops = _get_registered_props(cls)", "a python object (must be a class instance) into the", "values in a map and returns the transformed version of", "pd.class_cprops.get(cls,{}) # {name:(fn, inv_fn)} if cls in pd.conc_to_abstract: # {ConcreteClass:", "up to parent classes that are inherited from. \"\"\" sprops", "-> (v -> v') -> ((k, v) -> (k', v'))", "Add calculated data for p in cprops: f2 = cprops[p][1]", "function returns a closure turning objectify into a single argument", "write lambda functions. \"\"\" return lambda d: objectify(d, cls) def", "data for p in cprops: f1 = cprops[p][0] setattr(obj, p,", "what parameter need >>> @sprop.c # to be written out.", "b >>> self.c = c >>> >>> baz = Baz(a=1,", "of the map. kfun: function applied to all keys (default", "function that applies those functions over all of the keys", "-> v') -> ((k, v) -> (k', v')) \"\"\" return", "in cprops: f2 = cprops[p][1] data[p]=f2(getattr(obj,p)) data = pd.add_type_property(data, cls)", "# Add simple properties from data for p in sprops:", "applies those functions over all of the keys and values", "sprops = list(set(sprops).union(set(parent_sprops))) cprops2 = parent_cprops.copy() cprops2.update(cprops) cprops = cprops2", "created using the JSON data. 
I'm not sure whether it", "in cprops: f1 = cprops[p][0] setattr(obj, p, f1(data[p])) return obj", "to access some of the module-level parameters. \"\"\" # Create", "(k -> k') -> (v -> v') -> ((k, v)", "needed on either the keys or values, but not both.", "x, vfun=lambda x: x): \"\"\" Function that takes two functions", "(default identity) vfun: function applied to all values (default identity)", "closure turning objectify into a single argument function. This cuts", "cls) obj = concrete_cls() sprops,cprops = _get_registered_props(cls) # Add simple", "p, data[p]) # Add calculated properties from data for p", "b=2, c='three') >>> unobjectify(baz) { 'a':1, 'b':2, 'c':'three' } \"\"\"", "\"\"\" Returns all of the registered properties for a given", "Helper function returns a closure turning objectify into a single", "Create empty data data = {} sprops,cprops = _get_registered_props(cls) #", "all of the registered properties for a given class. Recursively", "sprops,cprops = _get_registered_props(cls) # Add simple properties for p in", "k') -> (v -> v') -> ((k, v) -> (k',", "= cprops[p][1] data[p]=f2(getattr(obj,p)) data = pd.add_type_property(data, cls) return data def", "needed in class annotations by removing the need to write", "data = pd.add_type_property(data, cls) return data def _get_registered_props(cls): \"\"\" Returns", "the JSON data. I'm not sure whether it is a", "v') -> ((k, v) -> (k', v')) \"\"\" return lambda", "properties from data for p in sprops: setattr(obj, p, data[p])", "inherited from. 
\"\"\" sprops = pd.class_sprops.get(cls,{}) # [name] cprops =", "for p in sprops: setattr(obj, p, data[p]) # Add calculated", "dct: dict([(kfun(k),vfun(v)) for k,v in dct.items()]) def transform_list(item_decoder=lambda x: x):", "lambda d: objectify(d, cls) def objectify(data, cls): \"\"\" Function takes", "pass >>> def __init__(self, a, b, c): >>> self.a =", "in a map and returns the transformed version of the", "(v -> v') -> ((k, v) -> (k', v')) \"\"\"", "keys and values in a map and returns the transformed", "written out. >>> class Baz(object): pass >>> def __init__(self, a,", ">>> @sprop.c # to be written out. >>> class Baz(object):", "Add calculated properties from data for p in cprops: f1", "{} sprops,cprops = _get_registered_props(cls) # Add simple properties for p", "data[p]=f2(getattr(obj,p)) data = pd.add_type_property(data, cls) return data def _get_registered_props(cls): \"\"\"", "data def _get_registered_props(cls): \"\"\" Returns all of the registered properties", "obj(cls): \"\"\" Helper function returns a closure turning objectify into", "\"\"\" Turns a python object (must be a class instance)", "cprops: f1 = cprops[p][0] setattr(obj, p, f1(data[p])) return obj def", "def unobjectify(obj): \"\"\" Turns a python object (must be a", "d: objectify(d, cls) def objectify(data, cls): \"\"\" Function takes JSON", "down the amount of code needed in class annotations by", "Add simple properties for p in sprops: data[p]=getattr(obj,p) # Add", "separate from the decode module, since they need to access", "instance of the class created using the JSON data. I'm", "setattr(obj, p, data[p]) # Add calculated properties from data for", "cprops2.update(cprops) cprops = cprops2 return sprops,cprops def obj(cls): \"\"\" Helper", "from. 
\"\"\" sprops = pd.class_sprops.get(cls,{}) # [name] cprops = pd.class_cprops.get(cls,{})", "and a target class as arguments and returns an instance", ">>> @sprop.b # unobjectify function what parameter need >>> @sprop.c", "parent_cprops.copy() cprops2.update(cprops) cprops = cprops2 return sprops,cprops def obj(cls): \"\"\"", "data. Example: >>> @sprop.a # sprop annotations are needed to", "functions. \"\"\" return lambda d: objectify(d, cls) def objectify(data, cls):", "pd.conc_to_abstract: # {ConcreteClass: (AbstractClass, _)} parent_cls = pd.conc_to_abstract[cls][0] parent_sprops, parent_cprops", "class annotations by removing the need to write lambda functions.", "class Baz(object): pass >>> def __init__(self, a, b, c): >>>", "\"\"\" Identity function is needed when performing transformations on maps", "return sprops,cprops def obj(cls): \"\"\" Helper function returns a closure", "arguments and returns an instance of the class created using", "empty class concrete_cls = pd.conc2(data, cls) obj = concrete_cls() sprops,cprops", "sprop annotations are needed to tell the >>> @sprop.b #", "that are inherited from. \"\"\" sprops = pd.class_sprops.get(cls,{}) # [name]", "obj def transform_map(kfun=lambda x: x, vfun=lambda x: x): \"\"\" Function", "Function that takes two functions as arguments and returns a", "@sprop.a # sprop annotations are needed to tell the >>>", "the need to write lambda functions. \"\"\" return lambda d:", "= pd.add_type_property(data, cls) return data def _get_registered_props(cls): \"\"\" Returns all", "sure whether it is a great idea to keep (un)objectify", "for p in cprops: f1 = cprops[p][0] setattr(obj, p, f1(data[p]))", "two functions as arguments and returns a function that applies", "[name] cprops = pd.class_cprops.get(cls,{}) # {name:(fn, inv_fn)} if cls in", "access some of the module-level parameters. 
\"\"\" # Create empty", "dict([(kfun(k),vfun(v)) for k,v in dct.items()]) def transform_list(item_decoder=lambda x: x): return", "lambda lst: map(item_decoder, lst) def identity(x): \"\"\" Identity function is", "def transform_list(item_decoder=lambda x: x): return lambda lst: map(item_decoder, lst) def", ">>> baz = Baz(a=1, b=2, c='three') >>> unobjectify(baz) { 'a':1,", "function applied to all values (default identity) (k -> k')", "takes two functions as arguments and returns a function that", "x): return lambda lst: map(item_decoder, lst) def identity(x): \"\"\" Identity", ">>> @sprop.a # sprop annotations are needed to tell the", "# [name] cprops = pd.class_cprops.get(cls,{}) # {name:(fn, inv_fn)} if cls", "function what parameter need >>> @sprop.c # to be written", "sprops = pd.class_sprops.get(cls,{}) # [name] cprops = pd.class_cprops.get(cls,{}) # {name:(fn,", "pd.conc2(data, cls) obj = concrete_cls() sprops,cprops = _get_registered_props(cls) # Add", "unobjectify(obj): \"\"\" Turns a python object (must be a class", "= cprops[p][0] setattr(obj, p, f1(data[p])) return obj def transform_map(kfun=lambda x:", "of the registered properties for a given class. Recursively calls", "\"\"\" Function that takes two functions as arguments and returns", "Create empty class concrete_cls = pd.conc2(data, cls) obj = concrete_cls()", "'b':2, 'c':'three' } \"\"\" cls = obj.__class__ # Create empty", "lst) def identity(x): \"\"\" Identity function is needed when performing", "cprops2 return sprops,cprops def obj(cls): \"\"\" Helper function returns a", "not sure whether it is a great idea to keep", "sprops,cprops def obj(cls): \"\"\" Helper function returns a closure turning", "Add simple properties from data for p in sprops: setattr(obj,", "on either the keys or values, but not both. \"\"\"", "the keys and values in a map and returns the", "the transformed version of the map. kfun: function applied to", "to be written out. 
>>> class Baz(object): pass >>> def", "calls up to parent classes that are inherited from. \"\"\"", "parameters. \"\"\" # Create empty class concrete_cls = pd.conc2(data, cls)", "\"\"\" return lambda dct: dict([(kfun(k),vfun(v)) for k,v in dct.items()]) def", "JSON data. Example: >>> @sprop.a # sprop annotations are needed", "over all of the keys and values in a map", "obj = concrete_cls() sprops,cprops = _get_registered_props(cls) # Add simple properties", "parent classes that are inherited from. \"\"\" sprops = pd.class_sprops.get(cls,{})", "by removing the need to write lambda functions. \"\"\" return", "p in cprops: f1 = cprops[p][0] setattr(obj, p, f1(data[p])) return", "Turns a python object (must be a class instance) into", "This cuts down the amount of code needed in class", "decode module, since they need to access some of the", "dct.items()]) def transform_list(item_decoder=lambda x: x): return lambda lst: map(item_decoder, lst)", "= pd.conc_to_abstract[cls][0] parent_sprops, parent_cprops = _get_registered_props(parent_cls) sprops = list(set(sprops).union(set(parent_sprops))) cprops2", "of the keys and values in a map and returns", "f1 = cprops[p][0] setattr(obj, p, f1(data[p])) return obj def transform_map(kfun=lambda", "x: x): \"\"\" Function that takes two functions as arguments", "def objectify(data, cls): \"\"\" Function takes JSON data and a", "kfun: function applied to all keys (default identity) vfun: function", "= list(set(sprops).union(set(parent_sprops))) cprops2 = parent_cprops.copy() cprops2.update(cprops) cprops = cprops2 return", "annotations are needed to tell the >>> @sprop.b # unobjectify", "to all keys (default identity) vfun: function applied to all", "objectify(data, cls): \"\"\" Function takes JSON data and a target", "# Create empty data data = {} sprops,cprops = _get_registered_props(cls)", "= obj.__class__ # Create empty data data = {} sprops,cprops", "return lambda d: objectify(d, cls) def objectify(data, cls): \"\"\" Function", 
"class as arguments and returns an instance of the class", "f2 = cprops[p][1] data[p]=f2(getattr(obj,p)) data = pd.add_type_property(data, cls) return data", "(AbstractClass, _)} parent_cls = pd.conc_to_abstract[cls][0] parent_sprops, parent_cprops = _get_registered_props(parent_cls) sprops", "c >>> >>> baz = Baz(a=1, b=2, c='three') >>> unobjectify(baz)", "sprops,cprops = _get_registered_props(cls) # Add simple properties from data for", "Baz(a=1, b=2, c='three') >>> unobjectify(baz) { 'a':1, 'b':2, 'c':'three' }", "the registered properties for a given class. Recursively calls up", "k,v in dct.items()]) def transform_list(item_decoder=lambda x: x): return lambda lst:", "-> ((k, v) -> (k', v')) \"\"\" return lambda dct:", "I'm not sure whether it is a great idea to", "are inherited from. \"\"\" sprops = pd.class_sprops.get(cls,{}) # [name] cprops", "lambda dct: dict([(kfun(k),vfun(v)) for k,v in dct.items()]) def transform_list(item_decoder=lambda x:", ">>> self.a = a >>> self.b = b >>> self.c", "of the class created using the JSON data. 
I'm not", "calculated properties from data for p in cprops: f1 =", "cprops = cprops2 return sprops,cprops def obj(cls): \"\"\" Helper function", "= pd.conc2(data, cls) obj = concrete_cls() sprops,cprops = _get_registered_props(cls) #", "code needed in class annotations by removing the need to", "for k,v in dct.items()]) def transform_list(item_decoder=lambda x: x): return lambda", "from data for p in cprops: f1 = cprops[p][0] setattr(obj,", "unobjectify function what parameter need >>> @sprop.c # to be", "vfun: function applied to all values (default identity) (k ->", "takes JSON data and a target class as arguments and", "# sprop annotations are needed to tell the >>> @sprop.b", "= b >>> self.c = c >>> >>> baz =", "map(item_decoder, lst) def identity(x): \"\"\" Identity function is needed when", "functions as arguments and returns a function that applies those", "# Add calculated data for p in cprops: f2 =", "a target class as arguments and returns an instance of", "needed when performing transformations on maps where some operation is", "parent_sprops, parent_cprops = _get_registered_props(parent_cls) sprops = list(set(sprops).union(set(parent_sprops))) cprops2 = parent_cprops.copy()", "operation is needed on either the keys or values, but", "returns a closure turning objectify into a single argument function.", "vfun=lambda x: x): \"\"\" Function that takes two functions as", "a map and returns the transformed version of the map.", "parent_cls = pd.conc_to_abstract[cls][0] parent_sprops, parent_cprops = _get_registered_props(parent_cls) sprops = list(set(sprops).union(set(parent_sprops)))", ">>> self.c = c >>> >>> baz = Baz(a=1, b=2,", "and returns the transformed version of the map. kfun: function", "and returns an instance of the class created using the", ">>> unobjectify(baz) { 'a':1, 'b':2, 'c':'three' } \"\"\" cls =", "given class. Recursively calls up to parent classes that are", "JSON data. 
I'm not sure whether it is a great", "pd def unobjectify(obj): \"\"\" Turns a python object (must be" ]
[ "Processing the arguments args = vars(ap.parse_args(argv)) type_ = str(args[\"type\"]) verbosity_level", "the trained modeL. :param type_acquisition: String, \"SEM\" or \"TEM\" :param", "dictionary version of the configuration file from the path where", "= acquisition_name.stem if selected_model == \"default_TEM_model_v1\": ads.imwrite(fp,255-img, format='png') else: ads.imwrite(fp,", "Preparing the arguments to axon_segmentation function path_model, config = generate_default_parameters(type_,", "psm), \"The image size must be at least {0}x{0} after", "segment are located in those image folders) :param path_model: where", "({2}).\\n\".format(height, width, psm), \"The image size must be at least", "was trained on. :param verbosity_level: Level of verbosity. The higher,", "after resampling to that resolution.\\n\".format(round(acquired_resolution * min(image_size) / resolution_model)), \"Image", "the CLI if psm == None: # Check if a", "to the folder \\n'+ 'where the image(s) to segment is/are", "the default model corresponding to the type_model acquisition. :param type_model:", "psm < minimum_resolution: print(\"EXCEPTION: The size of one of the", "= new_path else: path_model = MODELS_PATH / TEM_DEFAULT_MODEL_NAME path_config_file =", "try: height, width = ads.imread(str(current_path_target)).shape except Exception as e: raise", "width] minimum_resolution = config[\"trainingset_patchsize\"] * resolution_model / min(image_size) if psm", "the image folder path. The pixel size \\n'+ 'in that", "String, \"SEM\" or \"TEM\" :param model_input_size: String or Int, the", "not image(s). Please update the input path(s) and try again.\")", "values: [10-100]. 
\\n', default=25) ap._action_groups.reverse() # Processing the arguments args", "based on the default segmentation models: SEM or # TEM.", "the image has a size of {0} after resampling to", "of model we are using selected_model = path_model.name # Read", "is: \\n'+str(default_SEM_path)+'\\n'+ 'The default TEM model path is: \\n'+str(default_TEM_path)+'\\n') ap.add_argument('-s',", ") # Going through all paths passed into arguments for", "that resolution.\\n\".format(round(psm * min(image_size) / resolution_model)), \"Image file location: {0}\".format(current_path_target)", "new_path) resolution_model = generate_resolution(type_, config[\"trainingset_patchsize\"]) # Tuple of valid file", "dict_size = { \"SEM\":{ \"512\":0.1, \"256\":0.2 }, \"TEM\":{ \"512\":0.01 }", "file_))) # Remove temporary file used for the segmentation fp.close()", "script lets the user segment automatically one or many images", "def segment_image(path_testing_image, path_model, overlap_value, config, resolution_model, acquired_resolution = None, verbosity_level=0):", "Path(args[\"model\"]) if args[\"model\"] else None # Preparing the arguments to", "parameters used for segmentation for the default model corresponding to", "= ap.add_argument_group('required arguments') # Setting the arguments of the segmentation", "it. if (current_path_target.parent / 'pixel_size_in_micrometer.txt').exists(): resolution_file = open(current_path_target.parent / 'pixel_size_in_micrometer.txt',", "SEM_DEFAULT_MODEL_NAME = \"default_SEM_model_v1\" TEM_DEFAULT_MODEL_NAME = \"default_TEM_model_v1\" MODELS_PATH = pkg_resources.resource_filename('AxonDeepSeg', 'models')", "we load the default model. if type_acquisition == 'SEM': if", "the segmentation fp.close() (path_acquisition / '__tmp_segment__.png').unlink() else: print((\"The path {0}", "pixel_size_in_micrometer.txt file in image folder. 
\", \"Please provide a pixel", "''' # If string, convert to Path objects path_testing_images_folder =", "import argparse from argparse import RawTextHelpFormatter from tqdm import tqdm", ") sys.exit(2) # Performing the segmentation over the image segment_image(current_path_target,", "generate_resolution(type_, config[\"trainingset_patchsize\"]) # Tuple of valid file extensions validExtensions =", "} } return dict_size[str(type_acquisition)][str(model_input_size)] # Main loop def main(argv=None): '''", "or None if no configuration file was found at the", "Also displays the patch number being processed in the current", "''' Generates the resolution to use related to the trained", "the path of the image(s) being segmented. \\n'+ '2: Also", "if current_path_target.suffix.lower() in validExtensions: # Handle cases if no resolution", "folders) :param path_model: where to access the model. :param overlap_value:", "path {0} does not exist.\".format(path_testing_image))) return None def segment_folders(path_testing_images_folder, path_model,", "if no configuration file was found at the mentioned path.", "<NAME> - 2017-08-30 # Imports import sys from pathlib import", "tmp file fp = open(path_acquisition / '__tmp_segment__.png', 'wb+') img_name_original =", "value 3: Missing value or file ''' print(('AxonDeepSeg v.{}'.format(AxonDeepSeg.__version__))) ap", "new_path = convert_path(new_path) # Building the path of the requested", "elif type_acquisition == 'TEM': if (new_path is not None) and", "configuration of the network, or None if no configuration file", "samples. \\n'+ 'TEM: transmission electron microscopy samples. ') requiredName.add_argument('-i', '--imgpath',", "}, \"TEM\":{ \"512\":0.01 } } return dict_size[str(type_acquisition)][str(model_input_size)] # Main loop", "range of values: [10-100]. \\n', default=25) ap._action_groups.reverse() # Processing the", "to access the model. 
:param overlap_value: the number of pixels", "of the functions def segment_image(path_testing_image, path_model, overlap_value, config, resolution_model, acquired_resolution", "else we load the default model. if type_acquisition == 'SEM':", ":return: the config dictionary. ''' # If string, convert to", "or many images based on the default segmentation models: SEM", "size ({2}).\\n\".format(height, width, acquired_resolution), \"The image size must be at", "in the current sample.', default=0) ap.add_argument('-o', '--overlap', required=False, type=int, help='Overlap", "'r') as fd: config_network = json.loads(fd.read()) except: raise ValueError(\"No configuration", "segmentation requiredName.add_argument('-t', '--type', required=True, choices=['SEM','TEM'], help='Type of acquisition to segment.", "path. The pixel size \\n'+ 'in that file will be", "load the default model. if type_acquisition == 'SEM': if (new_path", "file in image folder. \", \"Please provide a pixel size", "for segmentation pipeline fp = open(path_testing_images_folder / '__tmp_segment__.png', 'wb+') img_name_original", "microscopy samples. ') requiredName.add_argument('-i', '--imgpath', required=True, nargs='+', help='Path to the", "'_seg-axonmyelin' + '.png' axon_segmentation(path_acquisitions_folders=path_testing_images_folder, acquisitions_filenames=[acquisition_name], path_model_folder=path_model, config_dict=config, ckpt_name='model', inference_batch_size=1, overlap_value=overlap_value,", "means less border effects but more time to perform the", "automatically one or many images based on the default segmentation", "is provided on the CLI if psm == None: #", "only image files (not already segmented or not masks) img_files", "RawTextHelpFormatter from tqdm import tqdm import pkg_resources import AxonDeepSeg import", "prediction. Higher value means less border effects but more time", "'Recommended range of values: [10-100]. 
\\n', default=25) ap._action_groups.reverse() # Processing", "config[\"trainingset_patchsize\"]) # Tuple of valid file extensions validExtensions = (", "config, resolution_model, acquired_resolution = None, verbosity_level=0): ''' Segments the images", "is/are located.') ap.add_argument(\"-m\", \"--model\", required=False, help='Folder where the model is", "ads.imread(str(path_testing_images_folder / file_)) # Generate tmpfile for segmentation pipeline fp", "{0} segmented.\".format(str(path_testing_images_folder / file_))) # Remove temporary file used for", "Main loop def main(argv=None): ''' Main loop. :return: Exit code.", "'--sizepixel', required=False, help='Pixel size of the image(s) to segment, in", "'+str(default_overlap)+'\\n'+ 'Recommended range of values: [10-100]. \\n', default=25) ap._action_groups.reverse() #", "where to access the model :param overlap_value: the number of", "value.\" ) sys.exit(3) # Performing the segmentation over all folders", "the model is located. \\n'+ 'The default SEM model path", "+ '.png' axon_segmentation(path_acquisitions_folders=path_testing_images_folder, acquisitions_filenames=[acquisition_name], path_model_folder=path_model, config_dict=config, ckpt_name='model', inference_batch_size=1, overlap_value=overlap_value, segmentations_filenames=[segmented_image_name],", "the default segmentation models: SEM or # TEM. # #", "acquired_resolution=psm, verbosity_level=verbosity_level) print(\"Segmentation finished.\") else: print(\"The path(s) specified is/are not", "no resolution is provided on the CLI if psm ==", "of acquisition to segment. \\n'+ 'SEM: scanning electron microscopy samples.", "'pixel_size_in_micrometer.txt').exists(): resolution_file = open(current_path_target / 'pixel_size_in_micrometer.txt', 'r') psm = float(resolution_file.read())", "sys from pathlib import Path import json import argparse from", "a pixel size file exists, if so read it. 
if", "'If no pixel size is specified, a pixel_size_in_micrometer.txt \\n'+ 'file", "perform the segmentation. :param config: dict containing the configuration of", "modeL. :param type_acquisition: String, \"SEM\" or \"TEM\" :param model_input_size: String", "patches.\\n\".format(config[\"trainingset_patchsize\"], resolution_model), \"One of the dimensions of the image has", "help='Pixel size of the image(s) to segment, in micrometers. \\n'+", "fp = open(path_acquisition / '__tmp_segment__.png', 'wb+') img_name_original = acquisition_name.stem if", "acquired_resolution < minimum_resolution: print(\"EXCEPTION: The size of one of the", "required=False, help='Pixel size of the image(s) to segment, in micrometers.", "This script lets the user segment automatically one or many", "segment is/are located.') ap.add_argument(\"-m\", \"--model\", required=False, help='Folder where the model", "# Check if a pixel size file exists, if so", "verbosity_level=verbosity_level) print(\"Segmentation finished.\") else: print(\"The path(s) specified is/are not image(s).", "if not current_path_target.is_dir(): if current_path_target.suffix.lower() in validExtensions: # Handle cases", "model path is: \\n'+str(default_SEM_path)+'\\n'+ 'The default TEM model path is:", "height, width, _ = ads.imread(str(current_path_target)).shape except: try: height, width =", "path is: \\n'+str(default_SEM_path)+'\\n'+ 'The default TEM model path is: \\n'+str(default_TEM_path)+'\\n')", "Remove temporary file used for the segmentation fp.close() (path_acquisition /", "being processed in the current sample.', default=0) ap.add_argument('-o', '--overlap', required=False,", "argparse.ArgumentParser(formatter_class=RawTextHelpFormatter) requiredName = ap.add_argument_group('required arguments') # Setting the arguments of", "dimensions of the image has a size of {0} after", "overlap_value=overlap_value, segmentations_filenames=segmented_image_name, resampled_resolutions=resolution_model, 
verbosity_level=verbosity_level, acquired_resolution=acquired_resolution, prediction_proba_activate=False, write_mode=True) if verbosity_level >=", "the information about the prediction step \\n'+ ' for the", "args[\"model\"] else None # Preparing the arguments to axon_segmentation function", "print(path_testing_images_folder / file_) try: height, width, _ = ads.imread(str(path_testing_images_folder /", "to png if not already done and adapt to model", "image(s) being segmented. \\n'+ '2: Also displays the information about", ":return: Exit code. 0: Success 2: Invalid argument value 3:", "/ file_)).shape except: try: height, width = ads.imread(str(path_testing_images_folder / file_)).shape", "'.png' axon_segmentation(path_acquisitions_folders=path_testing_images_folder, acquisitions_filenames=[acquisition_name], path_model_folder=path_model, config_dict=config, ckpt_name='model', inference_batch_size=1, overlap_value=overlap_value, segmentations_filenames=[segmented_image_name], acquired_resolution=acquired_resolution,", "value or file ''' print(('AxonDeepSeg v.{}'.format(AxonDeepSeg.__version__))) ap = argparse.ArgumentParser(formatter_class=RawTextHelpFormatter) requiredName", ":param new_path: Path to the model to use. 
:return: the", "'1: Also displays the path of the image(s) being segmented.", "located in those image folders) :param path_model: where to access", "those image folders) :param path_model: where to access the model.", "{0} after resampling to that resolution.\\n\".format(round(psm * min(image_size) / resolution_model)),", "convert_path(new_path) # Building the path of the requested model if", "default_SEM_path = MODELS_PATH / SEM_DEFAULT_MODEL_NAME default_TEM_path = MODELS_PATH / TEM_DEFAULT_MODEL_NAME", "\", \"Please provide a pixel size (using argument -s), or", "selected_model == \"default_TEM_model_v1\": ads.imwrite(fp,255-img, format='png') else: ads.imwrite(fp,img, format='png') acquisition_name =", "image folders are located (the images to segment are located", "config_dict=config, ckpt_name='model', inference_batch_size=1, overlap_value=overlap_value, segmentations_filenames=segmented_image_name, resampled_resolutions=resolution_model, verbosity_level=verbosity_level, acquired_resolution=acquired_resolution, prediction_proba_activate=False, write_mode=True)", "the patch number being processed in the current sample.', default=0)", "''' Generates the dictionary version of the configuration file from", "are located (the images to segment are located in those", "import AxonDeepSeg import AxonDeepSeg.ads_utils as ads from AxonDeepSeg.apply_model import axon_segmentation", "to Path objects new_path = convert_path(new_path) # Building the path", "from tqdm import tqdm import pkg_resources import AxonDeepSeg import AxonDeepSeg.ads_utils", "config_dict=config, ckpt_name='model', inference_batch_size=1, overlap_value=overlap_value, segmentations_filenames=[segmented_image_name], acquired_resolution=acquired_resolution, verbosity_level=verbosity_level, resampled_resolutions=resolution_model, prediction_proba_activate=False, write_mode=True)", "verbosity. The higher, the more information is given about the", "default model corresponding to the type_model acquisition. 
:param type_model: String,", "segment_folders(current_path_target, path_model, overlap_value, config, resolution_model, acquired_resolution=psm, verbosity_level=verbosity_level) print(\"Segmentation finished.\") sys.exit(0)", "def generate_config_dict(path_to_config_file): ''' Generates the dictionary version of the configuration", "folder \\n'+ 'where the image(s) to segment is/are located.') ap.add_argument(\"-m\",", "# Performing the segmentation over the image segment_image(current_path_target, path_model, overlap_value,", "bar for the segmentation. \\n'+ '1: Also displays the path", "SEM model path is: \\n'+str(default_SEM_path)+'\\n'+ 'The default TEM model path", "/ resolution_model)), \"Image file location: {0}\".format(str(path_testing_images_folder / file_)) ) sys.exit(2)", "size of the image(s) to segment, in micrometers. \\n'+ 'If", "path_testing_image: the path of the image to segment. :param path_model:", "= MODELS_PATH / SEM_DEFAULT_MODEL_NAME elif type_acquisition == 'TEM': if (new_path", "(default) : Displays the progress bar for the segmentation. \\n'+", "folder path. The pixel size \\n'+ 'in that file will", "pkg_resources import AxonDeepSeg import AxonDeepSeg.ads_utils as ads from AxonDeepSeg.apply_model import", "image files (not already segmented or not masks) img_files =", "config: dict containing the configuration of the network :param resolution_model:", ":param resolution_model: the resolution the model was trained on. :param", "in the image folders located in the path_testing_images_folder. :param path_testing_images_folder:", "# Main loop def main(argv=None): ''' Main loop. 
:return: Exit", "resolution_model, acquired_resolution = None, verbosity_level=0): ''' Segments the images contained", "generate_resolution(type_acquisition, model_input_size): ''' Generates the resolution to use related to", "resolution_model = generate_resolution(type_, config[\"trainingset_patchsize\"]) # Tuple of valid file extensions", "segmentation models: SEM or # TEM. # # <NAME> -", "displays the patch number being processed in the current sample.',", "images based on the default segmentation models: SEM or #", "# # <NAME> - 2017-08-30 # Imports import sys from", "print(('AxonDeepSeg v.{}'.format(AxonDeepSeg.__version__))) ap = argparse.ArgumentParser(formatter_class=RawTextHelpFormatter) requiredName = ap.add_argument_group('required arguments') #", "') requiredName.add_argument('-i', '--imgpath', required=True, nargs='+', help='Path to the image to", "break else: # Handle cases if no resolution is provided", "+ '.png' # Performing the segmentation axon_segmentation(path_acquisitions_folders=path_acquisition, acquisitions_filenames=[acquisition_name], path_model_folder=path_model, config_dict=config,", "trained modeL. 
:param type_acquisition: String, \"SEM\" or \"TEM\" :param model_input_size:", "too small for the provided pixel size ({2}).\\n\".format(height, width, psm),", "except: raise ValueError(\"No configuration file available at this path.\") return", "new_path): ''' Generates the parameters used for segmentation for the", "verbosity_level=verbosity_level) print(\"Segmentation finished.\") sys.exit(0) # Calling the script if __name__", "img_name_original + '_seg-axonmyelin' + '.png' axon_segmentation(path_acquisitions_folders=path_testing_images_folder, acquisitions_filenames=[acquisition_name], path_model_folder=path_model, config_dict=config, ckpt_name='model',", "can improve the segmentation at patch borders, \\n'+ 'but also", "= config[\"trainingset_patchsize\"] * resolution_model / min(image_size) if psm < minimum_resolution:", "= open(current_path_target / 'pixel_size_in_micrometer.txt', 'r') psm = float(resolution_file.read()) else: print(\"ERROR:", "0: Success 2: Invalid argument value 3: Missing value or", "choices=['SEM','TEM'], help='Type of acquisition to segment. \\n'+ 'SEM: scanning electron", ":param config: dict containing the configuration of the network :param", "type_acquisition == 'TEM': if (new_path is not None) and new_path.exists():", "== 'TEM': if (new_path is not None) and new_path.exists(): path_model", "= [file for file in path_testing_images_folder.iterdir() if (file.suffix.lower() in ('.png','.jpg','.jpeg','.tif','.tiff'))", "psm == None: # Check if a pixel size file", "more information is given about the segmentation process. 
:return: Nothing.", "convert to Path objects path_testing_image = convert_path(path_testing_image) path_model = convert_path(path_model)", "if path_testing_image.exists(): # Extracting the image name and its folder", "objects path_testing_image = convert_path(path_testing_image) path_model = convert_path(path_model) if path_testing_image.exists(): #", "Generates the parameters used for segmentation for the default model", "verbosity_level=0): ''' Segment the image located at the path_testing_image location.", "path_parts = path_testing_image.parts acquisition_name = Path(path_parts[-1]) path_acquisition = Path(*path_parts[:-1]) #", "= Path(*path_parts[:-1]) # Get type of model we are using", "if args[\"model\"] else None # Preparing the arguments to axon_segmentation", "convert_path # Global variables SEM_DEFAULT_MODEL_NAME = \"default_SEM_model_v1\" TEM_DEFAULT_MODEL_NAME = \"default_TEM_model_v1\"", "if (new_path is not None) and new_path.exists(): path_model = new_path", "import convert_path # Global variables SEM_DEFAULT_MODEL_NAME = \"default_SEM_model_v1\" TEM_DEFAULT_MODEL_NAME =", "/ file_)) ) sys.exit(2) selected_model = path_model.name # Read image", "ap.add_argument_group('required arguments') # Setting the arguments of the segmentation requiredName.add_argument('-t',", "(not str(file).endswith(('_seg-axonmyelin.png','_seg-axon.png','_seg-myelin.png','mask.png')))] # Pre-processing: convert to png if not already", "''' # If string, convert to Path objects new_path =", "segment, in micrometers. 
\\n'+ 'If no pixel size is specified,", "path_model: where to access the model :param overlap_value: the number", "minimum_resolution = config[\"trainingset_patchsize\"] * resolution_model / min(image_size) if acquired_resolution <", "already done and adapt to model contrast for file_ in", "segmentation over all folders in the specified folder containing acquisitions", "and adapt to model contrast for file_ in tqdm(img_files, desc=\"Segmentation...\"):", "(the images to segment are located in those image folders)", "the configuration of the network, or None if no configuration", "large enough for given resolution to reach minimum patch size", "the network :param resolution_model: the resolution the model was trained", "(current_path_target / 'pixel_size_in_micrometer.txt').exists(): resolution_file = open(current_path_target / 'pixel_size_in_micrometer.txt', 'r') psm", "None: psm = float(args[\"sizepixel\"]) else: psm = None path_target_list =", "Tuple of valid file extensions validExtensions = ( \".jpeg\", \".jpg\",", "overlap_value=overlap_value, segmentations_filenames=[segmented_image_name], acquired_resolution=acquired_resolution, verbosity_level=verbosity_level, resampled_resolutions=resolution_model, prediction_proba_activate=False, write_mode=True) if verbosity_level >=", "located at the path_testing_image location. :param path_testing_image: the path of", "and its folder path from the total path. path_parts =", "No pixel size is provided, and there is no pixel_size_in_micrometer.txt", "minimum_resolution = config[\"trainingset_patchsize\"] * resolution_model / min(image_size) if psm <", "exists, if so read it. if (current_path_target / 'pixel_size_in_micrometer.txt').exists(): resolution_file", "doing the segmentation. 
\\n'+ 'Higher values of overlap can improve", "path_testing_images_folder = convert_path(path_testing_images_folder) path_model = convert_path(path_model) # Update list of", "exists and was supplied, else we load the default model.", "add a pixel_size_in_micrometer.txt file \", \"containing the pixel size value.\"", "file used for the segmentation fp.close() (path_acquisition / '__tmp_segment__.png').unlink() else:", "selecting only image files (not already segmented or not masks)", "config[\"trainingset_patchsize\"] * resolution_model / min(image_size) if psm < minimum_resolution: print(\"EXCEPTION:", "the requested model if it exists and was supplied, else", "that image size is large enough for given resolution to", "if psm == None: # Check if a pixel size", "from AxonDeepSeg.apply_model import axon_segmentation from AxonDeepSeg.ads_utils import convert_path # Global", "image size is large enough for given resolution to reach", "Segmentation script # ------------------- # This script lets the user", "size file exists, if so read it. if (current_path_target /", ":param path_model: where to access the model. :param overlap_value: the", "Path(MODELS_PATH) default_SEM_path = MODELS_PATH / SEM_DEFAULT_MODEL_NAME default_TEM_path = MODELS_PATH /", "\\n'+ 'where the image(s) to segment is/are located.') ap.add_argument(\"-m\", \"--model\",", "located in the path_testing_images_folder. 
:param path_testing_images_folder: the folder where all", "{ \"SEM\":{ \"512\":0.1, \"256\":0.2 }, \"TEM\":{ \"512\":0.01 } } return", "AxonDeepSeg.ads_utils as ads from AxonDeepSeg.apply_model import axon_segmentation from AxonDeepSeg.ads_utils import", "the segmentation axon_segmentation(path_acquisitions_folders=path_acquisition, acquisitions_filenames=[acquisition_name], path_model_folder=path_model, config_dict=config, ckpt_name='model', inference_batch_size=1, overlap_value=overlap_value, segmentations_filenames=segmented_image_name,", "no pixel_size_in_micrometer.txt file in image folder. \", \"Please provide a", "where to access the model. :param overlap_value: the number of", "25 # Definition of the functions def segment_image(path_testing_image, path_model, overlap_value,", "image_size = [height, width] minimum_resolution = config[\"trainingset_patchsize\"] * resolution_model /", "path_to_config_file = convert_path(path_to_config_file) try: with open(path_to_config_file, 'r') as fd: config_network", "ads.imread(str(current_path_target)).shape except Exception as e: raise e image_size = [height,", "already segmented or not masks) img_files = [file for file", "path(s) specified is/are not image(s). Please update the input path(s)", "of images to segment by selecting only image files (not", ":return: Float, the resolution of the model. 
''' dict_size =", "lets the user segment automatically one or many images based", "used for the segmentation fp.close() (path_acquisition / '__tmp_segment__.png').unlink() else: print((\"The", "ads.imread(str(current_path_target)).shape except: try: height, width = ads.imread(str(current_path_target)).shape except Exception as", "information about the prediction step \\n'+ ' for the segmentation", "def generate_resolution(type_acquisition, model_input_size): ''' Generates the resolution to use related", "= file_.stem if selected_model == \"default_TEM_model_v1\": ads.imwrite(fp,255-img, format='png') else: ads.imwrite(fp,img,", "with open(path_to_config_file, 'r') as fd: config_network = json.loads(fd.read()) except: raise", "information is given about the segmentation process. :return: Nothing. '''", "# Performing the segmentation over all folders in the specified", "config, resolution_model, acquired_resolution=psm, verbosity_level=verbosity_level) print(\"Segmentation finished.\") sys.exit(0) # Calling the", "Segments the images contained in the image folders located in", "* min(image_size) / resolution_model)), \"Image file location: {0}\".format(str(path_testing_images_folder / file_))", "path_testing_images_folder: the folder where all image folders are located (the", "there is no pixel_size_in_micrometer.txt file in image folder. \", \"Please", "after resizing. try: height, width, _ = ads.imread(str(current_path_target)).shape except: try:", "new_path.exists(): path_model = new_path else: path_model = MODELS_PATH / TEM_DEFAULT_MODEL_NAME", "# Setting the arguments of the segmentation requiredName.add_argument('-t', '--type', required=True,", "segment or path to the folder \\n'+ 'where the image(s)", "to get the parameters from. :param new_path: Path to the", "new_path else: path_model = MODELS_PATH / TEM_DEFAULT_MODEL_NAME path_config_file = path_model", "to the trained modeL. 
:param type_acquisition: String, \"SEM\" or \"TEM\"", "== 'SEM': if (new_path is not None) and new_path.exists(): path_model", "path_testing_image location. :param path_testing_image: the path of the image to", "{0}x{0} after resampling to a resolution of {1} to create", "conversion img = ads.imread(str(path_testing_images_folder / file_)) # Generate tmpfile for", "is located. \\n'+ 'The default SEM model path is: \\n'+str(default_SEM_path)+'\\n'+", "= int(args[\"overlap\"]) if args[\"sizepixel\"] is not None: psm = float(args[\"sizepixel\"])", "is located. :return: dict containing the configuration of the network,", "path_model: where to access the model. :param overlap_value: the number", "file_) try: height, width, _ = ads.imread(str(path_testing_images_folder / file_)).shape except:", "not None: psm = float(args[\"sizepixel\"]) else: psm = None path_target_list", "file in path_testing_images_folder.iterdir() if (file.suffix.lower() in ('.png','.jpg','.jpeg','.tif','.tiff')) and (not str(file).endswith(('_seg-axonmyelin.png','_seg-axon.png','_seg-myelin.png','mask.png')))]", "segmentations_filenames=segmented_image_name, resampled_resolutions=resolution_model, verbosity_level=verbosity_level, acquired_resolution=acquired_resolution, prediction_proba_activate=False, write_mode=True) if verbosity_level >= 1:", "to segment. :param path_model: where to access the model :param", "min(image_size) if acquired_resolution < minimum_resolution: print(\"EXCEPTION: The size of one", "to segment is/are located.') ap.add_argument(\"-m\", \"--model\", required=False, help='Folder where the", "None path_target_list = [Path(p) for p in args[\"imgpath\"]] new_path =", "if no resolution is provided on the CLI if psm", "\\n'+ 'SEM: scanning electron microscopy samples. 
\\n'+ 'TEM: transmission electron", "from argparse import RawTextHelpFormatter from tqdm import tqdm import pkg_resources", "axon_segmentation(path_acquisitions_folders=path_testing_images_folder, acquisitions_filenames=[acquisition_name], path_model_folder=path_model, config_dict=config, ckpt_name='model', inference_batch_size=1, overlap_value=overlap_value, segmentations_filenames=[segmented_image_name], acquired_resolution=acquired_resolution, verbosity_level=verbosity_level,", "except: try: height, width = ads.imread(str(current_path_target)).shape except Exception as e:", "contrast for file_ in tqdm(img_files, desc=\"Segmentation...\"): print(path_testing_images_folder / file_) try:", "the segmentation over all folders in the specified folder containing", "about the segmentation process. :return: Nothing. ''' # If string,", "'Higher values of overlap can improve the segmentation at patch", "# Segmentation script # ------------------- # This script lets the", "used for segmentation for the default model corresponding to the", "image has a size of {0} after resampling to that", "all image folders are located (the images to segment are", "the image folders located in the path_testing_images_folder. :param path_testing_images_folder: the", "to access the model :param overlap_value: the number of pixels", "provided pixel size ({2}).\\n\".format(height, width, acquired_resolution), \"The image size must", "width, acquired_resolution), \"The image size must be at least {0}x{0}", "if verbosity_level >= 1: tqdm.write(\"Image {0} segmented.\".format(str(path_testing_images_folder / file_))) #", "segment. 
:param path_model: where to access the model :param overlap_value:", "import tqdm import pkg_resources import AxonDeepSeg import AxonDeepSeg.ads_utils as ads", "be at least {0}x{0} after resampling to a resolution of", "\"SEM\":{ \"512\":0.1, \"256\":0.2 }, \"TEM\":{ \"512\":0.01 } } return dict_size[str(type_acquisition)][str(model_input_size)]", "\"512\":0.01 } } return dict_size[str(type_acquisition)][str(model_input_size)] # Main loop def main(argv=None):", "/ '__tmp_segment__.png', 'wb+') img_name_original = acquisition_name.stem if selected_model == \"default_TEM_model_v1\":", "on. :param verbosity_level: Level of verbosity. The higher, the more", "the resolution of the model. ''' dict_size = { \"SEM\":{", "path where the file config_network.json is located. :return: dict containing", "verbosity_level=verbosity_level, acquired_resolution=acquired_resolution, prediction_proba_activate=False, write_mode=True) if verbosity_level >= 1: print((\"Image {0}", "is large enough for given resolution to reach minimum patch", "specified is/are not image(s). Please update the input path(s) and", "== \"default_TEM_model_v1\": ads.imwrite(fp,255-img, format='png') else: ads.imwrite(fp, img, format='png') acquisition_name =", "file config_network.json is located. :return: dict containing the configuration of", "Exception as e: raise e image_size = [height, width] minimum_resolution", "''' # If string, convert to Path objects path_testing_image =", "ap.add_argument(\"-m\", \"--model\", required=False, help='Folder where the model is located. 
\\n'+", "json.loads(fd.read()) except: raise ValueError(\"No configuration file available at this path.\")", "in path_target_list: if not current_path_target.is_dir(): if current_path_target.suffix.lower() in validExtensions: #", "format='png') else: ads.imwrite(fp, img, format='png') acquisition_name = Path(fp.name).name segmented_image_name =", "resolution_model / min(image_size) if acquired_resolution < minimum_resolution: print(\"EXCEPTION: The size", "model_input_size): ''' Generates the resolution to use related to the", "the functions def segment_image(path_testing_image, path_model, overlap_value, config, resolution_model, acquired_resolution =", "None def segment_folders(path_testing_images_folder, path_model, overlap_value, config, resolution_model, acquired_resolution = None,", "standard sized patches.\\n\".format(config[\"trainingset_patchsize\"], resolution_model), \"One of the dimensions of the", "(path_acquisition / '__tmp_segment__.png').unlink() else: print((\"The path {0} does not exist.\".format(path_testing_image)))", "SEM_DEFAULT_MODEL_NAME elif type_acquisition == 'TEM': if (new_path is not None)", "\".jpeg\", \".jpg\", \".tif\", \".tiff\", \".png\" ) # Going through all", "Setting the arguments of the segmentation requiredName.add_argument('-t', '--type', required=True, choices=['SEM','TEM'],", "to the type_model acquisition. :param type_model: String, the type of", "file available at this path.\") return config_network def generate_resolution(type_acquisition, model_input_size):", "in the specified folder containing acquisitions to segment. 
segment_folders(current_path_target, path_model,", "'__tmp_segment__.png').unlink() return None def generate_default_parameters(type_acquisition, new_path): ''' Generates the parameters", "of the network :param resolution_model: the resolution the model was", "e image_size = [height, width] minimum_resolution = config[\"trainingset_patchsize\"] * resolution_model", "print((\"The path {0} does not exist.\".format(path_testing_image))) return None def segment_folders(path_testing_images_folder,", "function path_model, config = generate_default_parameters(type_, new_path) resolution_model = generate_resolution(type_, config[\"trainingset_patchsize\"])", "file exists, if so read it. if (current_path_target / 'pixel_size_in_micrometer.txt').exists():", "= new_path else: path_model = MODELS_PATH / SEM_DEFAULT_MODEL_NAME elif type_acquisition", "convert_path(path_testing_image) path_model = convert_path(path_model) if path_testing_image.exists(): # Extracting the image", "TEM. # # <NAME> - 2017-08-30 # Imports import sys", "config = generate_default_parameters(type_, new_path) resolution_model = generate_resolution(type_, config[\"trainingset_patchsize\"]) # Tuple", "path. path_parts = path_testing_image.parts acquisition_name = Path(path_parts[-1]) path_acquisition = Path(*path_parts[:-1])", "Also displays the information about the prediction step \\n'+ '", "# Generate tmp file fp = open(path_acquisition / '__tmp_segment__.png', 'wb+')", "\"256\":0.2 }, \"TEM\":{ \"512\":0.01 } } return dict_size[str(type_acquisition)][str(model_input_size)] # Main", "path_model, overlap_value, config, resolution_model, acquired_resolution=psm, verbosity_level=verbosity_level) print(\"Segmentation finished.\") else: print(\"The", "\\n'+str(default_TEM_path)+'\\n') ap.add_argument('-s', '--sizepixel', required=False, help='Pixel size of the image(s) to", "the resolution to use related to the trained modeL. 
:param", "acquired_resolution=acquired_resolution, verbosity_level=verbosity_level, resampled_resolutions=resolution_model, prediction_proba_activate=False, write_mode=True) if verbosity_level >= 1: tqdm.write(\"Image", "segmentation fp.close() (path_testing_images_folder / '__tmp_segment__.png').unlink() return None def generate_default_parameters(type_acquisition, new_path):", "if a pixel size file exists, if so read it.", "configuration of the network :param resolution_model: the resolution the model", "if so read it. if (current_path_target.parent / 'pixel_size_in_micrometer.txt').exists(): resolution_file =", "= convert_path(path_testing_images_folder) path_model = convert_path(path_model) # Update list of images", "[10-100]. \\n', default=25) ap._action_groups.reverse() # Processing the arguments args =", "models: SEM or # TEM. # # <NAME> - 2017-08-30", "is: \\n'+str(default_TEM_path)+'\\n') ap.add_argument('-s', '--sizepixel', required=False, help='Pixel size of the image(s)", "too small for the provided pixel size ({2}).\\n\".format(height, width, acquired_resolution),", "resolution to reach minimum patch size after resizing. try: height,", "to Path objects path_testing_images_folder = convert_path(path_testing_images_folder) path_model = convert_path(path_model) #", "+ '_seg-axonmyelin' + '.png' axon_segmentation(path_acquisitions_folders=path_testing_images_folder, acquisitions_filenames=[acquisition_name], path_model_folder=path_model, config_dict=config, ckpt_name='model', inference_batch_size=1,", "at this path.\") return config_network def generate_resolution(type_acquisition, model_input_size): ''' Generates", "default=0) ap.add_argument('-o', '--overlap', required=False, type=int, help='Overlap value (in pixels) of", "overlap_value: the number of pixels to be used for overlap", "print(\"ERROR: No pixel size is provided, and there is no", "found at the mentioned path. 
''' # If string, convert", "\"default_TEM_model_v1\": ads.imwrite(fp,255-img, format='png') else: ads.imwrite(fp,img, format='png') acquisition_name = Path(fp.name).name segmented_image_name", "also increase the segmentation time. \\n'+ 'Default value: '+str(default_overlap)+'\\n'+ 'Recommended", "the image(s) to segment is/are located.') ap.add_argument(\"-m\", \"--model\", required=False, help='Folder", "prediction_proba_activate=False, write_mode=True) if verbosity_level >= 1: print((\"Image {0} segmented.\".format(path_testing_image))) #", "int(args[\"verbose\"]) overlap_value = int(args[\"overlap\"]) if args[\"sizepixel\"] is not None: psm", "overlap_value = int(args[\"overlap\"]) if args[\"sizepixel\"] is not None: psm =", "segment automatically one or many images based on the default", "config = generate_config_dict(path_config_file) return path_model, config def generate_config_dict(path_to_config_file): ''' Generates", "done and adapt to model contrast for file_ in tqdm(img_files,", "the path_testing_images_folder. 
:param path_testing_images_folder: the folder where all image folders", "size value.\" ) sys.exit(3) # Performing the segmentation over all", "'0 (default) : Displays the progress bar for the segmentation.", "for the segmentation fp.close() (path_testing_images_folder / '__tmp_segment__.png').unlink() return None def", "model path is: \\n'+str(default_TEM_path)+'\\n') ap.add_argument('-s', '--sizepixel', required=False, help='Pixel size of", "'__tmp_segment__.png', 'wb+') img_name_original = acquisition_name.stem if selected_model == \"default_TEM_model_v1\": ads.imwrite(fp,255-img,", "/ min(image_size) if psm < minimum_resolution: print(\"EXCEPTION: The size of", "Check that image size is large enough for given resolution", "int(args[\"overlap\"]) if args[\"sizepixel\"] is not None: psm = float(args[\"sizepixel\"]) else:", "path_model.name # Read image img = ads.imread(str(path_testing_image)) # Generate tmp", "png if not already done and adapt to model contrast", "segmentation. :param config: dict containing the configuration of the network", "segmented_image_name = img_name_original + '_seg-axonmyelin' + '.png' # Performing the", "current_path_target in path_target_list: if not current_path_target.is_dir(): if current_path_target.suffix.lower() in validExtensions:", "is too small for the provided pixel size ({2}).\\n\".format(height, width,", "of overlap can improve the segmentation at patch borders, \\n'+", "ads.imread(str(path_testing_image)) # Generate tmp file fp = open(path_acquisition / '__tmp_segment__.png',", "time to perform the segmentation. 
:param config: dict containing the", "\"containing the pixel size value.\" ) sys.exit(3) # Performing the", "segmentation axon_segmentation(path_acquisitions_folders=path_acquisition, acquisitions_filenames=[acquisition_name], path_model_folder=path_model, config_dict=config, ckpt_name='model', inference_batch_size=1, overlap_value=overlap_value, segmentations_filenames=segmented_image_name, resampled_resolutions=resolution_model,", "= None, verbosity_level=0): ''' Segments the images contained in the", "None: # Check if a pixel size file exists, if", "height, width = ads.imread(str(current_path_target)).shape except Exception as e: raise e", "required=True, choices=['SEM','TEM'], help='Type of acquisition to segment. \\n'+ 'SEM: scanning", "Higher value means less border effects but more time to", "config_network = json.loads(fd.read()) except: raise ValueError(\"No configuration file available at", "(in pixels) of the patches when doing the segmentation. \\n'+", "the segmentation at patch borders, \\n'+ 'but also increase the", "open(path_testing_images_folder / '__tmp_segment__.png', 'wb+') img_name_original = file_.stem if selected_model ==", "effects but more time to perform the segmentation. :param config:", "model. 
if type_acquisition == 'SEM': if (new_path is not None)", "the segmentation over the image segment_image(current_path_target, path_model, overlap_value, config, resolution_model,", "in args[\"imgpath\"]] new_path = Path(args[\"model\"]) if args[\"model\"] else None #", ">= 1: tqdm.write(\"Image {0} segmented.\".format(str(path_testing_images_folder / file_))) # Remove temporary", "input path(s) and try again.\") break else: # Handle cases", "If string, convert to Path objects path_to_config_file = convert_path(path_to_config_file) try:", "* resolution_model / min(image_size) if psm < minimum_resolution: print(\"EXCEPTION: The", "and try again.\") break else: # Handle cases if no", "finished.\") else: print(\"The path(s) specified is/are not image(s). Please update", "Please update the input path(s) and try again.\") break else:", "masks) img_files = [file for file in path_testing_images_folder.iterdir() if (file.suffix.lower()", "and new_path.exists(): path_model = new_path else: path_model = MODELS_PATH /", "= config[\"trainingset_patchsize\"] * resolution_model / min(image_size) if acquired_resolution < minimum_resolution:", "of verbosity. 
The higher, the more information is given about", "size \\n'+ 'in that file will be used for the", "prediction step \\n'+ ' for the segmentation of current sample.", "for current_path_target in path_target_list: if not current_path_target.is_dir(): if current_path_target.suffix.lower() in", "pixel size is specified, a pixel_size_in_micrometer.txt \\n'+ 'file needs to", "segmentation pipeline fp = open(path_testing_images_folder / '__tmp_segment__.png', 'wb+') img_name_original =", "# If string, convert to Path objects new_path = convert_path(new_path)", "_ = ads.imread(str(path_testing_images_folder / file_)).shape except: try: height, width =", "pkg_resources.resource_filename('AxonDeepSeg', 'models') MODELS_PATH = Path(MODELS_PATH) default_SEM_path = MODELS_PATH / SEM_DEFAULT_MODEL_NAME", "objects path_to_config_file = convert_path(path_to_config_file) try: with open(path_to_config_file, 'r') as fd:", "size file exists, if so read it. if (current_path_target.parent /", "to Path objects path_to_config_file = convert_path(path_to_config_file) try: with open(path_to_config_file, 'r')", "# Going through all paths passed into arguments for current_path_target", "/ 'config_network.json' config = generate_config_dict(path_config_file) return path_model, config def generate_config_dict(path_to_config_file):", "= str(args[\"type\"]) verbosity_level = int(args[\"verbose\"]) overlap_value = int(args[\"overlap\"]) if args[\"sizepixel\"]", "not current_path_target.is_dir(): if current_path_target.suffix.lower() in validExtensions: # Handle cases if", "= Path(fp.name).name segmented_image_name = img_name_original + '_seg-axonmyelin' + '.png' #", "('.png','.jpg','.jpeg','.tif','.tiff')) and (not str(file).endswith(('_seg-axonmyelin.png','_seg-axon.png','_seg-myelin.png','mask.png')))] # Pre-processing: convert to png if", "# This script lets the user segment automatically one or", "width, psm), \"The image size must be at least {0}x{0}", "If string, convert to Path objects 
path_testing_image = convert_path(path_testing_image) path_model", "else None # Preparing the arguments to axon_segmentation function path_model,", "cases if no resolution is provided on the CLI if", "3: Missing value or file ''' print(('AxonDeepSeg v.{}'.format(AxonDeepSeg.__version__))) ap =", "not exist.\".format(path_testing_image))) return None def segment_folders(path_testing_images_folder, path_model, overlap_value, config, resolution_model,", "The size of one of the images ({0}x{1}) is too", "float(resolution_file.read()) else: print(\"ERROR: No pixel size is provided, and there", "convert to Path objects new_path = convert_path(new_path) # Building the", "as fd: config_network = json.loads(fd.read()) except: raise ValueError(\"No configuration file", ":param path_testing_image: the path of the image to segment. :param", "given resolution to reach minimum patch size after resizing. try:", "the configuration of the network :param resolution_model: the resolution the", "pixel_size_in_micrometer.txt file \", \"containing the pixel size value.\" ) sys.exit(3)", "\\n'+ 'but also increase the segmentation time. \\n'+ 'Default value:", "of the image to segment. :param path_model: where to access", "this path.\") return config_network def generate_resolution(type_acquisition, model_input_size): ''' Generates the", "used for overlap when doing prediction. Higher value means less", "file_)) # Generate tmpfile for segmentation pipeline fp = open(path_testing_images_folder", "the image to segment. :param path_model: where to access the", "width = ads.imread(str(current_path_target)).shape except Exception as e: raise e image_size", "height, width, _ = ads.imread(str(path_testing_images_folder / file_)).shape except: try: height,", "the path_testing_image location. 
:param path_testing_image: the path of the image", "path_model, overlap_value, config, resolution_model, acquired_resolution = None, verbosity_level=0): ''' Segments", "acquired_resolution), \"The image size must be at least {0}x{0} after", "segmentations_filenames=[segmented_image_name], acquired_resolution=acquired_resolution, verbosity_level=verbosity_level, resampled_resolutions=resolution_model, prediction_proba_activate=False, write_mode=True) if verbosity_level >= 1:", "The higher, the more information is given about the segmentation", "parameters from. :param new_path: Path to the model to use.", "Segment the image located at the path_testing_image location. :param path_testing_image:", "ap.add_argument('-v', '--verbose', required=False, type=int, choices=list(range(0,4)), help='Verbosity level. \\n'+ '0 (default)", "overlap_value, config, resolution_model, acquired_resolution=psm, verbosity_level=verbosity_level) print(\"Segmentation finished.\") sys.exit(0) # Calling", "of the image(s) to segment, in micrometers. \\n'+ 'If no", "segmentation for the default model corresponding to the type_model acquisition.", "if args[\"sizepixel\"] is not None: psm = float(args[\"sizepixel\"]) else: psm", "/ resolution_model)), \"Image file location: {0}\".format(current_path_target) ) sys.exit(2) # Performing", "format='png') else: ads.imwrite(fp,img, format='png') acquisition_name = Path(fp.name).name segmented_image_name = img_name_original", "resolution_model: the resolution the model was trained on. :param verbosity_level:", "location: {0}\".format(str(path_testing_images_folder / file_)) ) sys.exit(2) selected_model = path_model.name #", ">= 1: print((\"Image {0} segmented.\".format(path_testing_image))) # Remove temporary file used", "about the prediction step \\n'+ ' for the segmentation of", "the dictionary version of the configuration file from the path", "\\n'+ 'Default value: '+str(default_overlap)+'\\n'+ 'Recommended range of values: [10-100]. 
\\n',", "if (current_path_target.parent / 'pixel_size_in_micrometer.txt').exists(): resolution_file = open(current_path_target.parent / 'pixel_size_in_micrometer.txt', 'r')", "from the path where it is located. :param path_to_config: relative", "args[\"sizepixel\"] is not None: psm = float(args[\"sizepixel\"]) else: psm =", "/ 'pixel_size_in_micrometer.txt').exists(): resolution_file = open(current_path_target / 'pixel_size_in_micrometer.txt', 'r') psm =", "value (in pixels) of the patches when doing the segmentation.", "at the path_testing_image location. :param path_testing_image: the path of the", "== \"default_TEM_model_v1\": ads.imwrite(fp,255-img, format='png') else: ads.imwrite(fp,img, format='png') acquisition_name = Path(fp.name).name", "size must be at least {0}x{0} after resampling to a", "ads from AxonDeepSeg.apply_model import axon_segmentation from AxonDeepSeg.ads_utils import convert_path #", "resolution the model was trained on. :param verbosity_level: Level of", "segmentation of current sample. \\n'+ '3: Also displays the patch", "type=int, help='Overlap value (in pixels) of the patches when doing", "samples. ') requiredName.add_argument('-i', '--imgpath', required=True, nargs='+', help='Path to the image", "the segmentation. \\n'+ '1: Also displays the path of the", "if it exists and was supplied, else we load the", "def generate_default_parameters(type_acquisition, new_path): ''' Generates the parameters used for segmentation", "< minimum_resolution: print(\"EXCEPTION: The size of one of the images", "specified, a pixel_size_in_micrometer.txt \\n'+ 'file needs to be added to", "if not already done and adapt to model contrast for", "model :param overlap_value: the number of pixels to be used", "patch size after resizing. try: height, width, _ = ads.imread(str(current_path_target)).shape", "pixels to be used for overlap when doing prediction. 
Higher", "ap = argparse.ArgumentParser(formatter_class=RawTextHelpFormatter) requiredName = ap.add_argument_group('required arguments') # Setting the", "return path_model, config def generate_config_dict(path_to_config_file): ''' Generates the dictionary version", "the file config_network.json is located. :return: dict containing the configuration", "resolution.\\n\".format(round(psm * min(image_size) / resolution_model)), \"Image file location: {0}\".format(current_path_target) )", "Path(path_parts[-1]) path_acquisition = Path(*path_parts[:-1]) # Get type of model we", "import AxonDeepSeg.ads_utils as ads from AxonDeepSeg.apply_model import axon_segmentation from AxonDeepSeg.ads_utils", "configuration file from the path where it is located. :param", "\\n'+ 'file needs to be added to the image folder", "config dictionary. ''' # If string, convert to Path objects", "be used for the segmentation.', default=None) ap.add_argument('-v', '--verbose', required=False, type=int,", "generate_config_dict(path_to_config_file): ''' Generates the dictionary version of the configuration file", "in ('.png','.jpg','.jpeg','.tif','.tiff')) and (not str(file).endswith(('_seg-axonmyelin.png','_seg-axon.png','_seg-myelin.png','mask.png')))] # Pre-processing: convert to png", "arguments') # Setting the arguments of the segmentation requiredName.add_argument('-t', '--type',", "the model was trained on. :param verbosity_level: Level of verbosity.", "of current sample. \\n'+ '3: Also displays the patch number", "choices=list(range(0,4)), help='Verbosity level. \\n'+ '0 (default) : Displays the progress", "open(path_acquisition / '__tmp_segment__.png', 'wb+') img_name_original = acquisition_name.stem if selected_model ==", "segmentation over the image segment_image(current_path_target, path_model, overlap_value, config, resolution_model, acquired_resolution=psm,", "micrometers. 
\\n'+ 'If no pixel size is specified, a pixel_size_in_micrometer.txt", "\"Please provide a pixel size (using argument -s), or add", "patches when doing the segmentation. \\n'+ 'Higher values of overlap", "to the model to use. :return: the config dictionary. '''", "Generates the resolution to use related to the trained modeL.", "\"One of the dimensions of the image has a size", ":param model_input_size: String or Int, the size of the input.", "== None: # Check if a pixel size file exists,", "file ''' print(('AxonDeepSeg v.{}'.format(AxonDeepSeg.__version__))) ap = argparse.ArgumentParser(formatter_class=RawTextHelpFormatter) requiredName = ap.add_argument_group('required", "to model contrast for file_ in tqdm(img_files, desc=\"Segmentation...\"): print(path_testing_images_folder /", "a pixel size (using argument -s), or add a pixel_size_in_micrometer.txt", "TEM_DEFAULT_MODEL_NAME path_config_file = path_model / 'config_network.json' config = generate_config_dict(path_config_file) return", "= ads.imread(str(path_testing_images_folder / file_)).shape except Exception as e: raise e", "size is provided, and there is no pixel_size_in_micrometer.txt file in", "sys.exit(3) # Check that image size is large enough for", "exists, if so read it. if (current_path_target.parent / 'pixel_size_in_micrometer.txt').exists(): resolution_file", "the network, or None if no configuration file was found", "import sys from pathlib import Path import json import argparse", "so read it. if (current_path_target / 'pixel_size_in_micrometer.txt').exists(): resolution_file = open(current_path_target", "image img = ads.imread(str(path_testing_image)) # Generate tmp file fp =", "acquisitions to segment. 
segment_folders(current_path_target, path_model, overlap_value, config, resolution_model, acquired_resolution=psm, verbosity_level=verbosity_level)", "the segmentation fp.close() (path_testing_images_folder / '__tmp_segment__.png').unlink() return None def generate_default_parameters(type_acquisition,", "sys.exit(3) # Performing the segmentation over all folders in the", "e: raise e image_size = [height, width] minimum_resolution = config[\"trainingset_patchsize\"]", "resizing. try: height, width, _ = ads.imread(str(current_path_target)).shape except: try: height,", "ads.imread(str(path_testing_images_folder / file_)).shape except Exception as e: raise e image_size", "using selected_model = path_model.name # Read image img = ads.imread(str(path_testing_image))", "resolution to use related to the trained modeL. :param type_acquisition:", "ads.imwrite(fp,img, format='png') acquisition_name = Path(fp.name).name segmented_image_name = img_name_original + '_seg-axonmyelin'", "\"--model\", required=False, help='Folder where the model is located. \\n'+ 'The", "the mentioned path. ''' # If string, convert to Path", "= json.loads(fd.read()) except: raise ValueError(\"No configuration file available at this", "sys.exit(2) selected_model = path_model.name # Read image for conversion img", "update the input path(s) and try again.\") break else: #", "use related to the trained modeL. :param type_acquisition: String, \"SEM\"", "img_name_original = acquisition_name.stem if selected_model == \"default_TEM_model_v1\": ads.imwrite(fp,255-img, format='png') else:", "provide a pixel size (using argument -s), or add a", "dict_size[str(type_acquisition)][str(model_input_size)] # Main loop def main(argv=None): ''' Main loop. 
:return:", "path.\") return config_network def generate_resolution(type_acquisition, model_input_size): ''' Generates the resolution", "acquisition_name = Path(fp.name).name segmented_image_name = img_name_original + '_seg-axonmyelin' + '.png'", "not already done and adapt to model contrast for file_", "'3: Also displays the patch number being processed in the", "2017-08-30 # Imports import sys from pathlib import Path import", "# Get type of model we are using selected_model =", "argument value 3: Missing value or file ''' print(('AxonDeepSeg v.{}'.format(AxonDeepSeg.__version__)))", "'but also increase the segmentation time. \\n'+ 'Default value: '+str(default_overlap)+'\\n'+", "improve the segmentation at patch borders, \\n'+ 'but also increase", "level. \\n'+ '0 (default) : Displays the progress bar for", "less border effects but more time to perform the segmentation.", "model corresponding to the type_model acquisition. :param type_model: String, the", "Missing value or file ''' print(('AxonDeepSeg v.{}'.format(AxonDeepSeg.__version__))) ap = argparse.ArgumentParser(formatter_class=RawTextHelpFormatter)", "'--imgpath', required=True, nargs='+', help='Path to the image to segment or", "If string, convert to Path objects path_testing_images_folder = convert_path(path_testing_images_folder) path_model", "when doing prediction. Higher value means less border effects but", "segmentation. \\n'+ '1: Also displays the path of the image(s)", "to be added to the image folder path. The pixel", "({2}).\\n\".format(height, width, acquired_resolution), \"The image size must be at least", "path_acquisition = Path(*path_parts[:-1]) # Get type of model we are", "for overlap when doing prediction. 
Higher value means less border", "convert_path(path_to_config_file) try: with open(path_to_config_file, 'r') as fd: config_network = json.loads(fd.read())", "path_model = MODELS_PATH / TEM_DEFAULT_MODEL_NAME path_config_file = path_model / 'config_network.json'", "fp.close() (path_acquisition / '__tmp_segment__.png').unlink() else: print((\"The path {0} does not", "in validExtensions: # Handle cases if no resolution is provided", "where the model is located. \\n'+ 'The default SEM model", "return None def generate_default_parameters(type_acquisition, new_path): ''' Generates the parameters used", "= { \"SEM\":{ \"512\":0.1, \"256\":0.2 }, \"TEM\":{ \"512\":0.01 } }", "ap.add_argument('-s', '--sizepixel', required=False, help='Pixel size of the image(s) to segment,", "to reach minimum patch size after resizing. try: height, width,", "as e: raise e image_size = [height, width] minimum_resolution =", "} return dict_size[str(type_acquisition)][str(model_input_size)] # Main loop def main(argv=None): ''' Main", "in micrometers. \\n'+ 'If no pixel size is specified, a", "after resampling to a resolution of {1} to create standard", "size after resizing. try: height, width, _ = ads.imread(str(current_path_target)).shape except:", "Pre-processing: convert to png if not already done and adapt", "of the requested model if it exists and was supplied,", "needs to be added to the image folder path. The", "path_model = MODELS_PATH / SEM_DEFAULT_MODEL_NAME elif type_acquisition == 'TEM': if", "Get type of model we are using selected_model = path_model.name", "= 25 # Definition of the functions def segment_image(path_testing_image, path_model,", "higher, the more information is given about the segmentation process.", "overlap can improve the segmentation at patch borders, \\n'+ 'but", "model to use. :return: the config dictionary. 
''' # If", "Generate tmp file fp = open(path_acquisition / '__tmp_segment__.png', 'wb+') img_name_original", "file location: {0}\".format(str(path_testing_images_folder / file_)) ) sys.exit(2) selected_model = path_model.name", "generate_default_parameters(type_acquisition, new_path): ''' Generates the parameters used for segmentation for", ":return: Nothing. ''' # If string, convert to Path objects", "temporary file used for the segmentation fp.close() (path_acquisition / '__tmp_segment__.png').unlink()", "print(\"Segmentation finished.\") else: print(\"The path(s) specified is/are not image(s). Please", "{0}\".format(str(path_testing_images_folder / file_)) ) sys.exit(2) selected_model = path_model.name # Read", "resolution_file = open(current_path_target.parent / 'pixel_size_in_micrometer.txt', 'r') psm = float(resolution_file.read()) else:", "= ads.imread(str(current_path_target)).shape except Exception as e: raise e image_size =", "\\n', default=25) ap._action_groups.reverse() # Processing the arguments args = vars(ap.parse_args(argv))", "# Tuple of valid file extensions validExtensions = ( \".jpeg\",", "file used for the segmentation fp.close() (path_testing_images_folder / '__tmp_segment__.png').unlink() return", "path_model = convert_path(path_model) # Update list of images to segment", "the segmentation. :param config: dict containing the configuration of the", "image folders) :param path_model: where to access the model. :param", "path_to_config: relative path where the file config_network.json is located. :return:", "the segmentation. 
\\n'+ 'Higher values of overlap can improve the", "raise e image_size = [height, width] minimum_resolution = config[\"trainingset_patchsize\"] *", "ap._action_groups.reverse() # Processing the arguments args = vars(ap.parse_args(argv)) type_ =", "{0} segmented.\".format(path_testing_image))) # Remove temporary file used for the segmentation", "if (file.suffix.lower() in ('.png','.jpg','.jpeg','.tif','.tiff')) and (not str(file).endswith(('_seg-axonmyelin.png','_seg-axon.png','_seg-myelin.png','mask.png')))] # Pre-processing: convert", "the default model. if type_acquisition == 'SEM': if (new_path is", "# Preparing the arguments to axon_segmentation function path_model, config =", "functions def segment_image(path_testing_image, path_model, overlap_value, config, resolution_model, acquired_resolution = None,", "/ 'pixel_size_in_micrometer.txt', 'r') psm = float(resolution_file.read()) else: print(\"ERROR: No pixel", "the segmentation requiredName.add_argument('-t', '--type', required=True, choices=['SEM','TEM'], help='Type of acquisition to", "\"The image size must be at least {0}x{0} after resampling", "psm = float(args[\"sizepixel\"]) else: psm = None path_target_list = [Path(p)", "else: psm = None path_target_list = [Path(p) for p in", "'--overlap', required=False, type=int, help='Overlap value (in pixels) of the patches", "ads.imwrite(fp,255-img, format='png') else: ads.imwrite(fp, img, format='png') acquisition_name = Path(fp.name).name segmented_image_name", "file location: {0}\".format(current_path_target) ) sys.exit(2) # Performing the segmentation over", "time. \\n'+ 'Default value: '+str(default_overlap)+'\\n'+ 'Recommended range of values: [10-100].", "try: height, width = ads.imread(str(path_testing_images_folder / file_)).shape except Exception as", "doing prediction. Higher value means less border effects but more", "fd: config_network = json.loads(fd.read()) except: raise ValueError(\"No configuration file available", "located. 
:param path_to_config: relative path where the file config_network.json is", "width = ads.imread(str(path_testing_images_folder / file_)).shape except Exception as e: raise", "ckpt_name='model', inference_batch_size=1, overlap_value=overlap_value, segmentations_filenames=[segmented_image_name], acquired_resolution=acquired_resolution, verbosity_level=verbosity_level, resampled_resolutions=resolution_model, prediction_proba_activate=False, write_mode=True) if", "more time to perform the segmentation. :param config: dict containing", "min(image_size) / resolution_model)), \"Image file location: {0}\".format(str(path_testing_images_folder / file_)) )", "to axon_segmentation function path_model, config = generate_default_parameters(type_, new_path) resolution_model =", "acquisitions_filenames=[acquisition_name], path_model_folder=path_model, config_dict=config, ckpt_name='model', inference_batch_size=1, overlap_value=overlap_value, segmentations_filenames=[segmented_image_name], acquired_resolution=acquired_resolution, verbosity_level=verbosity_level, resampled_resolutions=resolution_model,", "arguments for current_path_target in path_target_list: if not current_path_target.is_dir(): if current_path_target.suffix.lower()", "requiredName.add_argument('-t', '--type', required=True, choices=['SEM','TEM'], help='Type of acquisition to segment. \\n'+", "= generate_config_dict(path_config_file) return path_model, config def generate_config_dict(path_to_config_file): ''' Generates the", "''' Main loop. :return: Exit code. 0: Success 2: Invalid", "# Update list of images to segment by selecting only", "to segment or path to the folder \\n'+ 'where the", "try again.\") break else: # Handle cases if no resolution", "tmpfile for segmentation pipeline fp = open(path_testing_images_folder / '__tmp_segment__.png', 'wb+')", "to segment, in micrometers. 
\\n'+ 'If no pixel size is", "located.') ap.add_argument(\"-m\", \"--model\", required=False, help='Folder where the model is located.", "file_.stem if selected_model == \"default_TEM_model_v1\": ads.imwrite(fp,255-img, format='png') else: ads.imwrite(fp,img, format='png')", "pipeline fp = open(path_testing_images_folder / '__tmp_segment__.png', 'wb+') img_name_original = file_.stem", "help='Folder where the model is located. \\n'+ 'The default SEM", "\\n'+ 'If no pixel size is specified, a pixel_size_in_micrometer.txt \\n'+", "help='Path to the image to segment or path to the", "config_network def generate_resolution(type_acquisition, model_input_size): ''' Generates the resolution to use", "path_model, overlap_value, config, resolution_model, acquired_resolution = None, verbosity_level=0): ''' Segment", "to that resolution.\\n\".format(round(acquired_resolution * min(image_size) / resolution_model)), \"Image file location:", "of the images ({0}x{1}) is too small for the provided", "# Global variables SEM_DEFAULT_MODEL_NAME = \"default_SEM_model_v1\" TEM_DEFAULT_MODEL_NAME = \"default_TEM_model_v1\" MODELS_PATH", "tqdm(img_files, desc=\"Segmentation...\"): print(path_testing_images_folder / file_) try: height, width, _ =", "the segmentation time. \\n'+ 'Default value: '+str(default_overlap)+'\\n'+ 'Recommended range of", "'SEM: scanning electron microscopy samples. \\n'+ 'TEM: transmission electron microscopy", "overlap_value, config, resolution_model, acquired_resolution=psm, verbosity_level=verbosity_level) print(\"Segmentation finished.\") else: print(\"The path(s)", "# If string, convert to Path objects path_testing_images_folder = convert_path(path_testing_images_folder)", "path_target_list: if not current_path_target.is_dir(): if current_path_target.suffix.lower() in validExtensions: # Handle", "Nothing. 
''' # If string, convert to Path objects path_testing_image", "pixel size ({2}).\\n\".format(height, width, psm), \"The image size must be", "resolution_model / min(image_size) if psm < minimum_resolution: print(\"EXCEPTION: The size", "# If string, convert to Path objects path_testing_image = convert_path(path_testing_image)", "to use related to the trained modeL. :param type_acquisition: String,", "pixel size value.\" ) sys.exit(3) # Performing the segmentation over", "pixel_size_in_micrometer.txt \\n'+ 'file needs to be added to the image", "the folder where all image folders are located (the images", "str(args[\"type\"]) verbosity_level = int(args[\"verbose\"]) overlap_value = int(args[\"overlap\"]) if args[\"sizepixel\"] is", "AxonDeepSeg.ads_utils import convert_path # Global variables SEM_DEFAULT_MODEL_NAME = \"default_SEM_model_v1\" TEM_DEFAULT_MODEL_NAME", "loop def main(argv=None): ''' Main loop. :return: Exit code. 0:", "'config_network.json' config = generate_config_dict(path_config_file) return path_model, config def generate_config_dict(path_to_config_file): '''", "path_testing_image.parts acquisition_name = Path(path_parts[-1]) path_acquisition = Path(*path_parts[:-1]) # Get type", "by selecting only image files (not already segmented or not", "segmented.\".format(path_testing_image))) # Remove temporary file used for the segmentation fp.close()", "sample. \\n'+ '3: Also displays the patch number being processed", "validExtensions: # Handle cases if no resolution is provided on", "the image(s) to segment, in micrometers. \\n'+ 'If no pixel", "config_network.json is located. :return: dict containing the configuration of the", "path_testing_images_folder. 
:param path_testing_images_folder: the folder where all image folders are", "= ads.imread(str(path_testing_images_folder / file_)) # Generate tmpfile for segmentation pipeline", "a size of {0} after resampling to that resolution.\\n\".format(round(acquired_resolution *", "increase the segmentation time. \\n'+ 'Default value: '+str(default_overlap)+'\\n'+ 'Recommended range", "is given about the segmentation process. :return: Nothing. ''' #", "loop. :return: Exit code. 0: Success 2: Invalid argument value", "image to segment or path to the folder \\n'+ 'where", "in those image folders) :param path_model: where to access the", "json import argparse from argparse import RawTextHelpFormatter from tqdm import", "(not already segmented or not masks) img_files = [file for", "the input. :return: Float, the resolution of the model. '''", "pixel size \\n'+ 'in that file will be used for", "MODELS_PATH = pkg_resources.resource_filename('AxonDeepSeg', 'models') MODELS_PATH = Path(MODELS_PATH) default_SEM_path = MODELS_PATH", "is located. :param path_to_config: relative path where the file config_network.json", "Read image img = ads.imread(str(path_testing_image)) # Generate tmp file fp", "exist.\".format(path_testing_image))) return None def segment_folders(path_testing_images_folder, path_model, overlap_value, config, resolution_model, acquired_resolution", "verbosity_level: Level of verbosity. The higher, the more information is", "[Path(p) for p in args[\"imgpath\"]] new_path = Path(args[\"model\"]) if args[\"model\"]", "path to the folder \\n'+ 'where the image(s) to segment", "to that resolution.\\n\".format(round(psm * min(image_size) / resolution_model)), \"Image file location:", "ads.imwrite(fp,255-img, format='png') else: ads.imwrite(fp,img, format='png') acquisition_name = Path(fp.name).name segmented_image_name =", "/ file_)).shape except Exception as e: raise e image_size =", "acquisition to segment. \\n'+ 'SEM: scanning electron microscopy samples. 
\\n'+", "folders located in the path_testing_images_folder. :param path_testing_images_folder: the folder where", "''' Segment the image located at the path_testing_image location. :param", "at patch borders, \\n'+ 'but also increase the segmentation time.", "minimum patch size after resizing. try: height, width, _ =", "so read it. if (current_path_target.parent / 'pixel_size_in_micrometer.txt').exists(): resolution_file = open(current_path_target.parent", "for file in path_testing_images_folder.iterdir() if (file.suffix.lower() in ('.png','.jpg','.jpeg','.tif','.tiff')) and (not", "inference_batch_size=1, overlap_value=overlap_value, segmentations_filenames=[segmented_image_name], acquired_resolution=acquired_resolution, verbosity_level=verbosity_level, resampled_resolutions=resolution_model, prediction_proba_activate=False, write_mode=True) if verbosity_level", "to Path objects path_testing_image = convert_path(path_testing_image) path_model = convert_path(path_model) if", "folders in the specified folder containing acquisitions to segment. segment_folders(current_path_target,", "None if no configuration file was found at the mentioned", "current_path_target.is_dir(): if current_path_target.suffix.lower() in validExtensions: # Handle cases if no", "current_path_target.suffix.lower() in validExtensions: # Handle cases if no resolution is", "print((\"Image {0} segmented.\".format(path_testing_image))) # Remove temporary file used for the", "required=False, help='Folder where the model is located. \\n'+ 'The default", "Path(fp.name).name segmented_image_name = img_name_original + '_seg-axonmyelin' + '.png' # Performing", "it is located. :param path_to_config: relative path where the file", "model_input_size: String or Int, the size of the input. 
:return:", "of pixels to be used for overlap when doing prediction.", "does not exist.\".format(path_testing_image))) return None def segment_folders(path_testing_images_folder, path_model, overlap_value, config,", "= argparse.ArgumentParser(formatter_class=RawTextHelpFormatter) requiredName = ap.add_argument_group('required arguments') # Setting the arguments", "segmentation process. :return: Nothing. ''' # If string, convert to", "\"default_TEM_model_v1\": ads.imwrite(fp,255-img, format='png') else: ads.imwrite(fp, img, format='png') acquisition_name = Path(fp.name).name", "\\n'+ ' for the segmentation of current sample. \\n'+ '3:", "MODELS_PATH / TEM_DEFAULT_MODEL_NAME default_overlap = 25 # Definition of the", "None def generate_default_parameters(type_acquisition, new_path): ''' Generates the parameters used for", "on the CLI if psm == None: # Check if", "ckpt_name='model', inference_batch_size=1, overlap_value=overlap_value, segmentations_filenames=segmented_image_name, resampled_resolutions=resolution_model, verbosity_level=verbosity_level, acquired_resolution=acquired_resolution, prediction_proba_activate=False, write_mode=True) if", "we are using selected_model = path_model.name # Read image img", "resampling to that resolution.\\n\".format(round(psm * min(image_size) / resolution_model)), \"Image file", "is not None: psm = float(args[\"sizepixel\"]) else: psm = None", "for segmentation for the default model corresponding to the type_model", "default=25) ap._action_groups.reverse() # Processing the arguments args = vars(ap.parse_args(argv)) type_", "relative path where the file config_network.json is located. :return: dict", "process. :return: Nothing. 
''' # If string, convert to Path", "file_ in tqdm(img_files, desc=\"Segmentation...\"): print(path_testing_images_folder / file_) try: height, width,", "(path_testing_images_folder / '__tmp_segment__.png').unlink() return None def generate_default_parameters(type_acquisition, new_path): ''' Generates", "+ '_seg-axonmyelin' + '.png' # Performing the segmentation axon_segmentation(path_acquisitions_folders=path_acquisition, acquisitions_filenames=[acquisition_name],", "tqdm.write(\"Image {0} segmented.\".format(str(path_testing_images_folder / file_))) # Remove temporary file used", "resolution_model)), \"Image file location: {0}\".format(current_path_target) ) sys.exit(2) # Performing the", "= Path(MODELS_PATH) default_SEM_path = MODELS_PATH / SEM_DEFAULT_MODEL_NAME default_TEM_path = MODELS_PATH", "acquired_resolution = None, verbosity_level=0): ''' Segment the image located at", "prediction_proba_activate=False, write_mode=True) if verbosity_level >= 1: tqdm.write(\"Image {0} segmented.\".format(str(path_testing_images_folder /", "to be used for overlap when doing prediction. Higher value", "of the segmentation requiredName.add_argument('-t', '--type', required=True, choices=['SEM','TEM'], help='Type of acquisition", "located. 
:return: dict containing the configuration of the network, or", "user segment automatically one or many images based on the", "def segment_folders(path_testing_images_folder, path_model, overlap_value, config, resolution_model, acquired_resolution = None, verbosity_level=0):", "path_testing_images_folder.iterdir() if (file.suffix.lower() in ('.png','.jpg','.jpeg','.tif','.tiff')) and (not str(file).endswith(('_seg-axonmyelin.png','_seg-axon.png','_seg-myelin.png','mask.png')))] # Pre-processing:", "'_seg-axonmyelin' + '.png' # Performing the segmentation axon_segmentation(path_acquisitions_folders=path_acquisition, acquisitions_filenames=[acquisition_name], path_model_folder=path_model,", "ads.imread(str(path_testing_images_folder / file_)).shape except: try: height, width = ads.imread(str(path_testing_images_folder /", "------------------- # This script lets the user segment automatically one", "pixel size (using argument -s), or add a pixel_size_in_micrometer.txt file", "psm = None path_target_list = [Path(p) for p in args[\"imgpath\"]]", "all paths passed into arguments for current_path_target in path_target_list: if", "image folder. \", \"Please provide a pixel size (using argument", "ads.imwrite(fp, img, format='png') acquisition_name = Path(fp.name).name segmented_image_name = img_name_original +", "Going through all paths passed into arguments for current_path_target in", "requested model if it exists and was supplied, else we", "'in that file will be used for the segmentation.', default=None)", "image name and its folder path from the total path.", "# Definition of the functions def segment_image(path_testing_image, path_model, overlap_value, config,", "CLI if psm == None: # Check if a pixel", "value: '+str(default_overlap)+'\\n'+ 'Recommended range of values: [10-100]. \\n', default=25) ap._action_groups.reverse()", "the resolution the model was trained on. 
:param verbosity_level: Level", "Success 2: Invalid argument value 3: Missing value or file", "= vars(ap.parse_args(argv)) type_ = str(args[\"type\"]) verbosity_level = int(args[\"verbose\"]) overlap_value =", "folders are located (the images to segment are located in", "path_model_folder=path_model, config_dict=config, ckpt_name='model', inference_batch_size=1, overlap_value=overlap_value, segmentations_filenames=segmented_image_name, resampled_resolutions=resolution_model, verbosity_level=verbosity_level, acquired_resolution=acquired_resolution, prediction_proba_activate=False,", "img_name_original = file_.stem if selected_model == \"default_TEM_model_v1\": ads.imwrite(fp,255-img, format='png') else:", "containing the configuration of the network, or None if no", "segmentation time. \\n'+ 'Default value: '+str(default_overlap)+'\\n'+ 'Recommended range of values:", "one of the images ({0}x{1}) is too small for the", "# Imports import sys from pathlib import Path import json", "else: print((\"The path {0} does not exist.\".format(path_testing_image))) return None def", "the dimensions of the image has a size of {0}", "= generate_default_parameters(type_, new_path) resolution_model = generate_resolution(type_, config[\"trainingset_patchsize\"]) # Tuple of", "name and its folder path from the total path. 
path_parts", "objects new_path = convert_path(new_path) # Building the path of the", "the segmentation.', default=None) ap.add_argument('-v', '--verbose', required=False, type=int, choices=list(range(0,4)), help='Verbosity level.", "the pixel size value.\" ) sys.exit(3) # Performing the segmentation", "= convert_path(path_model) if path_testing_image.exists(): # Extracting the image name and", "that file will be used for the segmentation.', default=None) ap.add_argument('-v',", "acquisition_name.stem if selected_model == \"default_TEM_model_v1\": ads.imwrite(fp,255-img, format='png') else: ads.imwrite(fp, img,", "of the dimensions of the image has a size of", "the user segment automatically one or many images based on", "current sample. \\n'+ '3: Also displays the patch number being", "patch number being processed in the current sample.', default=0) ap.add_argument('-o',", "path from the total path. path_parts = path_testing_image.parts acquisition_name =", "the image to segment or path to the folder \\n'+", "def main(argv=None): ''' Main loop. :return: Exit code. 0: Success", "ap.add_argument('-o', '--overlap', required=False, type=int, help='Overlap value (in pixels) of the", "Performing the segmentation axon_segmentation(path_acquisitions_folders=path_acquisition, acquisitions_filenames=[acquisition_name], path_model_folder=path_model, config_dict=config, ckpt_name='model', inference_batch_size=1, overlap_value=overlap_value,", "Path objects path_testing_images_folder = convert_path(path_testing_images_folder) path_model = convert_path(path_model) # Update", "was supplied, else we load the default model. if type_acquisition", "else: print(\"ERROR: No pixel size is provided, and there is", "Nothing. ''' # If string, convert to Path objects path_testing_images_folder", "of {0} after resampling to that resolution.\\n\".format(round(acquired_resolution * min(image_size) /", "' for the segmentation of current sample. 
\\n'+ '3: Also", "segmented.\".format(str(path_testing_images_folder / file_))) # Remove temporary file used for the", "resolution_model), \"One of the dimensions of the image has a", "to create standard sized patches.\\n\".format(config[\"trainingset_patchsize\"], resolution_model), \"One of the dimensions", "/ file_))) # Remove temporary file used for the segmentation", "path where it is located. :param path_to_config: relative path where", "displays the information about the prediction step \\n'+ ' for", "file_)).shape except: try: height, width = ads.imread(str(path_testing_images_folder / file_)).shape except", "or # TEM. # # <NAME> - 2017-08-30 # Imports", "'TEM': if (new_path is not None) and new_path.exists(): path_model =", "v.{}'.format(AxonDeepSeg.__version__))) ap = argparse.ArgumentParser(formatter_class=RawTextHelpFormatter) requiredName = ap.add_argument_group('required arguments') # Setting", "path of the requested model if it exists and was", "Invalid argument value 3: Missing value or file ''' print(('AxonDeepSeg", "acquired_resolution = None, verbosity_level=0): ''' Segments the images contained in", "step \\n'+ ' for the segmentation of current sample. \\n'+", "argparse from argparse import RawTextHelpFormatter from tqdm import tqdm import", "SEM or # TEM. # # <NAME> - 2017-08-30 #", "return config_network def generate_resolution(type_acquisition, model_input_size): ''' Generates the resolution to", "for the segmentation.', default=None) ap.add_argument('-v', '--verbose', required=False, type=int, choices=list(range(0,4)), help='Verbosity", "'SEM': if (new_path is not None) and new_path.exists(): path_model =", "and (not str(file).endswith(('_seg-axonmyelin.png','_seg-axon.png','_seg-myelin.png','mask.png')))] # Pre-processing: convert to png if not", "the patches when doing the segmentation. \\n'+ 'Higher values of", "was found at the mentioned path. 
''' # If string,", "= [Path(p) for p in args[\"imgpath\"]] new_path = Path(args[\"model\"]) if", "import json import argparse from argparse import RawTextHelpFormatter from tqdm", "the input path(s) and try again.\") break else: # Handle", "are located in those image folders) :param path_model: where to", "AxonDeepSeg import AxonDeepSeg.ads_utils as ads from AxonDeepSeg.apply_model import axon_segmentation from", "type_acquisition: String, \"SEM\" or \"TEM\" :param model_input_size: String or Int,", "segment. segment_folders(current_path_target, path_model, overlap_value, config, resolution_model, acquired_resolution=psm, verbosity_level=verbosity_level) print(\"Segmentation finished.\")", "img = ads.imread(str(path_testing_image)) # Generate tmp file fp = open(path_acquisition", "the provided pixel size ({2}).\\n\".format(height, width, acquired_resolution), \"The image size", "of the configuration file from the path where it is", "electron microscopy samples. ') requiredName.add_argument('-i', '--imgpath', required=True, nargs='+', help='Path to", "= generate_resolution(type_, config[\"trainingset_patchsize\"]) # Tuple of valid file extensions validExtensions", "provided, and there is no pixel_size_in_micrometer.txt file in image folder.", ":param type_model: String, the type of model to get the", "as ads from AxonDeepSeg.apply_model import axon_segmentation from AxonDeepSeg.ads_utils import convert_path", "dict containing the configuration of the network :param resolution_model: the", "the type_model acquisition. :param type_model: String, the type of model", "folder path from the total path. path_parts = path_testing_image.parts acquisition_name", "model was trained on. :param verbosity_level: Level of verbosity. The", "\".tiff\", \".png\" ) # Going through all paths passed into", "on the default segmentation models: SEM or # TEM. #", "overlap when doing prediction. Higher value means less border effects", "path of the image to segment. 
:param path_model: where to", "image(s) to segment is/are located.') ap.add_argument(\"-m\", \"--model\", required=False, help='Folder where", "folder. \", \"Please provide a pixel size (using argument -s),", "\", \"containing the pixel size value.\" ) sys.exit(3) # Check", "at least {0}x{0} after resampling to a resolution of {1}", "the image(s) being segmented. \\n'+ '2: Also displays the information", "string, convert to Path objects new_path = convert_path(new_path) # Building", "str(file).endswith(('_seg-axonmyelin.png','_seg-axon.png','_seg-myelin.png','mask.png')))] # Pre-processing: convert to png if not already done", "path is: \\n'+str(default_TEM_path)+'\\n') ap.add_argument('-s', '--sizepixel', required=False, help='Pixel size of the", "convert to Path objects path_to_config_file = convert_path(path_to_config_file) try: with open(path_to_config_file,", "to segment. \\n'+ 'SEM: scanning electron microscopy samples. \\n'+ 'TEM:", "enough for given resolution to reach minimum patch size after", "pixel size is provided, and there is no pixel_size_in_micrometer.txt file", "size of the input. :return: Float, the resolution of the", "verbosity_level=verbosity_level, resampled_resolutions=resolution_model, prediction_proba_activate=False, write_mode=True) if verbosity_level >= 1: tqdm.write(\"Image {0}", "total path. path_parts = path_testing_image.parts acquisition_name = Path(path_parts[-1]) path_acquisition =", "file_)) ) sys.exit(2) selected_model = path_model.name # Read image for", "read it. 
if (current_path_target.parent / 'pixel_size_in_micrometer.txt').exists(): resolution_file = open(current_path_target.parent /", "Update list of images to segment by selecting only image", "size of {0} after resampling to that resolution.\\n\".format(round(acquired_resolution * min(image_size)", "else: path_model = MODELS_PATH / TEM_DEFAULT_MODEL_NAME path_config_file = path_model /", "network, or None if no configuration file was found at", "transmission electron microscopy samples. ') requiredName.add_argument('-i', '--imgpath', required=True, nargs='+', help='Path", "(current_path_target.parent / 'pixel_size_in_micrometer.txt').exists(): resolution_file = open(current_path_target.parent / 'pixel_size_in_micrometer.txt', 'r') psm", "Path objects path_to_config_file = convert_path(path_to_config_file) try: with open(path_to_config_file, 'r') as", "\"default_TEM_model_v1\" MODELS_PATH = pkg_resources.resource_filename('AxonDeepSeg', 'models') MODELS_PATH = Path(MODELS_PATH) default_SEM_path =", "Main loop. :return: Exit code. 0: Success 2: Invalid argument", "segment_folders(path_testing_images_folder, path_model, overlap_value, config, resolution_model, acquired_resolution = None, verbosity_level=0): '''", "try: with open(path_to_config_file, 'r') as fd: config_network = json.loads(fd.read()) except:", "to perform the segmentation. :param config: dict containing the configuration", "import RawTextHelpFormatter from tqdm import tqdm import pkg_resources import AxonDeepSeg", "segmentation at patch borders, \\n'+ 'but also increase the segmentation", "img_files = [file for file in path_testing_images_folder.iterdir() if (file.suffix.lower() in", "= open(current_path_target.parent / 'pixel_size_in_micrometer.txt', 'r') psm = float(resolution_file.read()) else: print(\"ERROR:", "pixel size file exists, if so read it. 
if (current_path_target", "arguments of the segmentation requiredName.add_argument('-t', '--type', required=True, choices=['SEM','TEM'], help='Type of", "related to the trained modeL. :param type_acquisition: String, \"SEM\" or", "mentioned path. ''' # If string, convert to Path objects", "model we are using selected_model = path_model.name # Read image", "resolution of the model. ''' dict_size = { \"SEM\":{ \"512\":0.1,", "dictionary. ''' # If string, convert to Path objects new_path", "acquisition. :param type_model: String, the type of model to get", "/ '__tmp_segment__.png', 'wb+') img_name_original = file_.stem if selected_model == \"default_TEM_model_v1\":", "at the mentioned path. ''' # If string, convert to", "network :param resolution_model: the resolution the model was trained on.", "path_config_file = path_model / 'config_network.json' config = generate_config_dict(path_config_file) return path_model,", "string, convert to Path objects path_testing_images_folder = convert_path(path_testing_images_folder) path_model =", "where it is located. :param path_to_config: relative path where the", "size of {0} after resampling to that resolution.\\n\".format(round(psm * min(image_size)", "verbosity_level >= 1: print((\"Image {0} segmented.\".format(path_testing_image))) # Remove temporary file", "segmentation.', default=None) ap.add_argument('-v', '--verbose', required=False, type=int, choices=list(range(0,4)), help='Verbosity level. \\n'+", "/ 'pixel_size_in_micrometer.txt').exists(): resolution_file = open(current_path_target.parent / 'pixel_size_in_micrometer.txt', 'r') psm =", "1: print((\"Image {0} segmented.\".format(path_testing_image))) # Remove temporary file used for", "temporary file used for the segmentation fp.close() (path_testing_images_folder / '__tmp_segment__.png').unlink()", "if so read it. 
if (current_path_target / 'pixel_size_in_micrometer.txt').exists(): resolution_file =", "= int(args[\"verbose\"]) overlap_value = int(args[\"overlap\"]) if args[\"sizepixel\"] is not None:", "\\n'+ '3: Also displays the patch number being processed in", "axon_segmentation(path_acquisitions_folders=path_acquisition, acquisitions_filenames=[acquisition_name], path_model_folder=path_model, config_dict=config, ckpt_name='model', inference_batch_size=1, overlap_value=overlap_value, segmentations_filenames=segmented_image_name, resampled_resolutions=resolution_model, verbosity_level=verbosity_level,", "required=False, type=int, choices=list(range(0,4)), help='Verbosity level. \\n'+ '0 (default) : Displays", "the model :param overlap_value: the number of pixels to be", "img, format='png') acquisition_name = Path(fp.name).name segmented_image_name = img_name_original + '_seg-axonmyelin'", "config[\"trainingset_patchsize\"] * resolution_model / min(image_size) if acquired_resolution < minimum_resolution: print(\"EXCEPTION:", "read it. if (current_path_target / 'pixel_size_in_micrometer.txt').exists(): resolution_file = open(current_path_target /", "{0} does not exist.\".format(path_testing_image))) return None def segment_folders(path_testing_images_folder, path_model, overlap_value,", "resolution of {1} to create standard sized patches.\\n\".format(config[\"trainingset_patchsize\"], resolution_model), \"One", "\\n'+ 'Higher values of overlap can improve the segmentation at", "paths passed into arguments for current_path_target in path_target_list: if not", "where the file config_network.json is located. :return: dict containing the", "electron microscopy samples. \\n'+ 'TEM: transmission electron microscopy samples. ')", "return None def segment_folders(path_testing_images_folder, path_model, overlap_value, config, resolution_model, acquired_resolution =", "the images ({0}x{1}) is too small for the provided pixel", "default model. 
if type_acquisition == 'SEM': if (new_path is not", "if selected_model == \"default_TEM_model_v1\": ads.imwrite(fp,255-img, format='png') else: ads.imwrite(fp, img, format='png')", "value.\" ) sys.exit(3) # Check that image size is large", "''' print(('AxonDeepSeg v.{}'.format(AxonDeepSeg.__version__))) ap = argparse.ArgumentParser(formatter_class=RawTextHelpFormatter) requiredName = ap.add_argument_group('required arguments')", "= path_model.name # Read image for conversion img = ads.imread(str(path_testing_images_folder", "a pixel_size_in_micrometer.txt file \", \"containing the pixel size value.\" )", "for file_ in tqdm(img_files, desc=\"Segmentation...\"): print(path_testing_images_folder / file_) try: height,", "verbosity_level=0): ''' Segments the images contained in the image folders", "selected_model = path_model.name # Read image for conversion img =", "path_testing_image = convert_path(path_testing_image) path_model = convert_path(path_model) if path_testing_image.exists(): # Extracting", "width, _ = ads.imread(str(current_path_target)).shape except: try: height, width = ads.imread(str(current_path_target)).shape", "required=False, type=int, help='Overlap value (in pixels) of the patches when", "images contained in the image folders located in the path_testing_images_folder.", "the model to use. :return: the config dictionary. ''' #", "if type_acquisition == 'SEM': if (new_path is not None) and", "segmented. \\n'+ '2: Also displays the information about the prediction", "segmented or not masks) img_files = [file for file in", "image to segment. :param path_model: where to access the model", "location. 
:param path_testing_image: the path of the image to segment.", "acquisition_name = Path(path_parts[-1]) path_acquisition = Path(*path_parts[:-1]) # Get type of", "resampled_resolutions=resolution_model, verbosity_level=verbosity_level, acquired_resolution=acquired_resolution, prediction_proba_activate=False, write_mode=True) if verbosity_level >= 1: print((\"Image", "of model to get the parameters from. :param new_path: Path", "# If string, convert to Path objects path_to_config_file = convert_path(path_to_config_file)", "reach minimum patch size after resizing. try: height, width, _", "that resolution.\\n\".format(round(acquired_resolution * min(image_size) / resolution_model)), \"Image file location: {0}\".format(str(path_testing_images_folder", "\\n'+ '2: Also displays the information about the prediction step", "files (not already segmented or not masks) img_files = [file", "validExtensions = ( \".jpeg\", \".jpg\", \".tif\", \".tiff\", \".png\" ) #", "from the total path. path_parts = path_testing_image.parts acquisition_name = Path(path_parts[-1])", "to the image folder path. The pixel size \\n'+ 'in", "Read image for conversion img = ads.imread(str(path_testing_images_folder / file_)) #", ": Displays the progress bar for the segmentation. \\n'+ '1:", "in image folder. 
\", \"Please provide a pixel size (using", "Definition of the functions def segment_image(path_testing_image, path_model, overlap_value, config, resolution_model,", "{0}\".format(current_path_target) ) sys.exit(2) # Performing the segmentation over the image", "2: Invalid argument value 3: Missing value or file '''", "the arguments of the segmentation requiredName.add_argument('-t', '--type', required=True, choices=['SEM','TEM'], help='Type", ":param type_acquisition: String, \"SEM\" or \"TEM\" :param model_input_size: String or", "to the image to segment or path to the folder", "used for the segmentation fp.close() (path_testing_images_folder / '__tmp_segment__.png').unlink() return None", "Int, the size of the input. :return: Float, the resolution", "into arguments for current_path_target in path_target_list: if not current_path_target.is_dir(): if", "'Default value: '+str(default_overlap)+'\\n'+ 'Recommended range of values: [10-100]. \\n', default=25)", "'--type', required=True, choices=['SEM','TEM'], help='Type of acquisition to segment. \\n'+ 'SEM:", "located. \\n'+ 'The default SEM model path is: \\n'+str(default_SEM_path)+'\\n'+ 'The", "path_model = new_path else: path_model = MODELS_PATH / TEM_DEFAULT_MODEL_NAME path_config_file", "given about the segmentation process. :return: Nothing. ''' # If", "for the segmentation of current sample. 
\\n'+ '3: Also displays", "path_model_folder=path_model, config_dict=config, ckpt_name='model', inference_batch_size=1, overlap_value=overlap_value, segmentations_filenames=[segmented_image_name], acquired_resolution=acquired_resolution, verbosity_level=verbosity_level, resampled_resolutions=resolution_model, prediction_proba_activate=False,", "ValueError(\"No configuration file available at this path.\") return config_network def", "Path(fp.name).name segmented_image_name = img_name_original + '_seg-axonmyelin' + '.png' axon_segmentation(path_acquisitions_folders=path_testing_images_folder, acquisitions_filenames=[acquisition_name],", "( \".jpeg\", \".jpg\", \".tif\", \".tiff\", \".png\" ) # Going through", "= open(path_acquisition / '__tmp_segment__.png', 'wb+') img_name_original = acquisition_name.stem if selected_model", "Displays the progress bar for the segmentation. \\n'+ '1: Also", "[file for file in path_testing_images_folder.iterdir() if (file.suffix.lower() in ('.png','.jpg','.jpeg','.tif','.tiff')) and", "size value.\" ) sys.exit(3) # Check that image size is", "TEM_DEFAULT_MODEL_NAME default_overlap = 25 # Definition of the functions def", "'2: Also displays the information about the prediction step \\n'+", "through all paths passed into arguments for current_path_target in path_target_list:", "the provided pixel size ({2}).\\n\".format(height, width, psm), \"The image size", "for p in args[\"imgpath\"]] new_path = Path(args[\"model\"]) if args[\"model\"] else", "small for the provided pixel size ({2}).\\n\".format(height, width, psm), \"The", "path_model, config = generate_default_parameters(type_, new_path) resolution_model = generate_resolution(type_, config[\"trainingset_patchsize\"]) #", "create standard sized patches.\\n\".format(config[\"trainingset_patchsize\"], resolution_model), \"One of the dimensions of", "convert to Path objects path_testing_images_folder = convert_path(path_testing_images_folder) path_model = convert_path(path_model)", 
"argument -s), or add a pixel_size_in_micrometer.txt file \", \"containing the", "or file ''' print(('AxonDeepSeg v.{}'.format(AxonDeepSeg.__version__))) ap = argparse.ArgumentParser(formatter_class=RawTextHelpFormatter) requiredName =", "when doing the segmentation. \\n'+ 'Higher values of overlap can", "must be at least {0}x{0} after resampling to a resolution", "# Read image img = ads.imread(str(path_testing_image)) # Generate tmp file", "= path_model / 'config_network.json' config = generate_config_dict(path_config_file) return path_model, config", "config def generate_config_dict(path_to_config_file): ''' Generates the dictionary version of the", "size is specified, a pixel_size_in_micrometer.txt \\n'+ 'file needs to be", "'r') psm = float(resolution_file.read()) else: print(\"ERROR: No pixel size is", "tqdm import pkg_resources import AxonDeepSeg import AxonDeepSeg.ads_utils as ads from", "type of model to get the parameters from. :param new_path:", "the segmentation process. :return: Nothing. ''' # If string, convert", "of valid file extensions validExtensions = ( \".jpeg\", \".jpg\", \".tif\",", "# Check that image size is large enough for given", "to segment by selecting only image files (not already segmented", "overlap_value, config, resolution_model, acquired_resolution = None, verbosity_level=0): ''' Segment the", "/ min(image_size) if acquired_resolution < minimum_resolution: print(\"EXCEPTION: The size of", "not masks) img_files = [file for file in path_testing_images_folder.iterdir() if", ") sys.exit(3) # Performing the segmentation over all folders in", "input. :return: Float, the resolution of the model. 
''' dict_size", "= path_testing_image.parts acquisition_name = Path(path_parts[-1]) path_acquisition = Path(*path_parts[:-1]) # Get", "for conversion img = ads.imread(str(path_testing_images_folder / file_)) # Generate tmpfile", "acquired_resolution=psm, verbosity_level=verbosity_level) print(\"Segmentation finished.\") sys.exit(0) # Calling the script if", "values of overlap can improve the segmentation at patch borders,", "try: height, width, _ = ads.imread(str(path_testing_images_folder / file_)).shape except: try:", "\", \"containing the pixel size value.\" ) sys.exit(3) # Performing", "else: print(\"The path(s) specified is/are not image(s). Please update the", ":param overlap_value: the number of pixels to be used for", "resolution_file = open(current_path_target / 'pixel_size_in_micrometer.txt', 'r') psm = float(resolution_file.read()) else:", "image(s) to segment, in micrometers. \\n'+ 'If no pixel size", "default segmentation models: SEM or # TEM. # # <NAME>", "in the path_testing_images_folder. :param path_testing_images_folder: the folder where all image", "of the input. :return: Float, the resolution of the model.", "img_name_original + '_seg-axonmyelin' + '.png' # Performing the segmentation axon_segmentation(path_acquisitions_folders=path_acquisition,", "containing acquisitions to segment. segment_folders(current_path_target, path_model, overlap_value, config, resolution_model, acquired_resolution=psm,", "else: ads.imwrite(fp,img, format='png') acquisition_name = Path(fp.name).name segmented_image_name = img_name_original +", "number being processed in the current sample.', default=0) ap.add_argument('-o', '--overlap',", "use. :return: the config dictionary. ''' # If string, convert", "finished.\") sys.exit(0) # Calling the script if __name__ == '__main__':", "help='Verbosity level. \\n'+ '0 (default) : Displays the progress bar", ":param verbosity_level: Level of verbosity. 
The higher, the more information", "of the network, or None if no configuration file was", "variables SEM_DEFAULT_MODEL_NAME = \"default_SEM_model_v1\" TEM_DEFAULT_MODEL_NAME = \"default_TEM_model_v1\" MODELS_PATH = pkg_resources.resource_filename('AxonDeepSeg',", ":param path_model: where to access the model :param overlap_value: the", "= MODELS_PATH / TEM_DEFAULT_MODEL_NAME path_config_file = path_model / 'config_network.json' config", "= \"default_SEM_model_v1\" TEM_DEFAULT_MODEL_NAME = \"default_TEM_model_v1\" MODELS_PATH = pkg_resources.resource_filename('AxonDeepSeg', 'models') MODELS_PATH", "MODELS_PATH / SEM_DEFAULT_MODEL_NAME elif type_acquisition == 'TEM': if (new_path is", "= ( \".jpeg\", \".jpg\", \".tif\", \".tiff\", \".png\" ) # Going", "extensions validExtensions = ( \".jpeg\", \".jpg\", \".tif\", \".tiff\", \".png\" )", "objects path_testing_images_folder = convert_path(path_testing_images_folder) path_model = convert_path(path_model) # Update list", "if selected_model == \"default_TEM_model_v1\": ads.imwrite(fp,255-img, format='png') else: ads.imwrite(fp,img, format='png') acquisition_name", "# Remove temporary file used for the segmentation fp.close() (path_testing_images_folder", "print(\"The path(s) specified is/are not image(s). Please update the input", "'The default TEM model path is: \\n'+str(default_TEM_path)+'\\n') ap.add_argument('-s', '--sizepixel', required=False,", "access the model. :param overlap_value: the number of pixels to", "resolution_model)), \"Image file location: {0}\".format(str(path_testing_images_folder / file_)) ) sys.exit(2) selected_model", "= float(args[\"sizepixel\"]) else: psm = None path_target_list = [Path(p) for", "path of the image(s) being segmented. 
\\n'+ '2: Also displays", "file_)).shape except Exception as e: raise e image_size = [height,", "\"SEM\" or \"TEM\" :param model_input_size: String or Int, the size", "MODELS_PATH / TEM_DEFAULT_MODEL_NAME path_config_file = path_model / 'config_network.json' config =", "If string, convert to Path objects new_path = convert_path(new_path) #", "acquisitions_filenames=[acquisition_name], path_model_folder=path_model, config_dict=config, ckpt_name='model', inference_batch_size=1, overlap_value=overlap_value, segmentations_filenames=segmented_image_name, resampled_resolutions=resolution_model, verbosity_level=verbosity_level, acquired_resolution=acquired_resolution,", "image folder path. The pixel size \\n'+ 'in that file", "border effects but more time to perform the segmentation. :param", "= open(path_testing_images_folder / '__tmp_segment__.png', 'wb+') img_name_original = file_.stem if selected_model", "trained on. :param verbosity_level: Level of verbosity. The higher, the", "verbosity_level = int(args[\"verbose\"]) overlap_value = int(args[\"overlap\"]) if args[\"sizepixel\"] is not", "config, resolution_model, acquired_resolution = None, verbosity_level=0): ''' Segment the image", "the more information is given about the segmentation process. :return:", "microscopy samples. \\n'+ 'TEM: transmission electron microscopy samples. ') requiredName.add_argument('-i',", "segment. \\n'+ 'SEM: scanning electron microscopy samples. \\n'+ 'TEM: transmission", "file was found at the mentioned path. ''' # If", "* min(image_size) / resolution_model)), \"Image file location: {0}\".format(current_path_target) ) sys.exit(2)", "script # ------------------- # This script lets the user segment", "\".png\" ) # Going through all paths passed into arguments", "for the provided pixel size ({2}).\\n\".format(height, width, psm), \"The image", "contained in the image folders located in the path_testing_images_folder. :param", "patch borders, \\n'+ 'but also increase the segmentation time. 
\\n'+", "format='png') acquisition_name = Path(fp.name).name segmented_image_name = img_name_original + '_seg-axonmyelin' +", "the images contained in the image folders located in the", "adapt to model contrast for file_ in tqdm(img_files, desc=\"Segmentation...\"): print(path_testing_images_folder", "resolution_model, acquired_resolution=psm, verbosity_level=verbosity_level) print(\"Segmentation finished.\") sys.exit(0) # Calling the script", "\\n'+ 'The default SEM model path is: \\n'+str(default_SEM_path)+'\\n'+ 'The default", "of one of the images ({0}x{1}) is too small for", "''' Generates the parameters used for segmentation for the default", "Path(*path_parts[:-1]) # Get type of model we are using selected_model", "Path objects path_testing_image = convert_path(path_testing_image) path_model = convert_path(path_model) if path_testing_image.exists():", "valid file extensions validExtensions = ( \".jpeg\", \".jpg\", \".tif\", \".tiff\",", "argparse import RawTextHelpFormatter from tqdm import tqdm import pkg_resources import", "Check if a pixel size file exists, if so read", "MODELS_PATH / SEM_DEFAULT_MODEL_NAME default_TEM_path = MODELS_PATH / TEM_DEFAULT_MODEL_NAME default_overlap =", "None # Preparing the arguments to axon_segmentation function path_model, config", "- 2017-08-30 # Imports import sys from pathlib import Path", "the total path. 
path_parts = path_testing_image.parts acquisition_name = Path(path_parts[-1]) path_acquisition", "print(\"EXCEPTION: The size of one of the images ({0}x{1}) is", "# Pre-processing: convert to png if not already done and", "generate_default_parameters(type_, new_path) resolution_model = generate_resolution(type_, config[\"trainingset_patchsize\"]) # Tuple of valid", "# ------------------- # This script lets the user segment automatically", "except Exception as e: raise e image_size = [height, width]", "Path import json import argparse from argparse import RawTextHelpFormatter from", "= Path(fp.name).name segmented_image_name = img_name_original + '_seg-axonmyelin' + '.png' axon_segmentation(path_acquisitions_folders=path_testing_images_folder,", "import axon_segmentation from AxonDeepSeg.ads_utils import convert_path # Global variables SEM_DEFAULT_MODEL_NAME", "\".jpg\", \".tif\", \".tiff\", \".png\" ) # Going through all paths", "image folders located in the path_testing_images_folder. :param path_testing_images_folder: the folder", "file extensions validExtensions = ( \".jpeg\", \".jpg\", \".tif\", \".tiff\", \".png\"", "\"512\":0.1, \"256\":0.2 }, \"TEM\":{ \"512\":0.01 } } return dict_size[str(type_acquisition)][str(model_input_size)] #", "import pkg_resources import AxonDeepSeg import AxonDeepSeg.ads_utils as ads from AxonDeepSeg.apply_model", "the arguments args = vars(ap.parse_args(argv)) type_ = str(args[\"type\"]) verbosity_level =", "requiredName = ap.add_argument_group('required arguments') # Setting the arguments of the", "available at this path.\") return config_network def generate_resolution(type_acquisition, model_input_size): '''", "path_target_list = [Path(p) for p in args[\"imgpath\"]] new_path = Path(args[\"model\"])", "over all folders in the specified folder containing acquisitions to", ":return: dict containing the configuration of the network, or None", "open(current_path_target / 'pixel_size_in_micrometer.txt', 'r') psm = 
float(resolution_file.read()) else: print(\"ERROR: No", "the path of the image to segment. :param path_model: where", "= convert_path(new_path) # Building the path of the requested model", "the number of pixels to be used for overlap when", "resolution_model, acquired_resolution=psm, verbosity_level=verbosity_level) print(\"Segmentation finished.\") else: print(\"The path(s) specified is/are", "it exists and was supplied, else we load the default", "segmentation fp.close() (path_acquisition / '__tmp_segment__.png').unlink() else: print((\"The path {0} does", "default SEM model path is: \\n'+str(default_SEM_path)+'\\n'+ 'The default TEM model", "axon_segmentation from AxonDeepSeg.ads_utils import convert_path # Global variables SEM_DEFAULT_MODEL_NAME =", "# Performing the segmentation axon_segmentation(path_acquisitions_folders=path_acquisition, acquisitions_filenames=[acquisition_name], path_model_folder=path_model, config_dict=config, ckpt_name='model', inference_batch_size=1,", "in tqdm(img_files, desc=\"Segmentation...\"): print(path_testing_images_folder / file_) try: height, width, _", "\\n'+ '1: Also displays the path of the image(s) being", "arguments args = vars(ap.parse_args(argv)) type_ = str(args[\"type\"]) verbosity_level = int(args[\"verbose\"])", "default=None) ap.add_argument('-v', '--verbose', required=False, type=int, choices=list(range(0,4)), help='Verbosity level. \\n'+ '0", "\\n'+str(default_SEM_path)+'\\n'+ 'The default TEM model path is: \\n'+str(default_TEM_path)+'\\n') ap.add_argument('-s', '--sizepixel',", "= MODELS_PATH / SEM_DEFAULT_MODEL_NAME default_TEM_path = MODELS_PATH / TEM_DEFAULT_MODEL_NAME default_overlap", "Also displays the path of the image(s) being segmented. \\n'+", "''' Segments the images contained in the image folders located", "from. :param new_path: Path to the model to use. 
:return:", "one or many images based on the default segmentation models:", "else: # Handle cases if no resolution is provided on", "path_model, config def generate_config_dict(path_to_config_file): ''' Generates the dictionary version of", "if psm < minimum_resolution: print(\"EXCEPTION: The size of one of", "segment_image(path_testing_image, path_model, overlap_value, config, resolution_model, acquired_resolution = None, verbosity_level=0): '''", "or not masks) img_files = [file for file in path_testing_images_folder.iterdir()", "is provided, and there is no pixel_size_in_micrometer.txt file in image", "file fp = open(path_acquisition / '__tmp_segment__.png', 'wb+') img_name_original = acquisition_name.stem", "model contrast for file_ in tqdm(img_files, desc=\"Segmentation...\"): print(path_testing_images_folder / file_)", "config, resolution_model, acquired_resolution=psm, verbosity_level=verbosity_level) print(\"Segmentation finished.\") else: print(\"The path(s) specified", "* resolution_model / min(image_size) if acquired_resolution < minimum_resolution: print(\"EXCEPTION: The", "AxonDeepSeg.apply_model import axon_segmentation from AxonDeepSeg.ads_utils import convert_path # Global variables", "return dict_size[str(type_acquisition)][str(model_input_size)] # Main loop def main(argv=None): ''' Main loop.", "/ '__tmp_segment__.png').unlink() return None def generate_default_parameters(type_acquisition, new_path): ''' Generates the", "else: ads.imwrite(fp, img, format='png') acquisition_name = Path(fp.name).name segmented_image_name = img_name_original", "of values: [10-100]. \\n', default=25) ap._action_groups.reverse() # Processing the arguments", "the model. ''' dict_size = { \"SEM\":{ \"512\":0.1, \"256\":0.2 },", "generate_config_dict(path_config_file) return path_model, config def generate_config_dict(path_to_config_file): ''' Generates the dictionary", "it. 
if (current_path_target / 'pixel_size_in_micrometer.txt').exists(): resolution_file = open(current_path_target / 'pixel_size_in_micrometer.txt',", "the config dictionary. ''' # If string, convert to Path", "has a size of {0} after resampling to that resolution.\\n\".format(round(acquired_resolution", "being segmented. \\n'+ '2: Also displays the information about the", "be used for overlap when doing prediction. Higher value means", "nargs='+', help='Path to the image to segment or path to", "code. 0: Success 2: Invalid argument value 3: Missing value", "= Path(args[\"model\"]) if args[\"model\"] else None # Preparing the arguments", "convert_path(path_model) if path_testing_image.exists(): # Extracting the image name and its", "the image segment_image(current_path_target, path_model, overlap_value, config, resolution_model, acquired_resolution=psm, verbosity_level=verbosity_level) print(\"Segmentation", "file from the path where it is located. :param path_to_config:", "requiredName.add_argument('-i', '--imgpath', required=True, nargs='+', help='Path to the image to segment", "/ file_) try: height, width, _ = ads.imread(str(path_testing_images_folder / file_)).shape", "new_path else: path_model = MODELS_PATH / SEM_DEFAULT_MODEL_NAME elif type_acquisition ==", "of the image(s) being segmented. \\n'+ '2: Also displays the", "Generates the dictionary version of the configuration file from the", "Path to the model to use. :return: the config dictionary.", "TEM model path is: \\n'+str(default_TEM_path)+'\\n') ap.add_argument('-s', '--sizepixel', required=False, help='Pixel size", "added to the image folder path. 
The pixel size \\n'+", "Path objects new_path = convert_path(new_path) # Building the path of", "Extracting the image name and its folder path from the", "has a size of {0} after resampling to that resolution.\\n\".format(round(psm", ") sys.exit(2) selected_model = path_model.name # Read image for conversion", "= convert_path(path_model) # Update list of images to segment by", "= \"default_TEM_model_v1\" MODELS_PATH = pkg_resources.resource_filename('AxonDeepSeg', 'models') MODELS_PATH = Path(MODELS_PATH) default_SEM_path", "resampling to a resolution of {1} to create standard sized", "MODELS_PATH = Path(MODELS_PATH) default_SEM_path = MODELS_PATH / SEM_DEFAULT_MODEL_NAME default_TEM_path =", "Building the path of the requested model if it exists", "no configuration file was found at the mentioned path. '''", "of the model. ''' dict_size = { \"SEM\":{ \"512\":0.1, \"256\":0.2", "open(path_to_config_file, 'r') as fd: config_network = json.loads(fd.read()) except: raise ValueError(\"No", "Exit code. 0: Success 2: Invalid argument value 3: Missing", "except: try: height, width = ads.imread(str(path_testing_images_folder / file_)).shape except Exception", "file will be used for the segmentation.', default=None) ap.add_argument('-v', '--verbose',", "convert to png if not already done and adapt to", "default TEM model path is: \\n'+str(default_TEM_path)+'\\n') ap.add_argument('-s', '--sizepixel', required=False, help='Pixel", "sample.', default=0) ap.add_argument('-o', '--overlap', required=False, type=int, help='Overlap value (in pixels)", "list of images to segment by selecting only image files", "min(image_size) / resolution_model)), \"Image file location: {0}\".format(current_path_target) ) sys.exit(2) #", "\".tif\", \".tiff\", \".png\" ) # Going through all paths passed", "not None) and new_path.exists(): path_model = new_path else: path_model =", "pixel size file exists, if so read it. 
if (current_path_target.parent", "selected_model == \"default_TEM_model_v1\": ads.imwrite(fp,255-img, format='png') else: ads.imwrite(fp, img, format='png') acquisition_name", "and there is no pixel_size_in_micrometer.txt file in image folder. \",", "\\n'+ '0 (default) : Displays the progress bar for the", "'file needs to be added to the image folder path.", "new_path = Path(args[\"model\"]) if args[\"model\"] else None # Preparing the", "images to segment by selecting only image files (not already", "file exists, if so read it. if (current_path_target.parent / 'pixel_size_in_micrometer.txt').exists():", "''' # If string, convert to Path objects path_to_config_file =", "min(image_size) if psm < minimum_resolution: print(\"EXCEPTION: The size of one", "after resampling to that resolution.\\n\".format(round(psm * min(image_size) / resolution_model)), \"Image", "type_model acquisition. :param type_model: String, the type of model to", "= img_name_original + '_seg-axonmyelin' + '.png' # Performing the segmentation", "/ TEM_DEFAULT_MODEL_NAME path_config_file = path_model / 'config_network.json' config = generate_config_dict(path_config_file)", "of the patches when doing the segmentation. 
\\n'+ 'Higher values", "to segment are located in those image folders) :param path_model:", "raise ValueError(\"No configuration file available at this path.\") return config_network", "= ads.imread(str(current_path_target)).shape except: try: height, width = ads.imread(str(current_path_target)).shape except Exception", "the prediction step \\n'+ ' for the segmentation of current", "= float(resolution_file.read()) else: print(\"ERROR: No pixel size is provided, and", "version of the configuration file from the path where it", "configuration file available at this path.\") return config_network def generate_resolution(type_acquisition,", "default_overlap = 25 # Definition of the functions def segment_image(path_testing_image,", "/ SEM_DEFAULT_MODEL_NAME default_TEM_path = MODELS_PATH / TEM_DEFAULT_MODEL_NAME default_overlap = 25", "resolution_model, acquired_resolution = None, verbosity_level=0): ''' Segment the image located", "type_model: String, the type of model to get the parameters", "\\n'+ 'TEM: transmission electron microscopy samples. ') requiredName.add_argument('-i', '--imgpath', required=True,", "# TEM. # # <NAME> - 2017-08-30 # Imports import", "sized patches.\\n\".format(config[\"trainingset_patchsize\"], resolution_model), \"One of the dimensions of the image", "resolution is provided on the CLI if psm == None:", "the size of the input. :return: Float, the resolution of", "desc=\"Segmentation...\"): print(path_testing_images_folder / file_) try: height, width, _ = ads.imread(str(path_testing_images_folder", "'wb+') img_name_original = file_.stem if selected_model == \"default_TEM_model_v1\": ads.imwrite(fp,255-img, format='png')", "path_model / 'config_network.json' config = generate_config_dict(path_config_file) return path_model, config def", "the parameters from. 
:param new_path: Path to the model to", "for the provided pixel size ({2}).\\n\".format(height, width, acquired_resolution), \"The image", "a resolution of {1} to create standard sized patches.\\n\".format(config[\"trainingset_patchsize\"], resolution_model),", "type of model we are using selected_model = path_model.name #", "borders, \\n'+ 'but also increase the segmentation time. \\n'+ 'Default", "else: path_model = MODELS_PATH / SEM_DEFAULT_MODEL_NAME elif type_acquisition == 'TEM':", "arguments to axon_segmentation function path_model, config = generate_default_parameters(type_, new_path) resolution_model", "'models') MODELS_PATH = Path(MODELS_PATH) default_SEM_path = MODELS_PATH / SEM_DEFAULT_MODEL_NAME default_TEM_path", "type=int, choices=list(range(0,4)), help='Verbosity level. \\n'+ '0 (default) : Displays the", "the parameters used for segmentation for the default model corresponding", "the current sample.', default=0) ap.add_argument('-o', '--overlap', required=False, type=int, help='Overlap value", "to a resolution of {1} to create standard sized patches.\\n\".format(config[\"trainingset_patchsize\"],", "provided pixel size ({2}).\\n\".format(height, width, psm), \"The image size must", "path_model = convert_path(path_model) if path_testing_image.exists(): # Extracting the image name", "get the parameters from. 
:param new_path: Path to the model", "String, the type of model to get the parameters from.", "file \", \"containing the pixel size value.\" ) sys.exit(3) #", "= Path(path_parts[-1]) path_acquisition = Path(*path_parts[:-1]) # Get type of model", "for the segmentation fp.close() (path_acquisition / '__tmp_segment__.png').unlink() else: print((\"The path", "fp = open(path_testing_images_folder / '__tmp_segment__.png', 'wb+') img_name_original = file_.stem if", "many images based on the default segmentation models: SEM or", "write_mode=True) if verbosity_level >= 1: tqdm.write(\"Image {0} segmented.\".format(str(path_testing_images_folder / file_)))", "required=True, nargs='+', help='Path to the image to segment or path", "images ({0}x{1}) is too small for the provided pixel size", "where all image folders are located (the images to segment", "inference_batch_size=1, overlap_value=overlap_value, segmentations_filenames=segmented_image_name, resampled_resolutions=resolution_model, verbosity_level=verbosity_level, acquired_resolution=acquired_resolution, prediction_proba_activate=False, write_mode=True) if verbosity_level", "'TEM: transmission electron microscopy samples. 
') requiredName.add_argument('-i', '--imgpath', required=True, nargs='+',", "'__tmp_segment__.png').unlink() else: print((\"The path {0} does not exist.\".format(path_testing_image))) return None", "again.\") break else: # Handle cases if no resolution is", "width, _ = ads.imread(str(path_testing_images_folder / file_)).shape except: try: height, width", "Imports import sys from pathlib import Path import json import", "containing the configuration of the network :param resolution_model: the resolution", "Performing the segmentation over all folders in the specified folder", "float(args[\"sizepixel\"]) else: psm = None path_target_list = [Path(p) for p", "({0}x{1}) is too small for the provided pixel size ({2}).\\n\".format(height,", "'where the image(s) to segment is/are located.') ap.add_argument(\"-m\", \"--model\", required=False,", "TEM_DEFAULT_MODEL_NAME = \"default_TEM_model_v1\" MODELS_PATH = pkg_resources.resource_filename('AxonDeepSeg', 'models') MODELS_PATH = Path(MODELS_PATH)", "new_path.exists(): path_model = new_path else: path_model = MODELS_PATH / SEM_DEFAULT_MODEL_NAME", "minimum_resolution: print(\"EXCEPTION: The size of one of the images ({0}x{1})", "Performing the segmentation over the image segment_image(current_path_target, path_model, overlap_value, config,", "used for the segmentation.', default=None) ap.add_argument('-v', '--verbose', required=False, type=int, choices=list(range(0,4)),", "= MODELS_PATH / TEM_DEFAULT_MODEL_NAME default_overlap = 25 # Definition of", "axon_segmentation function path_model, config = generate_default_parameters(type_, new_path) resolution_model = generate_resolution(type_,", "least {0}x{0} after resampling to a resolution of {1} to", "is not None) and new_path.exists(): path_model = new_path else: path_model", "model. 
''' dict_size = { \"SEM\":{ \"512\":0.1, \"256\":0.2 }, \"TEM\":{", "= convert_path(path_testing_image) path_model = convert_path(path_model) if path_testing_image.exists(): # Extracting the", "resampled_resolutions=resolution_model, prediction_proba_activate=False, write_mode=True) if verbosity_level >= 1: tqdm.write(\"Image {0} segmented.\".format(str(path_testing_images_folder", "path_model = new_path else: path_model = MODELS_PATH / SEM_DEFAULT_MODEL_NAME elif", "model. :param overlap_value: the number of pixels to be used", "all folders in the specified folder containing acquisitions to segment.", "psm = float(resolution_file.read()) else: print(\"ERROR: No pixel size is provided,", "the pixel size value.\" ) sys.exit(3) # Check that image", "for given resolution to reach minimum patch size after resizing.", "# Remove temporary file used for the segmentation fp.close() (path_acquisition", "'The default SEM model path is: \\n'+str(default_SEM_path)+'\\n'+ 'The default TEM", "of {1} to create standard sized patches.\\n\".format(config[\"trainingset_patchsize\"], resolution_model), \"One of", "type_acquisition == 'SEM': if (new_path is not None) and new_path.exists():", "scanning electron microscopy samples. \\n'+ 'TEM: transmission electron microscopy samples.", "selected_model = path_model.name # Read image img = ads.imread(str(path_testing_image)) #", "\"Image file location: {0}\".format(current_path_target) ) sys.exit(2) # Performing the segmentation", "[height, width] minimum_resolution = config[\"trainingset_patchsize\"] * resolution_model / min(image_size) if", "will be used for the segmentation.', default=None) ap.add_argument('-v', '--verbose', required=False,", "the arguments to axon_segmentation function path_model, config = generate_default_parameters(type_, new_path)", "be added to the image folder path. The pixel size", "progress bar for the segmentation. 
\\n'+ '1: Also displays the", "_ = ads.imread(str(current_path_target)).shape except: try: height, width = ads.imread(str(current_path_target)).shape except", "path_model, overlap_value, config, resolution_model, acquired_resolution=psm, verbosity_level=verbosity_level) print(\"Segmentation finished.\") sys.exit(0) #", "are using selected_model = path_model.name # Read image img =", "its folder path from the total path. path_parts = path_testing_image.parts", "provided on the CLI if psm == None: # Check", "segment_image(current_path_target, path_model, overlap_value, config, resolution_model, acquired_resolution=psm, verbosity_level=verbosity_level) print(\"Segmentation finished.\") else:", "help='Overlap value (in pixels) of the patches when doing the", "processed in the current sample.', default=0) ap.add_argument('-o', '--overlap', required=False, type=int,", "the path where it is located. :param path_to_config: relative path", "= [height, width] minimum_resolution = config[\"trainingset_patchsize\"] * resolution_model / min(image_size)", "to use. :return: the config dictionary. 
''' # If string,", "location: {0}\".format(current_path_target) ) sys.exit(2) # Performing the segmentation over the", "open(current_path_target.parent / 'pixel_size_in_micrometer.txt', 'r') psm = float(resolution_file.read()) else: print(\"ERROR: No", "passed into arguments for current_path_target in path_target_list: if not current_path_target.is_dir():", "/ SEM_DEFAULT_MODEL_NAME elif type_acquisition == 'TEM': if (new_path is not", "\"default_SEM_model_v1\" TEM_DEFAULT_MODEL_NAME = \"default_TEM_model_v1\" MODELS_PATH = pkg_resources.resource_filename('AxonDeepSeg', 'models') MODELS_PATH =", "= convert_path(path_to_config_file) try: with open(path_to_config_file, 'r') as fd: config_network =", "string, convert to Path objects path_to_config_file = convert_path(path_to_config_file) try: with", "sys.exit(2) # Performing the segmentation over the image segment_image(current_path_target, path_model,", "for the default model corresponding to the type_model acquisition. :param", "pathlib import Path import json import argparse from argparse import", "image located at the path_testing_image location. :param path_testing_image: the path", "'--verbose', required=False, type=int, choices=list(range(0,4)), help='Verbosity level. \\n'+ '0 (default) :", "p in args[\"imgpath\"]] new_path = Path(args[\"model\"]) if args[\"model\"] else None", "segmented_image_name = img_name_original + '_seg-axonmyelin' + '.png' axon_segmentation(path_acquisitions_folders=path_testing_images_folder, acquisitions_filenames=[acquisition_name], path_model_folder=path_model,", "path. ''' # If string, convert to Path objects path_to_config_file", "default_TEM_path = MODELS_PATH / TEM_DEFAULT_MODEL_NAME default_overlap = 25 # Definition", "'pixel_size_in_micrometer.txt').exists(): resolution_file = open(current_path_target.parent / 'pixel_size_in_micrometer.txt', 'r') psm = float(resolution_file.read())", "is/are not image(s). 
Please update the input path(s) and try", "# Building the path of the requested model if it", "height, width = ads.imread(str(path_testing_images_folder / file_)).shape except Exception as e:", "size of one of the images ({0}x{1}) is too small", "{1} to create standard sized patches.\\n\".format(config[\"trainingset_patchsize\"], resolution_model), \"One of the", "the image name and its folder path from the total", "the progress bar for the segmentation. \\n'+ '1: Also displays", "'pixel_size_in_micrometer.txt', 'r') psm = float(resolution_file.read()) else: print(\"ERROR: No pixel size", "if (current_path_target / 'pixel_size_in_micrometer.txt').exists(): resolution_file = open(current_path_target / 'pixel_size_in_micrometer.txt', 'r')", "# Generate tmpfile for segmentation pipeline fp = open(path_testing_images_folder /", "vars(ap.parse_args(argv)) type_ = str(args[\"type\"]) verbosity_level = int(args[\"verbose\"]) overlap_value = int(args[\"overlap\"])", "but more time to perform the segmentation. :param config: dict", "or \"TEM\" :param model_input_size: String or Int, the size of", "\"TEM\" :param model_input_size: String or Int, the size of the", "/ TEM_DEFAULT_MODEL_NAME default_overlap = 25 # Definition of the functions", ":param path_testing_images_folder: the folder where all image folders are located", "model if it exists and was supplied, else we load", "over the image segment_image(current_path_target, path_model, overlap_value, config, resolution_model, acquired_resolution=psm, verbosity_level=verbosity_level)", "(file.suffix.lower() in ('.png','.jpg','.jpeg','.tif','.tiff')) and (not str(file).endswith(('_seg-axonmyelin.png','_seg-axon.png','_seg-myelin.png','mask.png')))] # Pre-processing: convert to", "\\n'+ 'in that file will be used for the segmentation.',", "the folder \\n'+ 'where the image(s) to segment is/are located.')", "model is located. \\n'+ 'The default SEM model path is:", "the image located at the path_testing_image location. 
:param path_testing_image: the", "segment by selecting only image files (not already segmented or", "image size must be at least {0}x{0} after resampling to", "the type of model to get the parameters from. :param", "The pixel size \\n'+ 'in that file will be used", "''' dict_size = { \"SEM\":{ \"512\":0.1, \"256\":0.2 }, \"TEM\":{ \"512\":0.01", "main(argv=None): ''' Main loop. :return: Exit code. 0: Success 2:", "no pixel size is specified, a pixel_size_in_micrometer.txt \\n'+ 'file needs", "image for conversion img = ads.imread(str(path_testing_images_folder / file_)) # Generate", "or add a pixel_size_in_micrometer.txt file \", \"containing the pixel size", "the specified folder containing acquisitions to segment. segment_folders(current_path_target, path_model, overlap_value,", "args = vars(ap.parse_args(argv)) type_ = str(args[\"type\"]) verbosity_level = int(args[\"verbose\"]) overlap_value", "= path_model.name # Read image img = ads.imread(str(path_testing_image)) # Generate", "corresponding to the type_model acquisition. :param type_model: String, the type", "or path to the folder \\n'+ 'where the image(s) to", "pixels) of the patches when doing the segmentation. \\n'+ 'Higher", "the path of the requested model if it exists and", "images to segment are located in those image folders) :param", "Global variables SEM_DEFAULT_MODEL_NAME = \"default_SEM_model_v1\" TEM_DEFAULT_MODEL_NAME = \"default_TEM_model_v1\" MODELS_PATH =", "supplied, else we load the default model. if type_acquisition ==", "tqdm import tqdm import pkg_resources import AxonDeepSeg import AxonDeepSeg.ads_utils as", "specified folder containing acquisitions to segment. 
segment_folders(current_path_target, path_model, overlap_value, config,", "number of pixels to be used for overlap when doing", "of {0} after resampling to that resolution.\\n\".format(round(psm * min(image_size) /", "write_mode=True) if verbosity_level >= 1: print((\"Image {0} segmented.\".format(path_testing_image))) # Remove", "model to get the parameters from. :param new_path: Path to", "# <NAME> - 2017-08-30 # Imports import sys from pathlib", "= ads.imread(str(path_testing_image)) # Generate tmp file fp = open(path_acquisition /", "a pixel_size_in_micrometer.txt \\n'+ 'file needs to be added to the", "Generate tmpfile for segmentation pipeline fp = open(path_testing_images_folder / '__tmp_segment__.png',", "resolution.\\n\".format(round(acquired_resolution * min(image_size) / resolution_model)), \"Image file location: {0}\".format(str(path_testing_images_folder /", "dict containing the configuration of the network, or None if", "'wb+') img_name_original = acquisition_name.stem if selected_model == \"default_TEM_model_v1\": ads.imwrite(fp,255-img, format='png')", "None) and new_path.exists(): path_model = new_path else: path_model = MODELS_PATH", "\"Image file location: {0}\".format(str(path_testing_images_folder / file_)) ) sys.exit(2) selected_model =", "1: tqdm.write(\"Image {0} segmented.\".format(str(path_testing_images_folder / file_))) # Remove temporary file", "fp.close() (path_testing_images_folder / '__tmp_segment__.png').unlink() return None def generate_default_parameters(type_acquisition, new_path): '''", "new_path: Path to the model to use. 
:return: the config", "located (the images to segment are located in those image", "img = ads.imread(str(path_testing_images_folder / file_)) # Generate tmpfile for segmentation", "Handle cases if no resolution is provided on the CLI", "SEM_DEFAULT_MODEL_NAME default_TEM_path = MODELS_PATH / TEM_DEFAULT_MODEL_NAME default_overlap = 25 #", "\"containing the pixel size value.\" ) sys.exit(3) # Check that", "# Extracting the image name and its folder path from", "/ '__tmp_segment__.png').unlink() else: print((\"The path {0} does not exist.\".format(path_testing_image))) return", "= img_name_original + '_seg-axonmyelin' + '.png' axon_segmentation(path_acquisitions_folders=path_testing_images_folder, acquisitions_filenames=[acquisition_name], path_model_folder=path_model, config_dict=config,", "convert_path(path_testing_images_folder) path_model = convert_path(path_model) # Update list of images to", "'.png' # Performing the segmentation axon_segmentation(path_acquisitions_folders=path_acquisition, acquisitions_filenames=[acquisition_name], path_model_folder=path_model, config_dict=config, ckpt_name='model',", "small for the provided pixel size ({2}).\\n\".format(height, width, acquired_resolution), \"The", "/ file_)) # Generate tmpfile for segmentation pipeline fp =", "image segment_image(current_path_target, path_model, overlap_value, config, resolution_model, acquired_resolution=psm, verbosity_level=verbosity_level) print(\"Segmentation finished.\")", "Level of verbosity. The higher, the more information is given", "access the model :param overlap_value: the number of pixels to", "Float, the resolution of the model. 
''' dict_size = {", ") sys.exit(3) # Check that image size is large enough", "None, verbosity_level=0): ''' Segment the image located at the path_testing_image", "-s), or add a pixel_size_in_micrometer.txt file \", \"containing the pixel", "a size of {0} after resampling to that resolution.\\n\".format(round(psm *", "'__tmp_segment__.png', 'wb+') img_name_original = file_.stem if selected_model == \"default_TEM_model_v1\": ads.imwrite(fp,255-img,", "try: height, width, _ = ads.imread(str(current_path_target)).shape except: try: height, width", "from pathlib import Path import json import argparse from argparse", "acquired_resolution=acquired_resolution, prediction_proba_activate=False, write_mode=True) if verbosity_level >= 1: print((\"Image {0} segmented.\".format(path_testing_image)))", "value means less border effects but more time to perform", "current sample.', default=0) ap.add_argument('-o', '--overlap', required=False, type=int, help='Overlap value (in", "type_ = str(args[\"type\"]) verbosity_level = int(args[\"verbose\"]) overlap_value = int(args[\"overlap\"]) if", "String or Int, the size of the input. :return: Float,", "size is large enough for given resolution to reach minimum", "overlap_value, config, resolution_model, acquired_resolution = None, verbosity_level=0): ''' Segments the", "import Path import json import argparse from argparse import RawTextHelpFormatter", "for the segmentation. \\n'+ '1: Also displays the path of", "or Int, the size of the input. :return: Float, the", "segmentation. 
\\n'+ 'Higher values of overlap can improve the segmentation", "from AxonDeepSeg.ads_utils import convert_path # Global variables SEM_DEFAULT_MODEL_NAME = \"default_SEM_model_v1\"", "None, verbosity_level=0): ''' Segments the images contained in the image", "# Handle cases if no resolution is provided on the", "# Read image for conversion img = ads.imread(str(path_testing_images_folder / file_))", "args[\"imgpath\"]] new_path = Path(args[\"model\"]) if args[\"model\"] else None # Preparing", "(new_path is not None) and new_path.exists(): path_model = new_path else:", "(using argument -s), or add a pixel_size_in_micrometer.txt file \", \"containing", "pixel size value.\" ) sys.exit(3) # Check that image size", "\"TEM\":{ \"512\":0.01 } } return dict_size[str(type_acquisition)][str(model_input_size)] # Main loop def", "resampling to that resolution.\\n\".format(round(acquired_resolution * min(image_size) / resolution_model)), \"Image file", "the configuration file from the path where it is located.", "folder where all image folders are located (the images to", "path_testing_image.exists(): # Extracting the image name and its folder path", ":param path_to_config: relative path where the file config_network.json is located.", "# Processing the arguments args = vars(ap.parse_args(argv)) type_ = str(args[\"type\"])", "is specified, a pixel_size_in_micrometer.txt \\n'+ 'file needs to be added", "to segment. 
segment_folders(current_path_target, path_model, overlap_value, config, resolution_model, acquired_resolution=psm, verbosity_level=verbosity_level) print(\"Segmentation", "string, convert to Path objects path_testing_image = convert_path(path_testing_image) path_model =", "path_model.name # Read image for conversion img = ads.imread(str(path_testing_images_folder /", "= None, verbosity_level=0): ''' Segment the image located at the", "{0} after resampling to that resolution.\\n\".format(round(acquired_resolution * min(image_size) / resolution_model)),", "path(s) and try again.\") break else: # Handle cases if", "size ({2}).\\n\".format(height, width, psm), \"The image size must be at", "Remove temporary file used for the segmentation fp.close() (path_testing_images_folder /", "and was supplied, else we load the default model. if", "folder containing acquisitions to segment. segment_folders(current_path_target, path_model, overlap_value, config, resolution_model,", "= pkg_resources.resource_filename('AxonDeepSeg', 'models') MODELS_PATH = Path(MODELS_PATH) default_SEM_path = MODELS_PATH /", "convert_path(path_model) # Update list of images to segment by selecting", "configuration file was found at the mentioned path. ''' #", "displays the path of the image(s) being segmented. \\n'+ '2:", "= ads.imread(str(path_testing_images_folder / file_)).shape except: try: height, width = ads.imread(str(path_testing_images_folder", "in path_testing_images_folder.iterdir() if (file.suffix.lower() in ('.png','.jpg','.jpeg','.tif','.tiff')) and (not str(file).endswith(('_seg-axonmyelin.png','_seg-axon.png','_seg-myelin.png','mask.png')))] #", "is no pixel_size_in_micrometer.txt file in image folder. \", \"Please provide", "verbosity_level >= 1: tqdm.write(\"Image {0} segmented.\".format(str(path_testing_images_folder / file_))) # Remove", "help='Type of acquisition to segment. \\n'+ 'SEM: scanning electron microscopy", "the model. 
:param overlap_value: the number of pixels to be", "size (using argument -s), or add a pixel_size_in_micrometer.txt file \",", "if acquired_resolution < minimum_resolution: print(\"EXCEPTION: The size of one of", "image(s). Please update the input path(s) and try again.\") break", "the segmentation of current sample. \\n'+ '3: Also displays the", "of the image has a size of {0} after resampling", "pixel size ({2}).\\n\".format(height, width, acquired_resolution), \"The image size must be", "width] minimum_resolution = config[\"trainingset_patchsize\"] * resolution_model / min(image_size) if acquired_resolution", "= None path_target_list = [Path(p) for p in args[\"imgpath\"]] new_path", "print(\"Segmentation finished.\") sys.exit(0) # Calling the script if __name__ ==", "if verbosity_level >= 1: print((\"Image {0} segmented.\".format(path_testing_image))) # Remove temporary", "sys.exit(0) # Calling the script if __name__ == '__main__': main()" ]
[ "h_testing.bootstrap_CI(5000) print (mean_SD(s1), mean_SD(s2)) print (\"h_testing.d, analytic, correction = \",", "(a - b) < math.fabs((a + b) / (count_error *", "in range(length): sample.append(random.gauss(mean, sigma)) return sample def close_enough (a, b,", "= \", h_testing.d, (m2 - m1) / sig, h_testing.correction) print", "(m2 - m1) / sig, h_testing.correction) print (\"lower: approx, bootstrap\",", "but gives wider intervals at low d assert close_enough(approx_95CI_lower, bs_95CI_lower,", "+ b) / (count_error * 2)) : return True else:", "test_gaussian_case_med(): gaussian_case(0.5) #expect d = 2 def test_gaussian_case_high(): gaussian_case(1.0) #expect", "sample.append(random.gauss(mean, sigma)) return sample def close_enough (a, b, count_error): if", "math def generate_sample (length, mean, sigma): #generate a list of", "count_error) assert close_enough(approx_95CI_upper, bs_95CI_upper, count_error) assert close_enough(h_testing.d, (m2 - m1)", "sig) h_testing = Hedges_d(s1, s2) h_testing.hedges_d_unbiased() #answer is in self.d", "return sample def close_enough (a, b, count_error): if math.fabs (a", "(length, mean, sigma): #generate a list of normal distributed samples", "= [] for n in range(length): sample.append(random.gauss(mean, sigma)) return sample", "Hedges_d(s1, s2) h_testing.hedges_d_unbiased() #answer is in self.d approx_95CI_lower, approx_95CI_upper =", "s1 = generate_sample (sample_size, m1, sig) s2 = generate_sample (sample_size,", "count_error): if math.fabs (a - b) < math.fabs((a + b)", "print (\"h_testing.d, analytic, correction = \", h_testing.d, (m2 - m1)", "low d assert close_enough(approx_95CI_lower, bs_95CI_lower, count_error) assert close_enough(approx_95CI_upper, bs_95CI_upper, count_error)", "def test_gaussian_case_med(): gaussian_case(0.5) #expect d = 2 def test_gaussian_case_high(): gaussian_case(1.0)", "m2 = 2 s1 = generate_sample (sample_size, m1, sig) s2", "#expect d = 5 def test_gaussian_case_med(): gaussian_case(0.5) #expect 
d =", "math.fabs (a - b) < math.fabs((a + b) / (count_error", "sample def close_enough (a, b, count_error): if math.fabs (a -", "/ sig, h_testing.correction) print (\"lower: approx, bootstrap\", approx_95CI_lower, bs_95CI_lower) print", "/ (count_error * 2)) : return True else: return False", "def close_enough (a, b, count_error): if math.fabs (a - b)", "self.d approx_95CI_lower, approx_95CI_upper = h_testing.approx_CI() bs_95CI_lower, bs_95CI_upper = h_testing.bootstrap_CI(5000) print", "1 m2 = 2 s1 = generate_sample (sample_size, m1, sig)", "similar at high d but gives wider intervals at low", "(\"upper: approx, bootstrap\", approx_95CI_upper, bs_95CI_upper) #bootstrap is similar at high", "#expect d = 2 def test_gaussian_case_high(): gaussian_case(1.0) #expect d =", "def gaussian_case (sig): sample_size = 200 count_error = math.sqrt(sample_size) m1", "dcstats.hedges import Hedges_d from dcstats.statistics_EJ import simple_stats as mean_SD import", "approx_95CI_upper = h_testing.approx_CI() bs_95CI_lower, bs_95CI_upper = h_testing.bootstrap_CI(5000) print (mean_SD(s1), mean_SD(s2))", "bs_95CI_lower, bs_95CI_upper = h_testing.bootstrap_CI(5000) print (mean_SD(s1), mean_SD(s2)) print (\"h_testing.d, analytic,", "correction = \", h_testing.d, (m2 - m1) / sig, h_testing.correction)", "= generate_sample (sample_size, m2, sig) h_testing = Hedges_d(s1, s2) h_testing.hedges_d_unbiased()", "= 2 def test_gaussian_case_high(): gaussian_case(1.0) #expect d = 1, fail", "(count_error * 2)) : return True else: return False def", "s2) h_testing.hedges_d_unbiased() #answer is in self.d approx_95CI_lower, approx_95CI_upper = h_testing.approx_CI()", "if math.fabs (a - b) < math.fabs((a + b) /", "intervals at low d assert close_enough(approx_95CI_lower, bs_95CI_lower, count_error) assert close_enough(approx_95CI_upper,", "/ sig, count_error) ###tests def test_gaussian_case_low(): gaussian_case(0.2) #expect d =", "bs_95CI_lower) print (\"upper: approx, bootstrap\", approx_95CI_upper, 
bs_95CI_upper) #bootstrap is similar", "5 def test_gaussian_case_med(): gaussian_case(0.5) #expect d = 2 def test_gaussian_case_high():", "random import math def generate_sample (length, mean, sigma): #generate a", "m1) / sig, count_error) ###tests def test_gaussian_case_low(): gaussian_case(0.2) #expect d", "count_error) ###tests def test_gaussian_case_low(): gaussian_case(0.2) #expect d = 5 def", "import random import math def generate_sample (length, mean, sigma): #generate", "###tests def test_gaussian_case_low(): gaussian_case(0.2) #expect d = 5 def test_gaussian_case_med():", "= 1 m2 = 2 s1 = generate_sample (sample_size, m1,", "b, count_error): if math.fabs (a - b) < math.fabs((a +", "Hedges_d from dcstats.statistics_EJ import simple_stats as mean_SD import random import", "generate_sample (sample_size, m2, sig) h_testing = Hedges_d(s1, s2) h_testing.hedges_d_unbiased() #answer", "- m1) / sig, h_testing.correction) print (\"lower: approx, bootstrap\", approx_95CI_lower,", "2)) : return True else: return False def gaussian_case (sig):", "- m1) / sig, count_error) ###tests def test_gaussian_case_low(): gaussian_case(0.2) #expect", "m1 = 1 m2 = 2 s1 = generate_sample (sample_size,", "(m2 - m1) / sig, count_error) ###tests def test_gaussian_case_low(): gaussian_case(0.2)", "approx, bootstrap\", approx_95CI_upper, bs_95CI_upper) #bootstrap is similar at high d", "range(length): sample.append(random.gauss(mean, sigma)) return sample def close_enough (a, b, count_error):", "close_enough(approx_95CI_upper, bs_95CI_upper, count_error) assert close_enough(h_testing.d, (m2 - m1) / sig,", "def generate_sample (length, mean, sigma): #generate a list of normal", "= 5 def test_gaussian_case_med(): gaussian_case(0.5) #expect d = 2 def", "close_enough(h_testing.d, (m2 - m1) / sig, count_error) ###tests def test_gaussian_case_low():", "import Hedges_d from dcstats.statistics_EJ import simple_stats as mean_SD import random", "for n in range(length): 
sample.append(random.gauss(mean, sigma)) return sample def close_enough", "math.sqrt(sample_size) m1 = 1 m2 = 2 s1 = generate_sample", "(\"lower: approx, bootstrap\", approx_95CI_lower, bs_95CI_lower) print (\"upper: approx, bootstrap\", approx_95CI_upper,", "sig, h_testing.correction) print (\"lower: approx, bootstrap\", approx_95CI_lower, bs_95CI_lower) print (\"upper:", "high d but gives wider intervals at low d assert", "h_testing.approx_CI() bs_95CI_lower, bs_95CI_upper = h_testing.bootstrap_CI(5000) print (mean_SD(s1), mean_SD(s2)) print (\"h_testing.d,", "normal distributed samples sample = [] for n in range(length):", "= 2 s1 = generate_sample (sample_size, m1, sig) s2 =", "close_enough (a, b, count_error): if math.fabs (a - b) <", "mean_SD import random import math def generate_sample (length, mean, sigma):", "sig) s2 = generate_sample (sample_size, m2, sig) h_testing = Hedges_d(s1,", "b) < math.fabs((a + b) / (count_error * 2)) :", "wider intervals at low d assert close_enough(approx_95CI_lower, bs_95CI_lower, count_error) assert", "m1, sig) s2 = generate_sample (sample_size, m2, sig) h_testing =", "print (\"lower: approx, bootstrap\", approx_95CI_lower, bs_95CI_lower) print (\"upper: approx, bootstrap\",", "n in range(length): sample.append(random.gauss(mean, sigma)) return sample def close_enough (a,", "is in self.d approx_95CI_lower, approx_95CI_upper = h_testing.approx_CI() bs_95CI_lower, bs_95CI_upper =", "list of normal distributed samples sample = [] for n", "bootstrap\", approx_95CI_lower, bs_95CI_lower) print (\"upper: approx, bootstrap\", approx_95CI_upper, bs_95CI_upper) #bootstrap", "gives wider intervals at low d assert close_enough(approx_95CI_lower, bs_95CI_lower, count_error)", "False def gaussian_case (sig): sample_size = 200 count_error = math.sqrt(sample_size)", "analytic, correction = \", h_testing.d, (m2 - m1) / sig,", "gaussian_case(0.2) #expect d = 5 def test_gaussian_case_med(): gaussian_case(0.5) #expect d", "(sample_size, m1, 
sig) s2 = generate_sample (sample_size, m2, sig) h_testing", "m1) / sig, h_testing.correction) print (\"lower: approx, bootstrap\", approx_95CI_lower, bs_95CI_lower)", "True else: return False def gaussian_case (sig): sample_size = 200", "(a, b, count_error): if math.fabs (a - b) < math.fabs((a", "d = 5 def test_gaussian_case_med(): gaussian_case(0.5) #expect d = 2", "count_error) assert close_enough(h_testing.d, (m2 - m1) / sig, count_error) ###tests", "import simple_stats as mean_SD import random import math def generate_sample", "assert close_enough(approx_95CI_upper, bs_95CI_upper, count_error) assert close_enough(h_testing.d, (m2 - m1) /", "(sig): sample_size = 200 count_error = math.sqrt(sample_size) m1 = 1", "gaussian_case (sig): sample_size = 200 count_error = math.sqrt(sample_size) m1 =", "[] for n in range(length): sample.append(random.gauss(mean, sigma)) return sample def", "as mean_SD import random import math def generate_sample (length, mean,", "= math.sqrt(sample_size) m1 = 1 m2 = 2 s1 =", "from dcstats.statistics_EJ import simple_stats as mean_SD import random import math", "- b) < math.fabs((a + b) / (count_error * 2))", "samples sample = [] for n in range(length): sample.append(random.gauss(mean, sigma))", "of normal distributed samples sample = [] for n in", "generate_sample (length, mean, sigma): #generate a list of normal distributed", "< math.fabs((a + b) / (count_error * 2)) : return", "#answer is in self.d approx_95CI_lower, approx_95CI_upper = h_testing.approx_CI() bs_95CI_lower, bs_95CI_upper", "def test_gaussian_case_low(): gaussian_case(0.2) #expect d = 5 def test_gaussian_case_med(): gaussian_case(0.5)", "bs_95CI_upper) #bootstrap is similar at high d but gives wider", "math.fabs((a + b) / (count_error * 2)) : return True", "* 2)) : return True else: return False def gaussian_case", "h_testing.correction) print (\"lower: approx, bootstrap\", approx_95CI_lower, bs_95CI_lower) print (\"upper: approx,", "test_gaussian_case_low(): 
gaussian_case(0.2) #expect d = 5 def test_gaussian_case_med(): gaussian_case(0.5) #expect", "(mean_SD(s1), mean_SD(s2)) print (\"h_testing.d, analytic, correction = \", h_testing.d, (m2", "h_testing.hedges_d_unbiased() #answer is in self.d approx_95CI_lower, approx_95CI_upper = h_testing.approx_CI() bs_95CI_lower,", "approx_95CI_lower, approx_95CI_upper = h_testing.approx_CI() bs_95CI_lower, bs_95CI_upper = h_testing.bootstrap_CI(5000) print (mean_SD(s1),", "print (\"upper: approx, bootstrap\", approx_95CI_upper, bs_95CI_upper) #bootstrap is similar at", "a list of normal distributed samples sample = [] for", "d assert close_enough(approx_95CI_lower, bs_95CI_lower, count_error) assert close_enough(approx_95CI_upper, bs_95CI_upper, count_error) assert", "assert close_enough(approx_95CI_lower, bs_95CI_lower, count_error) assert close_enough(approx_95CI_upper, bs_95CI_upper, count_error) assert close_enough(h_testing.d,", "h_testing = Hedges_d(s1, s2) h_testing.hedges_d_unbiased() #answer is in self.d approx_95CI_lower,", "approx_95CI_upper, bs_95CI_upper) #bootstrap is similar at high d but gives", "dcstats.statistics_EJ import simple_stats as mean_SD import random import math def", "return True else: return False def gaussian_case (sig): sample_size =", "= Hedges_d(s1, s2) h_testing.hedges_d_unbiased() #answer is in self.d approx_95CI_lower, approx_95CI_upper", "approx, bootstrap\", approx_95CI_lower, bs_95CI_lower) print (\"upper: approx, bootstrap\", approx_95CI_upper, bs_95CI_upper)", "= generate_sample (sample_size, m1, sig) s2 = generate_sample (sample_size, m2,", "distributed samples sample = [] for n in range(length): sample.append(random.gauss(mean,", "count_error = math.sqrt(sample_size) m1 = 1 m2 = 2 s1", "bs_95CI_upper = h_testing.bootstrap_CI(5000) print (mean_SD(s1), mean_SD(s2)) print (\"h_testing.d, analytic, correction", "sample_size = 200 count_error = math.sqrt(sample_size) m1 = 1 m2", "= h_testing.approx_CI() bs_95CI_lower, bs_95CI_upper = 
h_testing.bootstrap_CI(5000) print (mean_SD(s1), mean_SD(s2)) print", "sigma)) return sample def close_enough (a, b, count_error): if math.fabs", "from dcstats.hedges import Hedges_d from dcstats.statistics_EJ import simple_stats as mean_SD", "b) / (count_error * 2)) : return True else: return", "in self.d approx_95CI_lower, approx_95CI_upper = h_testing.approx_CI() bs_95CI_lower, bs_95CI_upper = h_testing.bootstrap_CI(5000)", "h_testing.d, (m2 - m1) / sig, h_testing.correction) print (\"lower: approx,", "bootstrap\", approx_95CI_upper, bs_95CI_upper) #bootstrap is similar at high d but", "generate_sample (sample_size, m1, sig) s2 = generate_sample (sample_size, m2, sig)", "(\"h_testing.d, analytic, correction = \", h_testing.d, (m2 - m1) /", "else: return False def gaussian_case (sig): sample_size = 200 count_error", ": return True else: return False def gaussian_case (sig): sample_size", "(sample_size, m2, sig) h_testing = Hedges_d(s1, s2) h_testing.hedges_d_unbiased() #answer is", "200 count_error = math.sqrt(sample_size) m1 = 1 m2 = 2", "at high d but gives wider intervals at low d", "bs_95CI_lower, count_error) assert close_enough(approx_95CI_upper, bs_95CI_upper, count_error) assert close_enough(h_testing.d, (m2 -", "bs_95CI_upper, count_error) assert close_enough(h_testing.d, (m2 - m1) / sig, count_error)", "= h_testing.bootstrap_CI(5000) print (mean_SD(s1), mean_SD(s2)) print (\"h_testing.d, analytic, correction =", "close_enough(approx_95CI_lower, bs_95CI_lower, count_error) assert close_enough(approx_95CI_upper, bs_95CI_upper, count_error) assert close_enough(h_testing.d, (m2", "sigma): #generate a list of normal distributed samples sample =", "2 s1 = generate_sample (sample_size, m1, sig) s2 = generate_sample", "at low d assert close_enough(approx_95CI_lower, bs_95CI_lower, count_error) assert close_enough(approx_95CI_upper, bs_95CI_upper,", "assert close_enough(h_testing.d, (m2 - m1) / sig, count_error) ###tests def", "print (mean_SD(s1), mean_SD(s2)) 
print (\"h_testing.d, analytic, correction = \", h_testing.d,", "mean, sigma): #generate a list of normal distributed samples sample", "approx_95CI_lower, bs_95CI_lower) print (\"upper: approx, bootstrap\", approx_95CI_upper, bs_95CI_upper) #bootstrap is", "simple_stats as mean_SD import random import math def generate_sample (length,", "m2, sig) h_testing = Hedges_d(s1, s2) h_testing.hedges_d_unbiased() #answer is in", "mean_SD(s2)) print (\"h_testing.d, analytic, correction = \", h_testing.d, (m2 -", "gaussian_case(0.5) #expect d = 2 def test_gaussian_case_high(): gaussian_case(1.0) #expect d", "s2 = generate_sample (sample_size, m2, sig) h_testing = Hedges_d(s1, s2)", "#generate a list of normal distributed samples sample = []", "= 200 count_error = math.sqrt(sample_size) m1 = 1 m2 =", "#bootstrap is similar at high d but gives wider intervals", "d but gives wider intervals at low d assert close_enough(approx_95CI_lower,", "sig, count_error) ###tests def test_gaussian_case_low(): gaussian_case(0.2) #expect d = 5", "is similar at high d but gives wider intervals at", "d = 2 def test_gaussian_case_high(): gaussian_case(1.0) #expect d = 1,", "return False def gaussian_case (sig): sample_size = 200 count_error =", "sample = [] for n in range(length): sample.append(random.gauss(mean, sigma)) return", "import math def generate_sample (length, mean, sigma): #generate a list", "\", h_testing.d, (m2 - m1) / sig, h_testing.correction) print (\"lower:" ]
[ "import path from . import views urlpatterns = [ path('',", "path from . import views urlpatterns = [ path('', views.Records,", ". import views urlpatterns = [ path('', views.Records, name =\"fRec\"),", "django.urls import path from . import views urlpatterns = [", "from django.urls import path from . import views urlpatterns =", "import views urlpatterns = [ path('', views.Records, name =\"fRec\"), ]", "from . import views urlpatterns = [ path('', views.Records, name" ]
[ "\"transformer\" \"\"\" # Xfail this until the new spaCy rc", "in range(2): losses = {} nlp.update(train_examples, sgd=optimizer, losses=losses) doc =", "\"textcat\"] train_examples = [] for text, annotations in TRAIN_DATA: train_examples.append(Example.from_dict(nlp.make_doc(text),", "happy.\", {\"cats\": {\"POSITIVE\": 1.0, \"NEGATIVE\": 0.0}}), (\"I'm so angry\", {\"cats\":", "thinc.api import Config TRAIN_DATA = [ (\"I'm so happy.\", {\"cats\":", "trains properly. This used to throw an error because of", "used to throw an error because of shape inference issues", "@layers = \"reduce_mean.v1\" [components.transformer] factory = \"transformer\" \"\"\" # Xfail", "as d: file_path = d / \"trained_nlp\" nlp.to_disk(file_path) nlp2 =", "d / \"trained_nlp\" nlp.to_disk(file_path) nlp2 = util.load_model_from_path(file_path) doc2 = nlp2(\"We're", "] cfg_string = \"\"\" [nlp] lang = \"en\" pipeline =", "text, annotations in TRAIN_DATA: train_examples.append(Example.from_dict(nlp.make_doc(text), annotations)) optimizer = nlp.initialize(get_examples=lambda: train_examples)", "Example from spacy.util import make_tempdir from spacy import util from", "error because of shape inference issues - cf https://github.com/explosion/spaCy/issues/6401\"\"\" orig_config", "pipeline with just a transformer+textcat runs and trains properly. 
This", "factory = \"transformer\" \"\"\" # Xfail this until the new", "[\"transformer\",\"textcat\"] [components] [components.textcat] factory = \"textcat\" [components.textcat.model] @architectures = \"spacy.TextCatEnsemble.v2\"", "(\"I'm so happy.\", {\"cats\": {\"POSITIVE\": 1.0, \"NEGATIVE\": 0.0}}), (\"I'm so", "from spacy import util from thinc.api import Config TRAIN_DATA =", "util.load_model_from_config(orig_config, auto_fill=True, validate=True) assert nlp.pipe_names == [\"transformer\", \"textcat\"] train_examples =", "= Config().from_str(cfg_string) nlp = util.load_model_from_config(orig_config, auto_fill=True, validate=True) assert nlp.pipe_names ==", "This used to throw an error because of shape inference", "[components.textcat.model.tok2vec] @architectures = \"spacy-transformers.TransformerListener.v1\" grad_factor = 1.0 [components.textcat.model.tok2vec.pooling] @layers =", "a pipeline with just a transformer+textcat runs and trains properly.", "nlp = util.load_model_from_config(orig_config, auto_fill=True, validate=True) assert nlp.pipe_names == [\"transformer\", \"textcat\"]", "nlp2(\"We're interested at underwater basket weaving.\") cats2 = doc2.cats assert", "= util.load_model_from_path(file_path) doc2 = nlp2(\"We're interested at underwater basket weaving.\")", "# ensure IO goes OK with make_tempdir() as d: file_path", "\"spacy-transformers.TransformerListener.v1\" grad_factor = 1.0 [components.textcat.model.tok2vec.pooling] @layers = \"reduce_mean.v1\" [components.transformer] factory", "the new spaCy rc is up. @pytest.mark.xfail def test_transformer_pipeline_textcat(): \"\"\"Test", "@architectures = \"spacy.TextCatEnsemble.v2\" [components.textcat.model.tok2vec] @architectures = \"spacy-transformers.TransformerListener.v1\" grad_factor = 1.0", "{\"cats\": {\"POSITIVE\": 0.0, \"NEGATIVE\": 1.0}}), ] cfg_string = \"\"\" [nlp]", "spaCy rc is up. 
@pytest.mark.xfail def test_transformer_pipeline_textcat(): \"\"\"Test that a", "losses=losses) doc = nlp(\"We're interested at underwater basket weaving.\") cats1", "= nlp2(\"We're interested at underwater basket weaving.\") cats2 = doc2.cats", "= nlp.initialize(get_examples=lambda: train_examples) for i in range(2): losses = {}", "underwater basket weaving.\") cats1 = doc.cats # ensure IO goes", "range(2): losses = {} nlp.update(train_examples, sgd=optimizer, losses=losses) doc = nlp(\"We're", "interested at underwater basket weaving.\") cats1 = doc.cats # ensure", "rc is up. @pytest.mark.xfail def test_transformer_pipeline_textcat(): \"\"\"Test that a pipeline", "(\"I'm so angry\", {\"cats\": {\"POSITIVE\": 0.0, \"NEGATIVE\": 1.0}}), ] cfg_string", "[components.textcat.model.tok2vec.pooling] @layers = \"reduce_mean.v1\" [components.transformer] factory = \"transformer\" \"\"\" #", "doc.cats # ensure IO goes OK with make_tempdir() as d:", "annotations)) optimizer = nlp.initialize(get_examples=lambda: train_examples) for i in range(2): losses", "an error because of shape inference issues - cf https://github.com/explosion/spaCy/issues/6401\"\"\"", "lang = \"en\" pipeline = [\"transformer\",\"textcat\"] [components] [components.textcat] factory =", "issues - cf https://github.com/explosion/spaCy/issues/6401\"\"\" orig_config = Config().from_str(cfg_string) nlp = util.load_model_from_config(orig_config,", "= \"textcat\" [components.textcat.model] @architectures = \"spacy.TextCatEnsemble.v2\" [components.textcat.model.tok2vec] @architectures = \"spacy-transformers.TransformerListener.v1\"", "basket weaving.\") cats1 = doc.cats # ensure IO goes OK", "0.0}}), (\"I'm so angry\", {\"cats\": {\"POSITIVE\": 0.0, \"NEGATIVE\": 1.0}}), ]", "make_tempdir from spacy import util from thinc.api import Config TRAIN_DATA", "Xfail this until the new spaCy rc is up. 
@pytest.mark.xfail", "annotations in TRAIN_DATA: train_examples.append(Example.from_dict(nlp.make_doc(text), annotations)) optimizer = nlp.initialize(get_examples=lambda: train_examples) for", "from spacy.util import make_tempdir from spacy import util from thinc.api", "= d / \"trained_nlp\" nlp.to_disk(file_path) nlp2 = util.load_model_from_path(file_path) doc2 =", "that a pipeline with just a transformer+textcat runs and trains", "\"trained_nlp\" nlp.to_disk(file_path) nlp2 = util.load_model_from_path(file_path) doc2 = nlp2(\"We're interested at", "{\"POSITIVE\": 0.0, \"NEGATIVE\": 1.0}}), ] cfg_string = \"\"\" [nlp] lang", "[ (\"I'm so happy.\", {\"cats\": {\"POSITIVE\": 1.0, \"NEGATIVE\": 0.0}}), (\"I'm", "# Xfail this until the new spaCy rc is up.", "IO goes OK with make_tempdir() as d: file_path = d", "[components.textcat] factory = \"textcat\" [components.textcat.model] @architectures = \"spacy.TextCatEnsemble.v2\" [components.textcat.model.tok2vec] @architectures", "\"\"\" # Xfail this until the new spaCy rc is", "= [ (\"I'm so happy.\", {\"cats\": {\"POSITIVE\": 1.0, \"NEGATIVE\": 0.0}}),", "just a transformer+textcat runs and trains properly. 
This used to", "= [\"transformer\",\"textcat\"] [components] [components.textcat] factory = \"textcat\" [components.textcat.model] @architectures =", "test_transformer_pipeline_textcat(): \"\"\"Test that a pipeline with just a transformer+textcat runs", "[nlp] lang = \"en\" pipeline = [\"transformer\",\"textcat\"] [components] [components.textcat] factory", "- cf https://github.com/explosion/spaCy/issues/6401\"\"\" orig_config = Config().from_str(cfg_string) nlp = util.load_model_from_config(orig_config, auto_fill=True,", "import Example from spacy.util import make_tempdir from spacy import util", "\"NEGATIVE\": 0.0}}), (\"I'm so angry\", {\"cats\": {\"POSITIVE\": 0.0, \"NEGATIVE\": 1.0}}),", "0.0, \"NEGATIVE\": 1.0}}), ] cfg_string = \"\"\" [nlp] lang =", "at underwater basket weaving.\") cats1 = doc.cats # ensure IO", "angry\", {\"cats\": {\"POSITIVE\": 0.0, \"NEGATIVE\": 1.0}}), ] cfg_string = \"\"\"", "= 1.0 [components.textcat.model.tok2vec.pooling] @layers = \"reduce_mean.v1\" [components.transformer] factory = \"transformer\"", "[components.transformer] factory = \"transformer\" \"\"\" # Xfail this until the", "= \"transformer\" \"\"\" # Xfail this until the new spaCy", "\"\"\" [nlp] lang = \"en\" pipeline = [\"transformer\",\"textcat\"] [components] [components.textcat]", "to throw an error because of shape inference issues -", "with just a transformer+textcat runs and trains properly. 
This used", "cf https://github.com/explosion/spaCy/issues/6401\"\"\" orig_config = Config().from_str(cfg_string) nlp = util.load_model_from_config(orig_config, auto_fill=True, validate=True)", "so happy.\", {\"cats\": {\"POSITIVE\": 1.0, \"NEGATIVE\": 0.0}}), (\"I'm so angry\",", "[\"transformer\", \"textcat\"] train_examples = [] for text, annotations in TRAIN_DATA:", "{\"POSITIVE\": 1.0, \"NEGATIVE\": 0.0}}), (\"I'm so angry\", {\"cats\": {\"POSITIVE\": 0.0,", "\"NEGATIVE\": 1.0}}), ] cfg_string = \"\"\" [nlp] lang = \"en\"", "i in range(2): losses = {} nlp.update(train_examples, sgd=optimizer, losses=losses) doc", "doc = nlp(\"We're interested at underwater basket weaving.\") cats1 =", "shape inference issues - cf https://github.com/explosion/spaCy/issues/6401\"\"\" orig_config = Config().from_str(cfg_string) nlp", "nlp.initialize(get_examples=lambda: train_examples) for i in range(2): losses = {} nlp.update(train_examples,", "\"\"\"Test that a pipeline with just a transformer+textcat runs and", "spacy import util from thinc.api import Config TRAIN_DATA = [", "underwater basket weaving.\") cats2 = doc2.cats assert cats1 == cats2", "1.0}}), ] cfg_string = \"\"\" [nlp] lang = \"en\" pipeline", "runs and trains properly. 
This used to throw an error", "d: file_path = d / \"trained_nlp\" nlp.to_disk(file_path) nlp2 = util.load_model_from_path(file_path)", "= [] for text, annotations in TRAIN_DATA: train_examples.append(Example.from_dict(nlp.make_doc(text), annotations)) optimizer", "for text, annotations in TRAIN_DATA: train_examples.append(Example.from_dict(nlp.make_doc(text), annotations)) optimizer = nlp.initialize(get_examples=lambda:", "from spacy.training.example import Example from spacy.util import make_tempdir from spacy", "import Config TRAIN_DATA = [ (\"I'm so happy.\", {\"cats\": {\"POSITIVE\":", "[components.textcat.model] @architectures = \"spacy.TextCatEnsemble.v2\" [components.textcat.model.tok2vec] @architectures = \"spacy-transformers.TransformerListener.v1\" grad_factor =", "train_examples = [] for text, annotations in TRAIN_DATA: train_examples.append(Example.from_dict(nlp.make_doc(text), annotations))", "{\"cats\": {\"POSITIVE\": 1.0, \"NEGATIVE\": 0.0}}), (\"I'm so angry\", {\"cats\": {\"POSITIVE\":", "= \"spacy.TextCatEnsemble.v2\" [components.textcat.model.tok2vec] @architectures = \"spacy-transformers.TransformerListener.v1\" grad_factor = 1.0 [components.textcat.model.tok2vec.pooling]", "a transformer+textcat runs and trains properly. 
This used to throw", "{} nlp.update(train_examples, sgd=optimizer, losses=losses) doc = nlp(\"We're interested at underwater", "at underwater basket weaving.\") cats2 = doc2.cats assert cats1 ==", "ensure IO goes OK with make_tempdir() as d: file_path =", "interested at underwater basket weaving.\") cats2 = doc2.cats assert cats1", "= \"\"\" [nlp] lang = \"en\" pipeline = [\"transformer\",\"textcat\"] [components]", "throw an error because of shape inference issues - cf", "\"en\" pipeline = [\"transformer\",\"textcat\"] [components] [components.textcat] factory = \"textcat\" [components.textcat.model]", "auto_fill=True, validate=True) assert nlp.pipe_names == [\"transformer\", \"textcat\"] train_examples = []", "util from thinc.api import Config TRAIN_DATA = [ (\"I'm so", "inference issues - cf https://github.com/explosion/spaCy/issues/6401\"\"\" orig_config = Config().from_str(cfg_string) nlp =", "= util.load_model_from_config(orig_config, auto_fill=True, validate=True) assert nlp.pipe_names == [\"transformer\", \"textcat\"] train_examples", "train_examples) for i in range(2): losses = {} nlp.update(train_examples, sgd=optimizer,", "TRAIN_DATA = [ (\"I'm so happy.\", {\"cats\": {\"POSITIVE\": 1.0, \"NEGATIVE\":", "\"reduce_mean.v1\" [components.transformer] factory = \"transformer\" \"\"\" # Xfail this until", "pytest from spacy.training.example import Example from spacy.util import make_tempdir from", "= {} nlp.update(train_examples, sgd=optimizer, losses=losses) doc = nlp(\"We're interested at", "because of shape inference issues - cf https://github.com/explosion/spaCy/issues/6401\"\"\" orig_config =", "properly. This used to throw an error because of shape", "new spaCy rc is up. 
@pytest.mark.xfail def test_transformer_pipeline_textcat(): \"\"\"Test that", "make_tempdir() as d: file_path = d / \"trained_nlp\" nlp.to_disk(file_path) nlp2", "spacy.util import make_tempdir from spacy import util from thinc.api import", "goes OK with make_tempdir() as d: file_path = d /", "nlp(\"We're interested at underwater basket weaving.\") cats1 = doc.cats #", "= \"en\" pipeline = [\"transformer\",\"textcat\"] [components] [components.textcat] factory = \"textcat\"", "util.load_model_from_path(file_path) doc2 = nlp2(\"We're interested at underwater basket weaving.\") cats2", "so angry\", {\"cats\": {\"POSITIVE\": 0.0, \"NEGATIVE\": 1.0}}), ] cfg_string =", "== [\"transformer\", \"textcat\"] train_examples = [] for text, annotations in", "with make_tempdir() as d: file_path = d / \"trained_nlp\" nlp.to_disk(file_path)", "import make_tempdir from spacy import util from thinc.api import Config", "up. @pytest.mark.xfail def test_transformer_pipeline_textcat(): \"\"\"Test that a pipeline with just", "https://github.com/explosion/spaCy/issues/6401\"\"\" orig_config = Config().from_str(cfg_string) nlp = util.load_model_from_config(orig_config, auto_fill=True, validate=True) assert", "[components] [components.textcat] factory = \"textcat\" [components.textcat.model] @architectures = \"spacy.TextCatEnsemble.v2\" [components.textcat.model.tok2vec]", "sgd=optimizer, losses=losses) doc = nlp(\"We're interested at underwater basket weaving.\")", "def test_transformer_pipeline_textcat(): \"\"\"Test that a pipeline with just a transformer+textcat", "\"spacy.TextCatEnsemble.v2\" [components.textcat.model.tok2vec] @architectures = \"spacy-transformers.TransformerListener.v1\" grad_factor = 1.0 [components.textcat.model.tok2vec.pooling] @layers", "pipeline = [\"transformer\",\"textcat\"] [components] [components.textcat] factory = \"textcat\" [components.textcat.model] @architectures", "is up. 
@pytest.mark.xfail def test_transformer_pipeline_textcat(): \"\"\"Test that a pipeline with", "spacy.training.example import Example from spacy.util import make_tempdir from spacy import", "Config().from_str(cfg_string) nlp = util.load_model_from_config(orig_config, auto_fill=True, validate=True) assert nlp.pipe_names == [\"transformer\",", "until the new spaCy rc is up. @pytest.mark.xfail def test_transformer_pipeline_textcat():", "= \"spacy-transformers.TransformerListener.v1\" grad_factor = 1.0 [components.textcat.model.tok2vec.pooling] @layers = \"reduce_mean.v1\" [components.transformer]", "weaving.\") cats1 = doc.cats # ensure IO goes OK with", "/ \"trained_nlp\" nlp.to_disk(file_path) nlp2 = util.load_model_from_path(file_path) doc2 = nlp2(\"We're interested", "factory = \"textcat\" [components.textcat.model] @architectures = \"spacy.TextCatEnsemble.v2\" [components.textcat.model.tok2vec] @architectures =", "nlp.pipe_names == [\"transformer\", \"textcat\"] train_examples = [] for text, annotations", "@pytest.mark.xfail def test_transformer_pipeline_textcat(): \"\"\"Test that a pipeline with just a", "1.0, \"NEGATIVE\": 0.0}}), (\"I'm so angry\", {\"cats\": {\"POSITIVE\": 0.0, \"NEGATIVE\":", "1.0 [components.textcat.model.tok2vec.pooling] @layers = \"reduce_mean.v1\" [components.transformer] factory = \"transformer\" \"\"\"", "optimizer = nlp.initialize(get_examples=lambda: train_examples) for i in range(2): losses =", "validate=True) assert nlp.pipe_names == [\"transformer\", \"textcat\"] train_examples = [] for", "and trains properly. This used to throw an error because", "[] for text, annotations in TRAIN_DATA: train_examples.append(Example.from_dict(nlp.make_doc(text), annotations)) optimizer =", "grad_factor = 1.0 [components.textcat.model.tok2vec.pooling] @layers = \"reduce_mean.v1\" [components.transformer] factory =", "this until the new spaCy rc is up. 
@pytest.mark.xfail def", "orig_config = Config().from_str(cfg_string) nlp = util.load_model_from_config(orig_config, auto_fill=True, validate=True) assert nlp.pipe_names", "in TRAIN_DATA: train_examples.append(Example.from_dict(nlp.make_doc(text), annotations)) optimizer = nlp.initialize(get_examples=lambda: train_examples) for i", "= \"reduce_mean.v1\" [components.transformer] factory = \"transformer\" \"\"\" # Xfail this", "cfg_string = \"\"\" [nlp] lang = \"en\" pipeline = [\"transformer\",\"textcat\"]", "train_examples.append(Example.from_dict(nlp.make_doc(text), annotations)) optimizer = nlp.initialize(get_examples=lambda: train_examples) for i in range(2):", "of shape inference issues - cf https://github.com/explosion/spaCy/issues/6401\"\"\" orig_config = Config().from_str(cfg_string)", "OK with make_tempdir() as d: file_path = d / \"trained_nlp\"", "nlp2 = util.load_model_from_path(file_path) doc2 = nlp2(\"We're interested at underwater basket", "import pytest from spacy.training.example import Example from spacy.util import make_tempdir", "\"textcat\" [components.textcat.model] @architectures = \"spacy.TextCatEnsemble.v2\" [components.textcat.model.tok2vec] @architectures = \"spacy-transformers.TransformerListener.v1\" grad_factor", "nlp.update(train_examples, sgd=optimizer, losses=losses) doc = nlp(\"We're interested at underwater basket", "doc2 = nlp2(\"We're interested at underwater basket weaving.\") cats2 =", "@architectures = \"spacy-transformers.TransformerListener.v1\" grad_factor = 1.0 [components.textcat.model.tok2vec.pooling] @layers = \"reduce_mean.v1\"", "= nlp(\"We're interested at underwater basket weaving.\") cats1 = doc.cats", "import util from thinc.api import Config TRAIN_DATA = [ (\"I'm", "= doc.cats # ensure IO goes OK with make_tempdir() as", "assert nlp.pipe_names == [\"transformer\", \"textcat\"] train_examples = [] for text,", "for i in range(2): losses = {} nlp.update(train_examples, sgd=optimizer, losses=losses)", "from thinc.api 
import Config TRAIN_DATA = [ (\"I'm so happy.\",", "Config TRAIN_DATA = [ (\"I'm so happy.\", {\"cats\": {\"POSITIVE\": 1.0,", "transformer+textcat runs and trains properly. This used to throw an", "TRAIN_DATA: train_examples.append(Example.from_dict(nlp.make_doc(text), annotations)) optimizer = nlp.initialize(get_examples=lambda: train_examples) for i in", "cats1 = doc.cats # ensure IO goes OK with make_tempdir()", "file_path = d / \"trained_nlp\" nlp.to_disk(file_path) nlp2 = util.load_model_from_path(file_path) doc2", "nlp.to_disk(file_path) nlp2 = util.load_model_from_path(file_path) doc2 = nlp2(\"We're interested at underwater", "losses = {} nlp.update(train_examples, sgd=optimizer, losses=losses) doc = nlp(\"We're interested" ]
[ "command: \") action_creator = ActionCreator() while True: input_data = await", "= config async def run(self): await asyncio.sleep(1) print(\"Insert command: \")", "input_data: for task in asyncio.all_tasks(): task.cancel() break action = action_creator.parse(*input_data.split())", "True: input_data = await input(\"~> \") if not input_data: for", "class REPL: def __init__(self, action_queue, config, *args, **kwargs): self.action_queue =", "ActionCreator() while True: input_data = await input(\"~> \") if not", "for task in asyncio.all_tasks(): task.cancel() break action = action_creator.parse(*input_data.split()) if", "import input from .action_creator import ActionCreator class REPL: def __init__(self,", "task in asyncio.all_tasks(): task.cancel() break action = action_creator.parse(*input_data.split()) if action:", "input from .action_creator import ActionCreator class REPL: def __init__(self, action_queue,", "in asyncio.all_tasks(): task.cancel() break action = action_creator.parse(*input_data.split()) if action: self.action_queue.push_action(action)", "\") if not input_data: for task in asyncio.all_tasks(): task.cancel() break", "await asyncio.sleep(1) print(\"Insert command: \") action_creator = ActionCreator() while True:", "..core.common.io import input from .action_creator import ActionCreator class REPL: def", "__init__(self, action_queue, config, *args, **kwargs): self.action_queue = action_queue self.config =", "import asyncio from ..core.common.io import input from .action_creator import ActionCreator", "config, *args, **kwargs): self.action_queue = action_queue self.config = config async", "= action_queue self.config = config async def run(self): await asyncio.sleep(1)", "config async def run(self): await asyncio.sleep(1) print(\"Insert command: \") action_creator", "= ActionCreator() while True: input_data = await input(\"~> \") if", "print(\"Insert command: \") action_creator = ActionCreator() while True: input_data =", "input_data = await 
input(\"~> \") if not input_data: for task", "def __init__(self, action_queue, config, *args, **kwargs): self.action_queue = action_queue self.config", "action_queue self.config = config async def run(self): await asyncio.sleep(1) print(\"Insert", "*args, **kwargs): self.action_queue = action_queue self.config = config async def", "= await input(\"~> \") if not input_data: for task in", "self.action_queue = action_queue self.config = config async def run(self): await", "def run(self): await asyncio.sleep(1) print(\"Insert command: \") action_creator = ActionCreator()", "if not input_data: for task in asyncio.all_tasks(): task.cancel() break action", "asyncio from ..core.common.io import input from .action_creator import ActionCreator class", "self.config = config async def run(self): await asyncio.sleep(1) print(\"Insert command:", "ActionCreator class REPL: def __init__(self, action_queue, config, *args, **kwargs): self.action_queue", "import ActionCreator class REPL: def __init__(self, action_queue, config, *args, **kwargs):", "action_queue, config, *args, **kwargs): self.action_queue = action_queue self.config = config", ".action_creator import ActionCreator class REPL: def __init__(self, action_queue, config, *args,", "from .action_creator import ActionCreator class REPL: def __init__(self, action_queue, config,", "not input_data: for task in asyncio.all_tasks(): task.cancel() break action =", "while True: input_data = await input(\"~> \") if not input_data:", "\") action_creator = ActionCreator() while True: input_data = await input(\"~>", "action_creator = ActionCreator() while True: input_data = await input(\"~> \")", "input(\"~> \") if not input_data: for task in asyncio.all_tasks(): task.cancel()", "await input(\"~> \") if not input_data: for task in asyncio.all_tasks():", "async def run(self): await asyncio.sleep(1) print(\"Insert command: \") action_creator =", "run(self): await asyncio.sleep(1) print(\"Insert command: \") action_creator = 
ActionCreator() while", "**kwargs): self.action_queue = action_queue self.config = config async def run(self):", "asyncio.sleep(1) print(\"Insert command: \") action_creator = ActionCreator() while True: input_data", "from ..core.common.io import input from .action_creator import ActionCreator class REPL:", "REPL: def __init__(self, action_queue, config, *args, **kwargs): self.action_queue = action_queue" ]
[ "float(linear_loss.data[0]), global_step) writer.add_scalar(\"linear_l1_loss\", float(linear_l1_loss.data[0]), global_step) writer.add_scalar(\"linear_binary_div_loss\", float( linear_binary_div.data[0]), global_step) if", "plot_alignment(attn.T, path, info=\"{}, {}, step={}\".format( hparams.builder, time_string(), global_step)) def prepare_spec_image(spectrogram):", "decoder_target_mask # shift mask decoder_target_mask = decoder_target_mask[:, r:, :] target_mask", "= min(1, len(input_lengths) - 1) input_length = input_lengths[idx] # Alignment", "frame_positions = Variable(frame_positions) done = Variable(done) target_lengths = Variable(target_lengths) speaker_ids", "as cudnn from torch.utils import data as data_utils from torch.utils.data.sampler", "rate\", current_lr, global_step) global_step += 1 running_loss += loss.data[0] averaged_loss", "-h, --help Show this help message and exit \"\"\" from", "imitates initial decoder states b_pad = r max_target_len += b_pad", "text_positions = text_positions.cuda() frame_positions = frame_positions.cuda() if train_postnet: y =", "[0, 1] spectrogram = (spectrogram - np.min(spectrogram)) / (np.max(spectrogram) -", "datetime.now().strftime('%Y-%m-%d %H:%M') def save_alignment(path, attn): plot_alignment(attn.T, path, info=\"{}, {}, step={}\".format(", "None: linear_output = linear_outputs[idx].cpu().data.numpy() spectrogram = prepare_spec_image(dv3.audio._denormalize(linear_output)) writer.add_image(\"Predicted linear spectrogram\",", ":], decoder_target_mask) mel_loss = (1 - w) * mel_l1_loss +", "tensorboard if log_event_path is None: log_event_path = \"log/run-test\" + str(datetime.now()).replace(\"", "other leaders at the Group of 20 conference.\", \"Generative adversarial", "and global_step % checkpoint_interval == 0: save_states( global_step, writer, mel_outputs,", "def _load_embedding(path, model): state = torch.load(path)[\"state_dict\"] key = \"seq2seq.encoder.embed_tokens.weight\" 
model.seq2seq.encoder.embed_tokens.weight.data", "\"state_dict\": m.state_dict(), \"optimizer\": optimizer_state, \"global_step\": step, \"global_epoch\": epoch, }, checkpoint_path)", "speaker_id = args[\"--speaker-id\"] speaker_id = int(speaker_id) if speaker_id is not", "= False return texts return texts, speaker_ids else: return texts", "running_loss += loss.data[0] averaged_loss = running_loss / (len(data_loader)) writer.add_scalar(\"loss (per", "max_target_len, b_pad=b_pad) for x in batch], dtype=np.float32) mel_batch = torch.FloatTensor(b)", "if w > 0: binary_div = w * masked_mean(z, mask)", "from: {}\".format(path)) checkpoint = torch.load(path) model.load_state_dict(checkpoint[\"state_dict\"]) if not reset_optimizer: optimizer_state", "# to be set later def _pad(seq, max_len, constant_values=0): return", "= model( x, mel, speaker_ids=speaker_ids, text_positions=text_positions, frame_positions=frame_positions, input_lengths=input_lengths) elif train_seq2seq:", "= join(checkpoint_dir, \"eval\") os.makedirs(eval_output_dir, exist_ok=True) # hard coded speaker_ids =", "Trump met with other leaders at the Group of 20", "Binary divergence loss if hparams.binary_divergence_weight <= 0: binary_div = Variable(y.data.new(1).zero_())", "the CERN laboratory say they have discovered a new particle.\",", "Use specific speaker of data in case for multi-speaker datasets.", "info=None): fig, ax = plt.subplots() im = ax.imshow( alignment, aspect='auto',", "mel, y, positions, done, target_lengths, speaker_ids) \\ in tqdm(enumerate(data_loader)): model.train()", "{}\".format(log_event_path)) writer = SummaryWriter(log_dir=log_event_path) # Train! 
try: train(model, data_loader, optimizer,", "alignment alignment_dir = join(checkpoint_dir, \"alignment_ave\") os.makedirs(alignment_dir, exist_ok=True) path = join(alignment_dir,", "= state[key] # https://discuss.pytorch.org/t/how-to-load-part-of-pre-trained-model/1113/3 def restore_parts(path, model): print(\"Restore part of", "= prepare_spec_image(dv3.audio._denormalize(linear_output)) writer.add_image(\"Target linear spectrogram\", spectrogram, global_step) def logit(x, eps=1e-8):", "train_seq2seq=True, train_postnet=True): if use_cuda: model = model.cuda() linear_dim = model.linear_dim", "self.multi_speaker = len(l) == 5 texts = list(map(lambda l: l.decode(\"utf-8\").split(\"|\")[3],", "os.makedirs(checkpoint_dir, exist_ok=True) # Input dataset definitions X = FileSourceDataset(TextDataSource(data_root, speaker_id))", "priority_w > 0: if w > 0: priority_loss = w", "train(model, data_loader, optimizer, writer, init_lr=0.002, checkpoint_dir=None, checkpoint_interval=None, nepochs=None, clip_thresh=1.0, train_seq2seq=True,", "else: return texts def collect_features(self, *args): if self.multi_speaker: text, speaker_id", "speaker_id = self.X[idx] return text, self.Mel[idx], self.Y[idx], speaker_id else: return", "import dv3.audio import dv3.lrschedule import torch from torch.utils import data", "target_lengths = [len(x[1]) for x in batch] max_target_len = max(target_lengths)", "list(map(lambda l: l.decode(\"utf-8\").split(\"|\")[self.col], lines)) paths = list(map(lambda f: join(self.data_root, f),", "if use_cuda: cudnn.benchmark = False _frontend = None # to", "= sequence_length.data.max() batch_size = sequence_length.size(0) seq_range = torch.arange(0, max_len).long() seq_range_expand", "decoder_lengths = target_lengths.long().numpy() // r // downsample_step # Feed data", "= model.postnet checkpoint_path = join( checkpoint_dir, \"checkpoint_step{:09d}{}.pth\".format(global_step, suffix)) optimizer_state =", "= getattr(frontend, hparams.frontend) 
os.makedirs(checkpoint_dir, exist_ok=True) # Input dataset definitions X", "tqdm(enumerate(data_loader)): model.train() ismultispeaker = speaker_ids is not None # Learning", "- x + eps) def masked_mean(y, mask): # (B, T,", "save_checkpoint( model, optimizer, global_step, checkpoint_dir, global_epoch, train_seq2seq, train_postnet) print(\"Finished\") sys.exit(0)", "args\" # Override hyper parameters hparams.parse(args[\"--hparams\"]) print(hparams_debug_string()) assert hparams.name ==", "is None: raise RuntimeError(\"Should provide either lengths or mask\") #", "--load-embedding=<path> Load embedding from checkpoint. --speaker-id=<N> Use specific speaker of", "join, expanduser import random import librosa.display from matplotlib import pyplot", "writer.add_image(tag, np.uint8(cm.viridis(np.flip(alignment, 1).T) * 255), global_step) # Mel writer.add_image(\"(Eval) Predicted", "np.max(np.abs(signal)) path = join(checkpoint_dir, \"step{:09d}_predicted.wav\".format( global_step)) try: writer.add_audio(\"Predicted audio signal\",", "Predicted audio signal signal = dv3.audio.inv_spectrogram(linear_output.T) signal /= np.max(np.abs(signal)) path", "against freq axis return np.uint8(cm.magma(spectrogram.T) * 255) def eval_model(global_step, writer,", "matplotlib import cm from warnings import warn from dv3.hparams import", "< len(indices): random.shuffle(indices[s:]) return iter(indices) def __len__(self): return len(self.sorted_indices) class", "# Set 0 for zero beginning padding # imitates initial", "1, speaker_id) class LinearSpecDataSource(_NPYDataSource): def __init__(self, data_root, speaker_id=None): super(LinearSpecDataSource, self).__init__(data_root,", "== 0 self.permutate = permutate def __iter__(self): indices = self.sorted_indices.clone()", "global_step) # Mel writer.add_image(\"(Eval) Predicted mel spectrogram text{}_{}\".format(idx, speaker_str), prepare_spec_image(mel),", "x in batch], dtype=np.float32) y_batch = torch.FloatTensor(c) # text positions", 
"text, p=0, speaker_id=speaker_id, fast=False) signal /= np.max(np.abs(signal)) # Alignment path", "speaker_ids is None mel_outputs, attn, done_hat, _ = model.seq2seq( x,", "= 1, max_decoder_target_len + 1 # if b_pad > 0:", "path. --train-seq2seq-only Train only seq2seq model. --train-postnet-only Train only postnet", "checkpoint_path) def build_model(): model = getattr(builder, hparams.builder)( n_speakers=hparams.n_speakers, speaker_embed_dim=hparams.speaker_embed_dim, n_vocab=_frontend.n_vocab,", "np.asarray(seq, dtype=np.int32), int(speaker_id) else: return np.asarray(seq, dtype=np.int32) class _NPYDataSource(FileDataSource): def", "mask.expand_as(y) return (y * mask_).sum() / mask_.sum() def spec_loss(y_hat, y,", "len(input_lengths) - 1) input_length = input_lengths[idx] # Alignment # Multi-hop", "global_step) writer.add_scalar(\"linear_binary_div_loss\", float( linear_binary_div.data[0]), global_step) if train_seq2seq and hparams.use_guided_attention: writer.add_scalar(\"attn_loss\",", "lengths or mask\") # (B, T, 1) if mask is", "optimizer.load_state_dict(checkpoint[\"optimizer\"]) global_step = checkpoint[\"global_step\"] global_epoch = checkpoint[\"global_epoch\"] return model def", "use_cuda: if train_seq2seq: x = x.cuda() text_positions = text_positions.cuda() frame_positions", "mel_output, global_step) # Predicted spectrogram if linear_outputs is not None:", "dropout=hparams.dropout, kernel_size=hparams.kernel_size, encoder_channels=hparams.encoder_channels, decoder_channels=hparams.decoder_channels, converter_channels=hparams.converter_channels, use_memory_mask=hparams.use_memory_mask, trainable_positional_encodings=hparams.trainable_positional_encodings, force_monotonic_attention=hparams.force_monotonic_attention, use_decoder_state_for_postnet_input=hparams.use_decoder_state_for_postnet_input, max_positions=hparams.max_positions,", "= np.array([_pad_2d(x[2], max_target_len, b_pad=b_pad) for x in batch], dtype=np.float32) y_batch", "be trained 
train_seq2seq = args[\"--train-seq2seq-only\"] train_postnet = args[\"--train-postnet-only\"] # train", "train_postnet): if train_seq2seq and train_postnet: suffix = \"\" m =", "by speaker_id self.speaker_id = speaker_id def collect_files(self): meta = join(self.data_root,", "speaker_id in speaker_ids: speaker_str = \"multispeaker{}\".format(speaker_id) if speaker_id is not", "is not None: print(\"Load optimizer state from {}\".format(path)) optimizer.load_state_dict(checkpoint[\"optimizer\"]) global_step", "not train_seq2seq and not train_postnet: print(\"Training whole model\") train_seq2seq, train_postnet", "getattr(builder, hparams.builder)( n_speakers=hparams.n_speakers, speaker_embed_dim=hparams.speaker_embed_dim, n_vocab=_frontend.n_vocab, embed_dim=hparams.text_embed_dim, mel_dim=hparams.num_mels, linear_dim=hparams.fft_size // 2", "# https://discuss.pytorch.org/t/how-to-load-part-of-pre-trained-model/1113/3 def restore_parts(path, model): print(\"Restore part of the model", "p=0, speaker_id=speaker_id, fast=False) signal /= np.max(np.abs(signal)) # Alignment path =", "signal /= np.max(np.abs(signal)) path = join(checkpoint_dir, \"step{:09d}_predicted.wav\".format( global_step)) try: writer.add_audio(\"Predicted", "constant_values=1) for x in batch]) done = torch.FloatTensor(done).unsqueeze(-1) if multi_speaker:", "= s + batch_group_size random.shuffle(indices[s:e]) # Permutate batches if self.permutate:", "max_len).long() seq_range_expand = seq_range.unsqueeze(0).expand(batch_size, max_len) seq_range_expand = Variable(seq_range_expand) if sequence_length.is_cuda:", "+ done_loss elif train_seq2seq: loss = mel_loss + done_loss elif", "= 0 global_epoch = 0 use_cuda = torch.cuda.is_available() if use_cuda:", "None: load_checkpoint(checkpoint_seq2seq_path, model.seq2seq, optimizer, reset_optimizer) if checkpoint_path is not None:", "np.array([_pad(x[0], max_input_len) for x in batch], dtype=np.int) x_batch = torch.LongTensor(a)", "for x in batch], 
dtype=np.float32) mel_batch = torch.FloatTensor(b) c =", ":].contiguous() # Lengths input_lengths = input_lengths.long().numpy() decoder_lengths = target_lengths.long().numpy() //", "= np.zeros((B, max_target_len, max_input_len), dtype=np.float32) for b in range(B): W[b]", "= True, True if train_seq2seq: print(\"Training seq2seq model\") elif train_postnet:", "np.exp(-(n / N - t / T)**2 / (2 *", "assert max_target_len % downsample_step == 0 # Set 0 for", "frame_positions.cuda() if train_postnet: y = y.cuda() mel = mel.cuda() done,", "attn, mel, y, input_lengths, checkpoint_dir=None): print(\"Save intermediate states at step", "\"log/run-test\" + str(datetime.now()).replace(\" \", \"_\") print(\"Los event path: {}\".format(log_event_path)) writer", "train_postnet = True, True if train_seq2seq: print(\"Training seq2seq model\") elif", "else None torch.save({ \"state_dict\": m.state_dict(), \"optimizer\": optimizer_state, \"global_step\": step, \"global_epoch\":", "global_step, sample_rate=fs) except Exception as e: warn(str(e)) pass def save_states(global_step,", "in tqdm(enumerate(data_loader)): model.train() ismultispeaker = speaker_ids is not None #", "collate_fn=collate_fn, pin_memory=hparams.pin_memory) print(\"dataloader_prepared\") # Model model = build_model() if use_cuda:", "spectrogram if linear_outputs is not None: linear_output = linear_outputs[idx].cpu().data.numpy() spectrogram", "# idx = np.random.randint(0, len(input_lengths)) idx = min(1, len(input_lengths) -", "= torch.nn.utils.clip_grad_norm( model.get_trainable_parameters(), clip_thresh) optimizer.step() # Logs writer.add_scalar(\"loss\", float(loss.data[0]), global_step)", "mask=None, max_len=None): if lengths is None and mask is None:", "not None else \"single\" for idx, text in enumerate(texts): signal,", "mel_dim=hparams.num_mels, linear_dim=hparams.fft_size // 2 + 1, r=hparams.outputs_per_step, downsample_step=hparams.downsample_step, padding_idx=hparams.padding_idx, 
dropout=hparams.dropout,", "+ info plt.xlabel(xlabel) plt.ylabel('Encoder timestep') plt.tight_layout() plt.savefig(path, format='png') plt.close() class", "text_positions=text_positions, frame_positions=frame_positions, input_lengths=input_lengths) # reshape mel_outputs = mel_outputs.view(len(mel), -1, mel.size(-1))", "way to measure the acute emotional intelligence that has never", "preprocessed features. --checkpoint-dir=<dir> Directory where to save model checkpoints [default:", "1] spectrogram = (spectrogram - np.min(spectrogram)) / (np.max(spectrogram) - np.min(spectrogram))", "sequence_length.size(0) seq_range = torch.arange(0, max_len).long() seq_range_expand = seq_range.unsqueeze(0).expand(batch_size, max_len) seq_range_expand", "model. --train-postnet-only Train only postnet model. --restore-parts=<path> Restore part of", "input_length = input_lengths[idx] # Alignment # Multi-hop attention if attn", "= frame_positions.cuda() if train_postnet: y = y.cuda() mel = mel.cuda()", "linear_dim) linear_l1_loss, linear_binary_div = spec_loss( linear_outputs[:, :-r, :], y[:, r:,", "random import librosa.display from matplotlib import pyplot as plt import", "data_utils from torch.utils.data.sampler import Sampler import numpy as np from", "attn, done_hat, _ = model.seq2seq( x, mel, text_positions=text_positions, frame_positions=frame_positions, input_lengths=input_lengths)", "seq_length_expand).float() class MaskedL1Loss(nn.Module): def __init__(self): super(MaskedL1Loss, self).__init__() self.criterion = nn.L1Loss(size_average=False)", "in range(B): W[b] = guided_attention(input_lengths[b], max_input_len, target_lengths[b], max_target_len, g).T return", "= args[\"--checkpoint\"] checkpoint_seq2seq_path = args[\"--checkpoint-seq2seq\"] checkpoint_postnet_path = args[\"--checkpoint-postnet\"] load_embedding =", "= i * batch_group_size e = s + batch_group_size random.shuffle(indices[s:e])", "import hparams, hparams_debug_string fs = hparams.sample_rate global_step = 0 
global_epoch", "Mel = FileSourceDataset(MelSpecDataSource(data_root, speaker_id)) Y = FileSourceDataset(LinearSpecDataSource(data_root, speaker_id)) # Prepare", "def __init__(self): super(MaskedL1Loss, self).__init__() self.criterion = nn.L1Loss(size_average=False) def forward(self, input,", "y = y.cuda() mel = mel.cuda() done, target_lengths = done.cuda(),", "def __init__(self, data_root, speaker_id=None): super(MelSpecDataSource, self).__init__(data_root, 1, speaker_id) class LinearSpecDataSource(_NPYDataSource):", "model( x, mel, speaker_ids=speaker_ids, text_positions=text_positions, frame_positions=frame_positions, input_lengths=input_lengths) elif train_seq2seq: assert", "len(batch[0]) == 4 # Lengths input_lengths = [len(x[0]) for x", "is not None: load_checkpoint(checkpoint_postnet_path, model.postnet, optimizer, reset_optimizer) if checkpoint_seq2seq_path is", "num_workers=hparams.num_workers, sampler=sampler, collate_fn=collate_fn, pin_memory=hparams.pin_memory) print(\"dataloader_prepared\") # Model model = build_model()", "text-to-speech synthesis model. 
usage: train.py [options] options: --data-root=<dir> Directory contains", "% downsample_step != 0: max_target_len += downsample_step - max_target_len %", "Handle last elements s += batch_group_size if s < len(indices):", "W[b] = guided_attention(input_lengths[b], max_input_len, target_lengths[b], max_target_len, g).T return W def", "Group of 20 conference.\", \"Generative adversarial network or variational auto-encoder.\",", "if downsample_step > 1: mel = mel[:, 0::downsample_step, :].contiguous() #", "state = torch.load(path)[\"state_dict\"] model_dict = model.state_dict() valid_state_dict = {k: v", "last elements s += batch_group_size if s < len(indices): random.shuffle(indices[s:])", "D) mask_ = mask.expand_as(input) loss = self.criterion(input * mask_, target", "= np.array([_pad(np.zeros(len(x[1]) // r // downsample_step - 1), max_decoder_target_len, constant_values=1)", "input_lengths, mel_batch, y_batch, \\ (text_positions, frame_positions), done, target_lengths, speaker_ids def", "mel spectrogram\", mel_output, global_step) # Target spectrogram if linear_outputs is", "- np.min(spectrogram)) / (np.max(spectrogram) - np.min(spectrogram)) spectrogram = np.flip(spectrogram, axis=1)", "_, mel = synthesis.tts( model, text, p=0, speaker_id=speaker_id, fast=False) signal", "__init__(self, lengths, batch_size=16, batch_group_size=None, permutate=True): self.lengths, self.sorted_indices = torch.sort(torch.LongTensor(lengths)) self.batch_size", "batch_group_size = self.batch_group_size s, e = 0, 0 for i", "= len(input_lengths) max_input_len = input_lengths.max() W = np.zeros((B, max_target_len, max_input_len),", "global_step) global_step += 1 running_loss += loss.data[0] averaged_loss = running_loss", "= sequence_length.unsqueeze(1) \\ .expand_as(seq_range_expand) return (seq_range_expand < seq_length_expand).float() class MaskedL1Loss(nn.Module):", "__init__(self, data_root, col, speaker_id=None): self.data_root = data_root self.col = col", "or len(l) == 5 
multi_speaker = len(l) == 5 self.frame_lengths", "global_step) if train_seq2seq and hparams.use_guided_attention: writer.add_scalar(\"attn_loss\", float(attn_loss.data[0]), global_step) if clip_thresh", "= spec_loss( linear_outputs[:, :-r, :], y[:, r:, :], target_mask, priority_bin=n_priority_freq,", "+ (1 - w) * l1(y_hat[:, :, :priority_bin], y[:, :,", "Load checkpoints if checkpoint_postnet_path is not None: load_checkpoint(checkpoint_postnet_path, model.postnet, optimizer,", "done = Variable(done) target_lengths = Variable(target_lengths) speaker_ids = Variable(speaker_ids) if", "max_target_len, b_pad=b_pad) for x in batch], dtype=np.float32) y_batch = torch.FloatTensor(c)", "checkpoint_dir=checkpoint_dir, checkpoint_interval=hparams.checkpoint_interval, nepochs=hparams.nepochs, clip_thresh=hparams.clip_thresh, train_seq2seq=train_seq2seq, train_postnet=train_postnet) except KeyboardInterrupt: save_checkpoint( model,", "priority_w) * l1_loss + priority_w * priority_loss # Binary divergence", "import dirname, join from tqdm import tqdm, trange from datetime", "x, mel, speaker_ids=speaker_ids, text_positions=text_positions, frame_positions=frame_positions, input_lengths=input_lengths) elif train_seq2seq: assert speaker_ids", "max_target_len % r != 0: max_target_len += r - max_target_len", "attn, done_hat = model( x, mel, speaker_ids=speaker_ids, text_positions=text_positions, frame_positions=frame_positions, input_lengths=input_lengths)", "(len(data_loader)) writer.add_scalar(\"loss (per epoch)\", averaged_loss, global_epoch) print(\"Loss: {}\".format(running_loss / (len(data_loader))))", "audio signal\", signal, global_step, sample_rate=fs) except Exception as e: warn(str(e))", "attention if attn is not None and attn.dim() == 4:", "if w > 0: assert mask is not None l1_loss", "float(linear_l1_loss.data[0]), global_step) writer.add_scalar(\"linear_binary_div_loss\", float( linear_binary_div.data[0]), global_step) if train_seq2seq and hparams.use_guided_attention:", 
"== 4 or len(l) == 5 self.multi_speaker = len(l) ==", "if mask is None: mask = sequence_mask(lengths, max_len).unsqueeze(-1) # (B,", "script for seq2seq text-to-speech synthesis model. usage: train.py [options] options:", "mode='constant', constant_values=constant_values) def _pad_2d(x, max_len, b_pad=0): x = np.pad(x, [(b_pad,", "+ w * mel_binary_div # done: if train_seq2seq: done_loss =", "mel_outputs is not None: mel_output = mel_outputs[idx].cpu().data.numpy() mel_output = prepare_spec_image(dv3.audio._denormalize(mel_output))", "np.uint8(cm.viridis(np.flip(alignment, 1).T) * 255), global_step) # save files as well", "self.speaker_id paths = list(np.array(paths)[indices]) self.frame_lengths = list(np.array(self.frame_lengths)[indices]) # aha, need", "(fs * 0.5) * linear_dim) linear_l1_loss, linear_binary_div = spec_loss( linear_outputs[:,", "_load_embedding(path, model): state = torch.load(path)[\"state_dict\"] key = \"seq2seq.encoder.embed_tokens.weight\" model.seq2seq.encoder.embed_tokens.weight.data =", "hparams.outputs_per_step downsample_step = hparams.downsample_step multi_speaker = len(batch[0]) == 4 #", "dv3.deepvoice3_pytorch import frontend, builder import dv3.audio import dv3.lrschedule import torch", "l: int(l.decode(\"utf-8\").split(\"|\")[-1]), lines)) # Filter by speaker_id # using multi-speaker", "Multi-hop attention if attn is not None and attn.dim() ==", "to int self.frame_lengths = list(map(int, self.frame_lengths)) return paths def collect_features(self,", "paths = list(map(lambda f: join(self.data_root, f), paths)) if multi_speaker and", "= args[\"--checkpoint-seq2seq\"] checkpoint_postnet_path = args[\"--checkpoint-postnet\"] load_embedding = args[\"--load-embedding\"] checkpoint_restore_parts =", "torch.utils import data as data_utils from torch.autograd import Variable from", "try: writer.add_audio(\"Predicted audio signal\", signal, global_step, sample_rate=fs) except Exception as", "l: l.decode(\"utf-8\").split(\"|\")[3], lines)) if 
self.multi_speaker: speaker_ids = list(map(lambda l: int(l.decode(\"utf-8\").split(\"|\")[-1]),", "mel_output, global_step) # Target spectrogram if linear_outputs is not None:", "0, speaker_id) class PartialyRandomizedSimilarTimeLengthSampler(Sampler): \"\"\"Partially randmoized sampler 1. Sort by", "ax=ax) xlabel = 'Decoder timestep' if info is not None:", "a = np.array([_pad(x[0], max_input_len) for x in batch], dtype=np.int) x_batch", "import torch.backends.cudnn as cudnn from torch.utils import data as data_utils", "mask, priority_bin=None, priority_w=0): masked_l1 = MaskedL1Loss() l1 = nn.L1Loss() w", "except Exception as e: warn(str(e)) pass def save_states(global_step, writer, mel_outputs,", "done = np.array([_pad(np.zeros(len(x[1]) // r // downsample_step - 1), max_decoder_target_len,", "'\\n\\n' + info plt.xlabel(xlabel) plt.ylabel('Encoder timestep') plt.tight_layout() plt.savefig(path, format='png') plt.close()", "collect_features(self, path): return np.load(path) class MelSpecDataSource(_NPYDataSource): def __init__(self, data_root, speaker_id=None):", "cudnn from torch.utils import data as data_utils from torch.utils.data.sampler import", "hparams.presets[hparams.preset] import json hparams.parse_json(json.dumps(preset)) print(\"Override hyper parameters with preset \\\"{}\\\":", "librosa.display from matplotlib import pyplot as plt import sys import", "https://discuss.pytorch.org/t/how-to-load-part-of-pre-trained-model/1113/3 def restore_parts(path, model): print(\"Restore part of the model from:", "= nn.BCELoss() assert train_seq2seq or train_postnet global global_step, global_epoch while", "train(model, data_loader, optimizer, writer, init_lr=hparams.initial_learning_rate, checkpoint_dir=checkpoint_dir, checkpoint_interval=hparams.checkpoint_interval, nepochs=hparams.nepochs, clip_thresh=hparams.clip_thresh, train_seq2seq=train_seq2seq,", "alignment in enumerate(attn): alignment = alignment[idx].cpu().data.numpy() tag = 
\"alignment_layer{}\".format(i +", "force_monotonic_attention=hparams.force_monotonic_attention, use_decoder_state_for_postnet_input=hparams.use_decoder_state_for_postnet_input, max_positions=hparams.max_positions, speaker_embedding_weight_std=hparams.speaker_embedding_weight_std, freeze_embedding=hparams.freeze_embedding, window_ahead=hparams.window_ahead, window_backward=hparams.window_backward, key_projection=hparams.key_projection, value_projection=hparams.value_projection, )", "T)**2 / (2 * g * g)) return W def", "in batch], dtype=np.int) text_positions = torch.LongTensor(text_positions) max_decoder_target_len = max_target_len //", "sequence_length.data.max() batch_size = sequence_length.size(0) seq_range = torch.arange(0, max_len).long() seq_range_expand =", "mel_outputs, linear_outputs, attn, mel, y, input_lengths, checkpoint_dir=None): print(\"Save intermediate states", "D) mask_ = mask.expand_as(y) return (y * mask_).sum() / mask_.sum()", "for x in batch]) else: speaker_ids = None return x_batch,", "xlabel += '\\n\\n' + info plt.xlabel(xlabel) plt.ylabel('Encoder timestep') plt.tight_layout() plt.savefig(path,", "len(input_lengths) max_input_len = input_lengths.max() W = np.zeros((B, max_target_len, max_input_len), dtype=np.float32)", "speaker_id=None): self.data_root = data_root self.speaker_ids = None self.multi_speaker = False", "= Variable(seq_range_expand) if sequence_length.is_cuda: seq_range_expand = seq_range_expand.cuda() seq_length_expand = sequence_length.unsqueeze(1)", "multi_speaker = len(l) == 5 self.frame_lengths = list( map(lambda l:", "sequence_mask(lengths, max_len).unsqueeze(-1) # (B, T, D) mask_ = mask.expand_as(input) loss", "axis return np.uint8(cm.magma(spectrogram.T) * 255) def eval_model(global_step, writer, model, checkpoint_dir,", "using multi-speaker dataset as a single speaker dataset if self.speaker_id", "1), max_decoder_target_len, constant_values=1) for x in batch]) done = torch.FloatTensor(done).unsqueeze(-1)", "data in 
case for multi-speaker datasets. -h, --help Show this", "Learning rate schedule if hparams.lr_schedule is not None: lr_schedule_f =", "call Stella.\", \"Some have accepted this as a miracle without", "* masked_l1( y_hat[:, :, :priority_bin], y[:, :, :priority_bin], mask=mask) \\", "else: y_hat_logits = logit(y_hat) z = -y * y_hat_logits +", "optimizer_state = optimizer.state_dict() if hparams.save_optimizer_state else None torch.save({ \"state_dict\": m.state_dict(),", "= lines[0].decode(\"utf-8\").split(\"|\") assert len(l) == 4 or len(l) == 5", "- np.exp(-(n / N - t / T)**2 / (2", "random.shuffle(indices[s:]) return iter(indices) def __len__(self): return len(self.sorted_indices) class PyTorchDataset(object): def", "print(\"Override hyper parameters with preset \\\"{}\\\": {}\".format( hparams.preset, json.dumps(preset, indent=4)))", "None l1_loss = l1(y_hat, y) # Priority L1 loss if", "# Predicted mel spectrogram if mel_outputs is not None: mel_output", "model.state_dict() valid_state_dict = {k: v for k, v in state.items()", "* linear_binary_div # Combine losses if train_seq2seq and train_postnet: loss", "Variable(target_lengths) speaker_ids = Variable(speaker_ids) if ismultispeaker else None if use_cuda:", "return iter(indices) def __len__(self): return len(self.sorted_indices) class PyTorchDataset(object): def __init__(self,", "origin='lower', interpolation='none') fig.colorbar(im, ax=ax) xlabel = 'Decoder timestep' if info", "1 frame_positions = torch.arange(s, e).long().unsqueeze(0).expand( len(batch), max_decoder_target_len) # done flags", "self.lengths, self.sorted_indices = torch.sort(torch.LongTensor(lengths)) self.batch_size = batch_size if batch_group_size is", "# Load checkpoints if checkpoint_postnet_path is not None: load_checkpoint(checkpoint_postnet_path, model.postnet,", "= decoder_target_mask # shift mask decoder_target_mask = decoder_target_mask[:, r:, :]", "model = model.cuda() optimizer = optim.Adam(model.get_trainable_parameters(), 
lr=hparams.initial_learning_rate, betas=( hparams.adam_beta1, hparams.adam_beta2),", "global_step) writer.add_scalar(\"mel_l1_loss\", float(mel_l1_loss.data[0]), global_step) writer.add_scalar(\"mel_binary_div_loss\", float(mel_binary_div.data[0]), global_step) if train_postnet: writer.add_scalar(\"linear_loss\",", "self.permutate: perm = np.arange(len(indices[:e]) // self.batch_size) random.shuffle(perm) indices[:e] = indices[:e].view(-1,", "reset_optimizer) if checkpoint_seq2seq_path is not None: load_checkpoint(checkpoint_seq2seq_path, model.seq2seq, optimizer, reset_optimizer)", "done_hat = None, None, None # Losses w = hparams.binary_divergence_weight", "None: mel_output = mel[idx].cpu().data.numpy() mel_output = prepare_spec_image(dv3.audio._denormalize(mel_output)) writer.add_image(\"Target mel spectrogram\",", "path. --reset-optimizer Reset optimizer. --load-embedding=<path> Load embedding from checkpoint. --speaker-id=<N>", "at step {}\".format(global_step)) # idx = np.random.randint(0, len(input_lengths)) idx =", "* masked_mean(z, mask) + (1 - w) * z.mean() else:", "speaker_ids = Variable(speaker_ids) if ismultispeaker else None if use_cuda: if", "freq axis return np.uint8(cm.magma(spectrogram.T) * 255) def eval_model(global_step, writer, model,", "parameters [default: ]. 
--checkpoint=<path> Restore model from checkpoint path if", "writer.add_image(\"(Eval) Predicted mel spectrogram text{}_{}\".format(idx, speaker_str), prepare_spec_image(mel), global_step) # Audio", "1) writer.add_image(tag, np.uint8(cm.viridis(np.flip(alignment, 1).T) * 255), global_step) # save files", "max_input_len = max(input_lengths) target_lengths = [len(x[1]) for x in batch]", "in batch], dtype=np.float32) mel_batch = torch.FloatTensor(b) c = np.array([_pad_2d(x[2], max_target_len,", "the Group of 20 conference.\", \"Generative adversarial network or variational", "alignment_dir = join(checkpoint_dir, \"alignment_layer{}\".format(i + 1)) os.makedirs(alignment_dir, exist_ok=True) path =", "return np.pad(seq, (0, max_len - len(seq)), mode='constant', constant_values=constant_values) def _pad_2d(x,", "max_target_len % r == 0 if max_target_len % downsample_step !=", "open(meta, \"rb\") as f: lines = f.readlines() l = lines[0].decode(\"utf-8\").split(\"|\")", "- 1 frame_positions = torch.arange(s, e).long().unsqueeze(0).expand( len(batch), max_decoder_target_len) # done", "not None: mel_output = mel_outputs[idx].cpu().data.numpy() mel_output = prepare_spec_image(dv3.audio._denormalize(mel_output)) writer.add_image(\"Predicted mel", "T, D) mask_ = mask.expand_as(y) return (y * mask_).sum() /", "train_seq2seq and train_postnet: mel_outputs, linear_outputs, attn, done_hat = model( x,", "mel_outputs, linear_outputs, attn, done_hat = model( x, mel, speaker_ids=speaker_ids, text_positions=text_positions,", "soft_mask.cuda() if use_cuda else soft_mask attn_loss = (attn * soft_mask).mean()", "model, optimizer, reset_optimizer) # Load embedding if load_embedding is not", "info is not None: xlabel += '\\n\\n' + info plt.xlabel(xlabel)", "MaskedL1Loss() l1 = nn.L1Loss() w = hparams.masked_loss_weight # L1 loss", "self.Mel[idx], self.Y[idx] def __len__(self): return len(self.X) def sequence_mask(sequence_length, max_len=None): if", "* mask_, target * mask_) return loss / 
mask_.sum() def", "input_lengths, mel, y, positions, done, target_lengths, speaker_ids) \\ in tqdm(enumerate(data_loader)):", "return text, self.Mel[idx], self.Y[idx], speaker_id else: return self.X[idx], self.Mel[idx], self.Y[idx]", "# Lengths input_lengths = input_lengths.long().numpy() decoder_lengths = target_lengths.long().numpy() // r", "spec_loss(y_hat, y, mask, priority_bin=None, priority_w=0): masked_l1 = MaskedL1Loss() l1 =", "= 1 - np.exp(-(n / N - t / T)**2", "else: speaker_ids = None return x_batch, input_lengths, mel_batch, y_batch, \\", "checkpoint_postnet_path is not None: load_checkpoint(checkpoint_postnet_path, model.postnet, optimizer, reset_optimizer) if checkpoint_seq2seq_path", "0 self.permutate = permutate def __iter__(self): indices = self.sorted_indices.clone() batch_group_size", "= 'Decoder timestep' if info is not None: xlabel +=", "w > 0: binary_div = w * masked_mean(z, mask) +", "float(mel_l1_loss.data[0]), global_step) writer.add_scalar(\"mel_binary_div_loss\", float(mel_binary_div.data[0]), global_step) if train_postnet: writer.add_scalar(\"linear_loss\", float(linear_loss.data[0]), global_step)", "None: print(\"Load optimizer state from {}\".format(path)) optimizer.load_state_dict(checkpoint[\"optimizer\"]) global_step = checkpoint[\"global_step\"]", "= args[\"--reset-optimizer\"] # Which model to be trained train_seq2seq =", "init_lr=hparams.initial_learning_rate, checkpoint_dir=checkpoint_dir, checkpoint_interval=hparams.checkpoint_interval, nepochs=hparams.nepochs, clip_thresh=hparams.clip_thresh, train_seq2seq=train_seq2seq, train_postnet=train_postnet) except KeyboardInterrupt: save_checkpoint(", "writer = SummaryWriter(log_dir=log_event_path) # Train! 
try: train(model, data_loader, optimizer, writer,", "acute emotional intelligence that has never gone out of style.\",", "and train_postnet: suffix = \"\" m = model elif train_seq2seq:", "model elif train_seq2seq: suffix = \"_seq2seq\" m = model.seq2seq elif", "\"__main__\": args = docopt(__doc__) print(\"Command line args:\\n\", args) checkpoint_dir =", "return model def load_checkpoint(path, model, optimizer, reset_optimizer): global global_step global", "32, len(self.lengths)) if batch_group_size % batch_size != 0: batch_group_size -=", "= X.file_data_source.multi_speaker def __getitem__(self, idx): if self.multi_speaker: text, speaker_id =", "model.seq2seq elif train_postnet: suffix = \"_postnet\" m = model.postnet checkpoint_path", "signal, global_step, sample_rate=fs) except Exception as e: warn(str(e)) pass dv3.audio.save_wav(signal,", "checkpoint_dir, ismultispeaker): # harded coded texts = [ \"Scientists at", "linear: if train_postnet: n_priority_freq = int(hparams.priority_freq / (fs * 0.5)", "Sort by lengths 2. 
Pick a small patch and randomize", "aha, need to cast numpy.int64 to int self.frame_lengths = list(map(int,", "import optim import torch.backends.cudnn as cudnn from torch.utils import data", "r = hparams.outputs_per_step downsample_step = hparams.downsample_step current_lr = init_lr binary_criterion", "Variable(y.data.new(1).zero_()) else: y_hat_logits = logit(y_hat) z = -y * y_hat_logits", "elif train_postnet: print(\"Training postnet model\") else: assert False, \"must be", "== 5 multi_speaker = len(l) == 5 self.frame_lengths = list(", "= linear_outputs[idx].cpu().data.numpy() spectrogram = prepare_spec_image(dv3.audio._denormalize(linear_output)) writer.add_image(\"Predicted linear spectrogram\", spectrogram, global_step)", "speaker_embedding_weight_std=hparams.speaker_embedding_weight_std, freeze_embedding=hparams.freeze_embedding, window_ahead=hparams.window_ahead, window_backward=hparams.window_backward, key_projection=hparams.key_projection, value_projection=hparams.value_projection, ) return model def", "int(l.decode(\"utf-8\").split(\"|\")[-1]), lines)) # Filter by speaker_id # using multi-speaker dataset", "betas=( hparams.adam_beta1, hparams.adam_beta2), eps=hparams.adam_eps, weight_decay=hparams.weight_decay) if checkpoint_restore_parts is not None:", "g).T return W def train(model, data_loader, optimizer, writer, init_lr=0.002, checkpoint_dir=None,", "data_root, speaker_id=None): super(MelSpecDataSource, self).__init__(data_root, 1, speaker_id) class LinearSpecDataSource(_NPYDataSource): def __init__(self,", "= model elif train_seq2seq: suffix = \"_seq2seq\" m = model.seq2seq", "not None and hparams.preset != \"\": preset = hparams.presets[hparams.preset] import", "= torch.FloatTensor(done).unsqueeze(-1) if multi_speaker: speaker_ids = torch.LongTensor([x[3] for x in", "torch.save({ \"state_dict\": m.state_dict(), \"optimizer\": optimizer_state, \"global_step\": step, \"global_epoch\": epoch, },", "of data in case for multi-speaker datasets. 
-h, --help Show", "Feed data x, mel, y = Variable(x), Variable(mel), Variable(y) text_positions", "linear_loss = (1 - w) * linear_l1_loss + w *", "def logit(x, eps=1e-8): return torch.log(x + eps) - torch.log(1 -", "attn_loss = (attn * soft_mask).mean() loss += attn_loss if global_step", "is not None: indices = np.array(speaker_ids) == self.speaker_id texts =", "text, speaker_id = args else: text = args[0] seq =", "initial decoder states b_pad = r max_target_len += b_pad *", "w > 0: assert mask is not None l1_loss =", "mel: if train_seq2seq: mel_l1_loss, mel_binary_div = spec_loss( mel_outputs[:, :-r, :],", "# Multi-hop attention if attn is not None and attn.dim()", "y[:, r:, :], target_mask, priority_bin=n_priority_freq, priority_w=hparams.priority_freq_weight) linear_loss = (1 -", "--hparams=<parmas> Hyper parameters [default: ]. --checkpoint=<path> Restore model from checkpoint", "\"data\", \"ljspeech\") log_event_path = args[\"--log-event-path\"] reset_optimizer = args[\"--reset-optimizer\"] # Which", "__iter__(self): indices = self.sorted_indices.clone() batch_group_size = self.batch_group_size s, e =", "text, self.Mel[idx], self.Y[idx], speaker_id else: return self.X[idx], self.Mel[idx], self.Y[idx] def", ":priority_bin]) l1_loss = (1 - priority_w) * l1_loss + priority_w", "# Feed data x, mel, y = Variable(x), Variable(mel), Variable(y)", "torch.log(1 - x + eps) def masked_mean(y, mask): # (B,", "dtype=np.int) text_positions = torch.LongTensor(text_positions) max_decoder_target_len = max_target_len // r //", "is None: max_len = sequence_length.data.max() batch_size = sequence_length.size(0) seq_range =", "batch_group_size = min(batch_size * 32, len(self.lengths)) if batch_group_size % batch_size", "leaders at the Group of 20 conference.\", \"Generative adversarial network", "epoch, train_seq2seq, train_postnet): if train_seq2seq and train_postnet: suffix = \"\"", "signal = dv3.audio.inv_spectrogram(linear_output.T) signal /= np.max(np.abs(signal)) path = 
join(checkpoint_dir, \"step{:09d}_predicted.wav\".format(", "1)) os.makedirs(alignment_dir, exist_ok=True) path = join(alignment_dir, \"step{:09d}_layer_{}_alignment.png\".format( global_step, i +", "Directory where to save model checkpoints [default: checkpoints]. --hparams=<parmas> Hyper", "speaker_ids else: return texts def collect_features(self, *args): if self.multi_speaker: text,", "print(\"Restore part of the model from: {}\".format(path)) state = torch.load(path)[\"state_dict\"]", "optim.Adam(model.get_trainable_parameters(), lr=hparams.initial_learning_rate, betas=( hparams.adam_beta1, hparams.adam_beta2), eps=hparams.adam_eps, weight_decay=hparams.weight_decay) if checkpoint_restore_parts is", "None torch.save({ \"state_dict\": m.state_dict(), \"optimizer\": optimizer_state, \"global_step\": step, \"global_epoch\": epoch,", "b_pad=0): x = np.pad(x, [(b_pad, max_len - len(x) - b_pad),", "[0, 1, 10] if ismultispeaker else [None] for speaker_id in", "= \"log/run-test\" + str(datetime.now()).replace(\" \", \"_\") print(\"Los event path: {}\".format(log_event_path))", "model if train_seq2seq and train_postnet: mel_outputs, linear_outputs, attn, done_hat =", "Filter by speaker_id # using multi-speaker dataset as a single", "done) # linear: if train_postnet: n_priority_freq = int(hparams.priority_freq / (fs", "if train_seq2seq: writer.add_scalar(\"done_loss\", float(done_loss.data[0]), global_step) writer.add_scalar(\"mel loss\", float(mel_loss.data[0]), global_step) writer.add_scalar(\"mel_l1_loss\",", "max_T, g): W = np.zeros((max_N, max_T), dtype=np.float32) for n in", "np.uint8(cm.viridis(np.flip(alignment, 1).T) * 255), global_step) # Mel writer.add_image(\"(Eval) Predicted mel", "= w * masked_l1( y_hat[:, :, :priority_bin], y[:, :, :priority_bin],", "0: if w > 0: priority_loss = w * masked_l1(", "speaker_id=None): super(MelSpecDataSource, self).__init__(data_root, 1, speaker_id) class LinearSpecDataSource(_NPYDataSource): def __init__(self, data_root,", "0 
if max_target_len % downsample_step != 0: max_target_len += downsample_step", "writer.add_image(\"Target mel spectrogram\", mel_output, global_step) # Target spectrogram if linear_outputs", "model.seq2seq.encoder.embed_tokens.weight.data = state[key] # https://discuss.pytorch.org/t/how-to-load-part-of-pre-trained-model/1113/3 def restore_parts(path, model): print(\"Restore part", "multi-speaker dataset as a single speaker dataset if self.speaker_id is", "signal, global_step, sample_rate=fs) except Exception as e: warn(str(e)) pass def", "Show this help message and exit \"\"\" from docopt import", "self.multi_speaker = False # If not None, filter by speaker_id", "explanation.\", ] import dv3.synthesis synthesis._frontend = _frontend eval_output_dir = join(checkpoint_dir,", "dtype=np.float32) mel_batch = torch.FloatTensor(b) c = np.array([_pad_2d(x[2], max_target_len, b_pad=b_pad) for", "train_postnet: writer.add_scalar(\"linear_loss\", float(linear_loss.data[0]), global_step) writer.add_scalar(\"linear_l1_loss\", float(linear_l1_loss.data[0]), global_step) writer.add_scalar(\"linear_binary_div_loss\", float( linear_binary_div.data[0]),", "import data as data_utils from torch.autograd import Variable from torch", "# Input dataset definitions X = FileSourceDataset(TextDataSource(data_root, speaker_id)) Mel =", "\"ljspeech\") log_event_path = args[\"--log-event-path\"] reset_optimizer = args[\"--reset-optimizer\"] # Which model", "\"deepvoice3\" # Presets if hparams.preset is not None and hparams.preset", "for param_group in optimizer.param_groups: param_group['lr'] = current_lr optimizer.zero_grad() # Used", "len(x) - b_pad), (0, 0)], mode=\"constant\", constant_values=0) return x def", "lines[0].decode(\"utf-8\").split(\"|\") assert len(l) == 4 or len(l) == 5 multi_speaker", "(len(data_loader)))) global_epoch += 1 def save_checkpoint(model, optimizer, step, checkpoint_dir, epoch,", "len(l) == 4 or len(l) == 5 self.multi_speaker = len(l)", "from os.path import 
dirname, join from tqdm import tqdm, trange", "self.Y[idx], speaker_id else: return self.X[idx], self.Mel[idx], self.Y[idx] def __len__(self): return", "not specified if not train_seq2seq and not train_postnet: print(\"Training whole", "# using multi-speaker dataset as a single speaker dataset indices", "batch_size if batch_group_size is None: batch_group_size = min(batch_size * 32,", "train_seq2seq: done_loss = binary_criterion(done_hat, done) # linear: if train_postnet: n_priority_freq", "positions s, e = 1, max_decoder_target_len + 1 # if", "from matplotlib import pyplot as plt import sys import os", "eps) - torch.log(1 - x + eps) def masked_mean(y, mask):", "input_lengths, checkpoint_dir=None): print(\"Save intermediate states at step {}\".format(global_step)) # idx", "writer.add_scalar(\"loss\", float(loss.data[0]), global_step) if train_seq2seq: writer.add_scalar(\"done_loss\", float(done_loss.data[0]), global_step) writer.add_scalar(\"mel loss\",", "randmoized sampler 1. Sort by lengths 2. Pick a small", "target_lengths.long().numpy() // r // downsample_step # Feed data x, mel,", "import dv3.lrschedule import torch from torch.utils import data as data_utils", "x_batch = torch.LongTensor(a) input_lengths = torch.LongTensor(input_lengths) target_lengths = torch.LongTensor(target_lengths) b", "adversarial network or variational auto-encoder.\", \"Please call Stella.\", \"Some have", "elif train_postnet: suffix = \"_postnet\" m = model.postnet checkpoint_path =", "checkpoint_dir=None): print(\"Save intermediate states at step {}\".format(global_step)) # idx =", "x, mel, text_positions=text_positions, frame_positions=frame_positions, input_lengths=input_lengths) # reshape mel_outputs = mel_outputs.view(len(mel),", "= None, None # Apply model if train_seq2seq and train_postnet:", "model checkpoints [default: checkpoints]. --hparams=<parmas> Hyper parameters [default: ]. 
--checkpoint=<path>", "max_positions=hparams.max_positions, speaker_embedding_weight_std=hparams.speaker_embedding_weight_std, freeze_embedding=hparams.freeze_embedding, window_ahead=hparams.window_ahead, window_backward=hparams.window_backward, key_projection=hparams.key_projection, value_projection=hparams.value_projection, ) return model", "= \"alignment_layer{}\".format(i + 1) writer.add_image(tag, np.uint8(cm.viridis(np.flip(alignment, 1).T) * 255), global_step)", "1, r=hparams.outputs_per_step, downsample_step=hparams.downsample_step, padding_idx=hparams.padding_idx, dropout=hparams.dropout, kernel_size=hparams.kernel_size, encoder_channels=hparams.encoder_channels, decoder_channels=hparams.decoder_channels, converter_channels=hparams.converter_channels, use_memory_mask=hparams.use_memory_mask,", "as data_utils from torch.autograd import Variable from torch import nn", "fig.colorbar(im, ax=ax) xlabel = 'Decoder timestep' if info is not", "encoding text_positions, frame_positions = positions # Downsample mel spectrogram if", "hparams.sample_rate global_step = 0 global_epoch = 0 use_cuda = torch.cuda.is_available()", "fig, ax = plt.subplots() im = ax.imshow( alignment, aspect='auto', origin='lower',", "mask=mask) \\ + (1 - w) * l1(y_hat[:, :, :priority_bin],", "path = join(alignment_dir, \"step{:09d}_layer_{}_alignment.png\".format( global_step, i + 1)) save_alignment(path, alignment)", "t / T)**2 / (2 * g * g)) return", "= self.X[idx] return text, self.Mel[idx], self.Y[idx], speaker_id else: return self.X[idx],", "style.\", \"President Trump met with other leaders at the Group", "writer.add_image(tag, np.uint8(cm.viridis(np.flip(alignment, 1).T) * 255), global_step) # Predicted mel spectrogram", "checkpoint_dir, epoch, train_seq2seq, train_postnet): if train_seq2seq and train_postnet: suffix =", "meta = join(self.data_root, \"train.txt\") with open(meta, \"rb\") as f: lines", "w * linear_binary_div # Combine losses if train_seq2seq and train_postnet:", 
"alignment, aspect='auto', origin='lower', interpolation='none') fig.colorbar(im, ax=ax) xlabel = 'Decoder timestep'", "from torch import nn from torch import optim import torch.backends.cudnn", "= Variable(speaker_ids) if ismultispeaker else None if use_cuda: if train_seq2seq:", "dtype=np.int32) class _NPYDataSource(FileDataSource): def __init__(self, data_root, col, speaker_id=None): self.data_root =", "# mel: if train_seq2seq: mel_l1_loss, mel_binary_div = spec_loss( mel_outputs[:, :-r,", "!= 0: max_target_len += r - max_target_len % r assert", "mel_outputs, attn, done_hat, _ = model.seq2seq( x, mel, text_positions=text_positions, frame_positions=frame_positions,", "* downsample_step), max_len=mel.size(1)).unsqueeze(-1) if downsample_step > 1: # spectrogram-domain mask", "assert batch_group_size % batch_size == 0 self.permutate = permutate def", "args[\"--log-event-path\"] reset_optimizer = args[\"--reset-optimizer\"] # Which model to be trained", "(B, T, D) mask_ = mask.expand_as(input) loss = self.criterion(input *", "# Model model = build_model() if use_cuda: model = model.cuda()", "join(self.data_root, f), paths)) if multi_speaker and self.speaker_id is not None:", "# Learning rate schedule if hparams.lr_schedule is not None: lr_schedule_f", "a single speaker dataset if self.speaker_id is not None: indices", "# Alignment path = join(eval_output_dir, \"step{:09d}_text{}_{}_alignment.png\".format( global_step, idx, speaker_str)) save_alignment(path,", "L1 loss if w > 0: assert mask is not", "torch.load(path)[\"state_dict\"] key = \"seq2seq.encoder.embed_tokens.weight\" model.seq2seq.encoder.embed_tokens.weight.data = state[key] # https://discuss.pytorch.org/t/how-to-load-part-of-pre-trained-model/1113/3 def", "input_lengths, checkpoint_dir) save_checkpoint( model, optimizer, global_step, checkpoint_dir, global_epoch, train_seq2seq, train_postnet)", "= (1 - priority_w) * l1_loss + priority_w * priority_loss", "at the Group of 20 conference.\", \"Generative 
adversarial network or", "self.Y[idx] def __len__(self): return len(self.X) def sequence_mask(sequence_length, max_len=None): if max_len", "global global_epoch print(\"Load checkpoint from: {}\".format(path)) checkpoint = torch.load(path) model.load_state_dict(checkpoint[\"state_dict\"])", "args[\"--data-root\"] if data_root is None: data_root = join(dirname(__file__), \"data\", \"ljspeech\")", "save_alignment(path, alignment) tag = \"averaged_alignment\" writer.add_image(tag, np.uint8(cm.viridis(np.flip(alignment, 1).T) * 255),", "specified wrong args\" # Override hyper parameters hparams.parse(args[\"--hparams\"]) print(hparams_debug_string()) assert", "seq_range_expand = Variable(seq_range_expand) if sequence_length.is_cuda: seq_range_expand = seq_range_expand.cuda() seq_length_expand =", "*args): if self.multi_speaker: text, speaker_id = args else: text =", "and randomize it 3. Permutate mini-batchs \"\"\" def __init__(self, lengths,", "= np.arange(len(indices[:e]) // self.batch_size) random.shuffle(perm) indices[:e] = indices[:e].view(-1, self.batch_size)[perm, :].view(-1)", "--log-event-path=<name> Log event path. --reset-optimizer Reset optimizer. --load-embedding=<path> Load embedding", "np.min(spectrogram)) / (np.max(spectrogram) - np.min(spectrogram)) spectrogram = np.flip(spectrogram, axis=1) #", "None, None, None # Losses w = hparams.binary_divergence_weight # mel:", "b = np.array([_pad_2d(x[1], max_target_len, b_pad=b_pad) for x in batch], dtype=np.float32)", "patch and randomize it 3. 
Permutate mini-batchs \"\"\" def __init__(self,", "= batch_group_size assert batch_group_size % batch_size == 0 self.permutate =", "args else: text = args[0] seq = _frontend.text_to_sequence(text, p=hparams.replace_pronunciation_prob) if", "binary_div @jit(nopython=True) def guided_attention(N, max_N, T, max_T, g): W =", "l1_loss = l1(y_hat, y) # Priority L1 loss if priority_bin", "writer.add_scalar(\"mel_binary_div_loss\", float(mel_binary_div.data[0]), global_step) if train_postnet: writer.add_scalar(\"linear_loss\", float(linear_loss.data[0]), global_step) writer.add_scalar(\"linear_l1_loss\", float(linear_l1_loss.data[0]),", "path. --checkpoint-postnet=<path> Restore postnet model from checkpoint path. --train-seq2seq-only Train", "e = s - 1, e - 1 frame_positions =", "= torch.arange(s, e).long().unsqueeze(0).expand( len(batch), max_decoder_target_len) # done flags done =", "seq_range.unsqueeze(0).expand(batch_size, max_len) seq_range_expand = Variable(seq_range_expand) if sequence_length.is_cuda: seq_range_expand = seq_range_expand.cuda()", "args:\\n\", args) checkpoint_dir = args[\"--checkpoint-dir\"] checkpoint_path = args[\"--checkpoint\"] checkpoint_seq2seq_path =", "checkpoint_dir = args[\"--checkpoint-dir\"] checkpoint_path = args[\"--checkpoint\"] checkpoint_seq2seq_path = args[\"--checkpoint-seq2seq\"] checkpoint_postnet_path", "a way to measure the acute emotional intelligence that has", "1 - np.exp(-(n / N - t / T)**2 /", "* g * g)) return W def guided_attentions(input_lengths, target_lengths, max_target_len,", "elif train_postnet: assert speaker_ids is None linear_outputs = model.postnet(mel) mel_outputs,", "Apply model if train_seq2seq and train_postnet: mel_outputs, linear_outputs, attn, done_hat", "= s - 1, e - 1 frame_positions = torch.arange(s,", "target_lengths = Variable(target_lengths) speaker_ids = Variable(speaker_ids) if ismultispeaker else None", "clip_thresh > 0: writer.add_scalar(\"gradient norm\", grad_norm, global_step) 
writer.add_scalar(\"learning rate\", current_lr,", "global_epoch < nepochs: running_loss = 0. for step, (x, input_lengths,", "= args[\"--load-embedding\"] checkpoint_restore_parts = args[\"--restore-parts\"] speaker_id = args[\"--speaker-id\"] speaker_id =", "and hparams.preset != \"\": preset = hparams.presets[hparams.preset] import json hparams.parse_json(json.dumps(preset))", "is not None: load_checkpoint(checkpoint_path, model, optimizer, reset_optimizer) # Load embedding", "if given. --checkpoint-seq2seq=<path> Restore seq2seq model from checkpoint path. --checkpoint-postnet=<path>", "max_decoder_target_len) # done flags done = np.array([_pad(np.zeros(len(x[1]) // r //", "divergence loss if hparams.binary_divergence_weight <= 0: binary_div = Variable(y.data.new(1).zero_()) else:", "path) # Target mel spectrogram if mel_outputs is not None:", "return W def guided_attentions(input_lengths, target_lengths, max_target_len, g=0.2): B = len(input_lengths)", "False return texts return texts, speaker_ids else: return texts def", "use_cuda: cudnn.benchmark = False _frontend = None # to be", "> 0 and global_step % hparams.eval_interval == 0: eval_model(global_step, writer,", "log_event_path is None: log_event_path = \"log/run-test\" + str(datetime.now()).replace(\" \", \"_\")", "Pick a small patch and randomize it 3. Permutate mini-batchs", "from checkpoint path. 
--checkpoint-postnet=<path> Restore postnet model from checkpoint path.", "def time_string(): return datetime.now().strftime('%Y-%m-%d %H:%M') def save_alignment(path, attn): plot_alignment(attn.T, path,", "optim import torch.backends.cudnn as cudnn from torch.utils import data as", "warn from dv3.hparams import hparams, hparams_debug_string fs = hparams.sample_rate global_step", "dv3.audio.save_wav(signal, path) # Target mel spectrogram if mel_outputs is not", "reset_optimizer): global global_step global global_epoch print(\"Load checkpoint from: {}\".format(path)) checkpoint", "linear_outputs, attn, mel, y, input_lengths, checkpoint_dir) save_checkpoint( model, optimizer, global_step,", "optimizer_state is not None: print(\"Load optimizer state from {}\".format(path)) optimizer.load_state_dict(checkpoint[\"optimizer\"])", "# spectrogram-domain mask target_mask = sequence_mask( target_lengths, max_len=y.size(1)).unsqueeze(-1) else: target_mask", "def collect_features(self, path): return np.load(path) class MelSpecDataSource(_NPYDataSource): def __init__(self, data_root,", "attn is not None and attn.dim() == 4: for i,", "torch.LongTensor(a) input_lengths = torch.LongTensor(input_lengths) target_lengths = torch.LongTensor(target_lengths) b = np.array([_pad_2d(x[1],", "this as a miracle without any physical explanation.\", ] import", ":, :priority_bin], y[:, :, :priority_bin], mask=mask) \\ + (1 -", "spectrogram\", mel_output, global_step) # Target spectrogram if linear_outputs is not", "decoder output domain mask decoder_target_mask = sequence_mask( target_lengths / (r", "def save_states(global_step, writer, mel_outputs, linear_outputs, attn, mel, y, input_lengths, checkpoint_dir=None):", "speaker_str), signal, global_step, sample_rate=fs) except Exception as e: warn(str(e)) pass", "linear_outputs is not None: linear_output = y[idx].cpu().data.numpy() spectrogram = prepare_spec_image(dv3.audio._denormalize(linear_output))", "None: indices = np.array(speaker_ids) == 
self.speaker_id texts = list(np.array(texts)[indices]) self.multi_speaker", "g)) return W def guided_attentions(input_lengths, target_lengths, max_target_len, g=0.2): B =", "# Save averaged alignment alignment_dir = join(checkpoint_dir, \"alignment_ave\") os.makedirs(alignment_dir, exist_ok=True)", ".expand_as(seq_range_expand) return (seq_range_expand < seq_length_expand).float() class MaskedL1Loss(nn.Module): def __init__(self): super(MaskedL1Loss,", "frame_positions = torch.arange(s, e).long().unsqueeze(0).expand( len(batch), max_decoder_target_len) # done flags done", "i * batch_group_size e = s + batch_group_size random.shuffle(indices[s:e]) #", "writer, model, checkpoint_dir, ismultispeaker) # Update loss.backward() if clip_thresh >", "global_step global global_epoch print(\"Load checkpoint from: {}\".format(path)) checkpoint = torch.load(path)", "path if given. --checkpoint-seq2seq=<path> Restore seq2seq model from checkpoint path.", "loss / mask_.sum() def collate_fn(batch): \"\"\"Create batch\"\"\" r = hparams.outputs_per_step", "= attn.mean(0)[idx].cpu().data.numpy() save_alignment(path, alignment) tag = \"averaged_alignment\" writer.add_image(tag, np.uint8(cm.viridis(np.flip(alignment, 1).T)", "len(x[0]) + 1), max_input_len) for x in batch], dtype=np.int) text_positions", "T, max_T, g): W = np.zeros((max_N, max_T), dtype=np.float32) for n", "def build_model(): model = getattr(builder, hparams.builder)( n_speakers=hparams.n_speakers, speaker_embed_dim=hparams.speaker_embed_dim, n_vocab=_frontend.n_vocab, embed_dim=hparams.text_embed_dim,", "text_positions = np.array([_pad(np.arange(1, len(x[0]) + 1), max_input_len) for x in", "/ T)**2 / (2 * g * g)) return W", "= args[\"--checkpoint-postnet\"] load_embedding = args[\"--load-embedding\"] checkpoint_restore_parts = args[\"--restore-parts\"] speaker_id =", "print(\"Training seq2seq model\") elif train_postnet: print(\"Training postnet model\") else: assert", "5 texts = list(map(lambda l: 
l.decode(\"utf-8\").split(\"|\")[3], lines)) if self.multi_speaker: speaker_ids", "as a miracle without any physical explanation.\", ] import dv3.synthesis", "step, (x, input_lengths, mel, y, positions, done, target_lengths, speaker_ids) \\", "param_group in optimizer.param_groups: param_group['lr'] = current_lr optimizer.zero_grad() # Used for", "4: for i, alignment in enumerate(attn): alignment = alignment[idx].cpu().data.numpy() tag", "batch_size=16, batch_group_size=None, permutate=True): self.lengths, self.sorted_indices = torch.sort(torch.LongTensor(lengths)) self.batch_size = batch_size", ") return model def load_checkpoint(path, model, optimizer, reset_optimizer): global global_step", "str(datetime.now()).replace(\" \", \"_\") print(\"Los event path: {}\".format(log_event_path)) writer = SummaryWriter(log_dir=log_event_path)", "mask decoder_target_mask = decoder_target_mask[:, r:, :] target_mask = target_mask[:, r:,", "init_lr=0.002, checkpoint_dir=None, checkpoint_interval=None, nepochs=None, clip_thresh=1.0, train_seq2seq=True, train_postnet=True): if use_cuda: model", "speaker_id)) Y = FileSourceDataset(LinearSpecDataSource(data_root, speaker_id)) # Prepare sampler frame_lengths =", "\"seq2seq.encoder.embed_tokens.weight\" model.seq2seq.encoder.embed_tokens.weight.data = state[key] # https://discuss.pytorch.org/t/how-to-load-part-of-pre-trained-model/1113/3 def restore_parts(path, model): print(\"Restore", "torch import optim import torch.backends.cudnn as cudnn from torch.utils import", "sys import os from tensorboardX import SummaryWriter from matplotlib import", "is not None: linear_output = y[idx].cpu().data.numpy() spectrogram = prepare_spec_image(dv3.audio._denormalize(linear_output)) writer.add_image(\"Target", "> 0: # s, e = s - 1, e", "model.linear_dim r = hparams.outputs_per_step downsample_step = hparams.downsample_step current_lr = init_lr", "args[\"--checkpoint-seq2seq\"] checkpoint_postnet_path = args[\"--checkpoint-postnet\"] 
load_embedding = args[\"--load-embedding\"] checkpoint_restore_parts = args[\"--restore-parts\"]", "from docopt import docopt import sys from os.path import dirname,", "is not None else None data_root = args[\"--data-root\"] if data_root", "filter by speaker_id self.speaker_id = speaker_id def collect_files(self): meta =", "miracle without any physical explanation.\", ] import dv3.synthesis synthesis._frontend =", "np.load(path) class MelSpecDataSource(_NPYDataSource): def __init__(self, data_root, speaker_id=None): super(MelSpecDataSource, self).__init__(data_root, 1,", "if priority_bin is not None and priority_w > 0: if", "= model.linear_dim r = hparams.outputs_per_step downsample_step = hparams.downsample_step current_lr =", "speaker_ids.cuda() if ismultispeaker else None # Create mask if we", "None: log_event_path = \"log/run-test\" + str(datetime.now()).replace(\" \", \"_\") print(\"Los event", "im = ax.imshow( alignment, aspect='auto', origin='lower', interpolation='none') fig.colorbar(im, ax=ax) xlabel", "not None and attn.dim() == 4: for i, alignment in", "trained train_seq2seq = args[\"--train-seq2seq-only\"] train_postnet = args[\"--train-postnet-only\"] # train both", "data_utils.DataLoader( dataset, batch_size=hparams.batch_size, num_workers=hparams.num_workers, sampler=sampler, collate_fn=collate_fn, pin_memory=hparams.pin_memory) print(\"dataloader_prepared\") # Model", "= mel_outputs.view(len(mel), -1, mel.size(-1)) linear_outputs = None elif train_postnet: assert", "\"multispeaker{}\".format(speaker_id) if speaker_id is not None else \"single\" for idx,", "max_target_len += b_pad * downsample_step a = np.array([_pad(x[0], max_input_len) for", ":-r, :], mel[:, r:, :], decoder_target_mask) mel_loss = (1 -", "+= batch_group_size if s < len(indices): random.shuffle(indices[s:]) return iter(indices) def", "self.speaker_id is not None: speaker_ids = list(map(lambda l: int(l.decode(\"utf-8\").split(\"|\")[-1]), lines))", "plt import sys import os from 
tensorboardX import SummaryWriter from", "**hparams.lr_schedule_kwargs) for param_group in optimizer.param_groups: param_group['lr'] = current_lr optimizer.zero_grad() #", "== self.speaker_id texts = list(np.array(texts)[indices]) self.multi_speaker = False return texts", "torch from torch.utils import data as data_utils from torch.autograd import", "is not None: mel_output = mel[idx].cpu().data.numpy() mel_output = prepare_spec_image(dv3.audio._denormalize(mel_output)) writer.add_image(\"Target", "batch_group_size if s < len(indices): random.shuffle(indices[s:]) return iter(indices) def __len__(self):", "optimizer, reset_optimizer) # Load embedding if load_embedding is not None:", "\"rb\") as f: lines = f.readlines() l = lines[0].decode(\"utf-8\").split(\"|\") assert", "= torch.arange(0, max_len).long() seq_range_expand = seq_range.unsqueeze(0).expand(batch_size, max_len) seq_range_expand = Variable(seq_range_expand)", "alignment = alignment[idx].cpu().data.numpy() tag = \"alignment_layer{}\".format(i + 1) writer.add_image(tag, np.uint8(cm.viridis(np.flip(alignment,", "m = model.postnet checkpoint_path = join( checkpoint_dir, \"checkpoint_step{:09d}{}.pth\".format(global_step, suffix)) optimizer_state", "w) * z.mean() else: binary_div = z.mean() return l1_loss, binary_div", "= hparams.outputs_per_step downsample_step = hparams.downsample_step current_lr = init_lr binary_criterion =", "save_alignment(path, attn): plot_alignment(attn.T, path, info=\"{}, {}, step={}\".format( hparams.builder, time_string(), global_step))", "binary_div = z.mean() return l1_loss, binary_div @jit(nopython=True) def guided_attention(N, max_N,", "= join(self.data_root, \"train.txt\") with open(meta, \"rb\") as f: lines =", "mask target_mask = sequence_mask( target_lengths, max_len=y.size(1)).unsqueeze(-1) else: target_mask = decoder_target_mask", "idx = np.random.randint(0, len(input_lengths)) idx = min(1, len(input_lengths) - 1)", "= list( map(lambda l: 
int(l.decode(\"utf-8\").split(\"|\")[2]), lines)) paths = list(map(lambda l:", "1 def save_checkpoint(model, optimizer, step, checkpoint_dir, epoch, train_seq2seq, train_postnet): if", "max_len, constant_values=0): return np.pad(seq, (0, max_len - len(seq)), mode='constant', constant_values=constant_values)", "== 4 # Lengths input_lengths = [len(x[0]) for x in", "if multi_speaker: speaker_ids = torch.LongTensor([x[3] for x in batch]) else:", "\"\"\" from docopt import docopt import sys from os.path import", "speaker_str = \"multispeaker{}\".format(speaker_id) if speaker_id is not None else \"single\"", "Save averaged alignment alignment_dir = join(checkpoint_dir, \"alignment_ave\") os.makedirs(alignment_dir, exist_ok=True) path", "args[\"--reset-optimizer\"] # Which model to be trained train_seq2seq = args[\"--train-seq2seq-only\"]", ":], y[:, r:, :], target_mask, priority_bin=n_priority_freq, priority_w=hparams.priority_freq_weight) linear_loss = (1", "l1(y_hat[:, :, :priority_bin], y[:, :, :priority_bin]) l1_loss = (1 -", "= seq_range_expand.cuda() seq_length_expand = sequence_length.unsqueeze(1) \\ .expand_as(seq_range_expand) return (seq_range_expand <", "}, checkpoint_path) print(\"Saved checkpoint:\", checkpoint_path) def build_model(): model = getattr(builder,", "target_mask, priority_bin=n_priority_freq, priority_w=hparams.priority_freq_weight) linear_loss = (1 - w) * linear_l1_loss", "print(\"Command line args:\\n\", args) checkpoint_dir = args[\"--checkpoint-dir\"] checkpoint_path = args[\"--checkpoint\"]", "def guided_attentions(input_lengths, target_lengths, max_target_len, g=0.2): B = len(input_lengths) max_input_len =", "None return x_batch, input_lengths, mel_batch, y_batch, \\ (text_positions, frame_positions), done,", "attn.dim() == 4: for i, alignment in enumerate(attn): alignment =", "--restore-parts=<path> Restore part of the model. 
--log-event-path=<name> Log event path.", "getattr(frontend, hparams.frontend) os.makedirs(checkpoint_dir, exist_ok=True) # Input dataset definitions X =", "prepare_spec_image(dv3.audio._denormalize(mel_output)) writer.add_image(\"Target mel spectrogram\", mel_output, global_step) # Target spectrogram if", "# save files as well for now alignment_dir = join(checkpoint_dir,", "--checkpoint-postnet=<path> Restore postnet model from checkpoint path. --train-seq2seq-only Train only", "None and hparams.preset != \"\": preset = hparams.presets[hparams.preset] import json", "datasets. -h, --help Show this help message and exit \"\"\"", "max_target_len += r - max_target_len % r assert max_target_len %", "hparams.parse(args[\"--hparams\"]) print(hparams_debug_string()) assert hparams.name == \"deepvoice3\" # Presets if hparams.preset", "train_postnet) if global_step > 0 and global_step % hparams.eval_interval ==", "for n in range(N): for t in range(T): W[n, t]", "linear spectrogram\", spectrogram, global_step) def logit(x, eps=1e-8): return torch.log(x +", "masked_l1(y_hat, y, mask=mask) + (1 - w) * l1(y_hat, y)", "speaker_id = args else: text = args[0] seq = _frontend.text_to_sequence(text,", "guided_attention(input_lengths[b], max_input_len, target_lengths[b], max_target_len, g).T return W def train(model, data_loader,", "self.sorted_indices.clone() batch_group_size = self.batch_group_size s, e = 0, 0 for", "= logit(y_hat) z = -y * y_hat_logits + torch.log(1 +", "max_len).unsqueeze(-1) # (B, T, D) mask_ = mask.expand_as(input) loss =", "mini-batchs \"\"\" def __init__(self, lengths, batch_size=16, batch_group_size=None, permutate=True): self.lengths, self.sorted_indices", "join(checkpoint_dir, \"alignment_ave\") os.makedirs(alignment_dir, exist_ok=True) path = join(alignment_dir, \"step{:09d}_alignment.png\".format(global_step)) alignment =", "mel_output = prepare_spec_image(dv3.audio._denormalize(mel_output)) writer.add_image(\"Predicted mel spectrogram\", mel_output, 
global_step) # Predicted", "global_step) # Target spectrogram if linear_outputs is not None: linear_output", "mask_).sum() / mask_.sum() def spec_loss(y_hat, y, mask, priority_bin=None, priority_w=0): masked_l1", "Variable(speaker_ids) if ismultispeaker else None if use_cuda: if train_seq2seq: x", "flip against freq axis return np.uint8(cm.magma(spectrogram.T) * 255) def eval_model(global_step,", "s = i * batch_group_size e = s + batch_group_size", "--checkpoint-seq2seq=<path> Restore seq2seq model from checkpoint path. --checkpoint-postnet=<path> Restore postnet", "sampler=sampler, collate_fn=collate_fn, pin_memory=hparams.pin_memory) print(\"dataloader_prepared\") # Model model = build_model() if", "1), max_input_len) for x in batch], dtype=np.int) text_positions = torch.LongTensor(text_positions)", "sampler = PartialyRandomizedSimilarTimeLengthSampler( frame_lengths, batch_size=hparams.batch_size) # Dataset and Dataloader setup", "global_epoch while global_epoch < nepochs: running_loss = 0. 
for step,", "use_cuda = torch.cuda.is_available() if use_cuda: cudnn.benchmark = False _frontend =", "if global_step > 0 and global_step % checkpoint_interval == 0:", "range(T): W[n, t] = 1 - np.exp(-(n / N -", "the acute emotional intelligence that has never gone out of", "print(\"Load optimizer state from {}\".format(path)) optimizer.load_state_dict(checkpoint[\"optimizer\"]) global_step = checkpoint[\"global_step\"] global_epoch", "if train_seq2seq: mel_l1_loss, mel_binary_div = spec_loss( mel_outputs[:, :-r, :], mel[:,", "in batch] max_target_len = max(target_lengths) if max_target_len % r !=", "= join(checkpoint_dir, \"step{:09d}_predicted.wav\".format( global_step)) try: writer.add_audio(\"Predicted audio signal\", signal, global_step,", "len(input_lengths)) idx = min(1, len(input_lengths) - 1) input_length = input_lengths[idx]", "n_priority_freq = int(hparams.priority_freq / (fs * 0.5) * linear_dim) linear_l1_loss,", "idx, text in enumerate(texts): signal, alignment, _, mel = synthesis.tts(", "data_utils from torch.autograd import Variable from torch import nn from", "self.multi_speaker = False return texts return texts, speaker_ids else: return", "train_postnet: assert speaker_ids is None linear_outputs = model.postnet(mel) mel_outputs, attn,", "from tensorboardX import SummaryWriter from matplotlib import cm from warnings", "writer.add_audio(\"(Eval) Predicted audio signal {}_{}\".format(idx, speaker_str), signal, global_step, sample_rate=fs) except", "pyplot as plt import sys import os from tensorboardX import", "import FileSourceDataset, FileDataSource from os.path import join, expanduser import random", "b_pad = r max_target_len += b_pad * downsample_step a =", "spectrogram\", mel_output, global_step) # Predicted spectrogram if linear_outputs is not", "checkpoint_seq2seq_path = args[\"--checkpoint-seq2seq\"] checkpoint_postnet_path = args[\"--checkpoint-postnet\"] load_embedding = args[\"--load-embedding\"] checkpoint_restore_parts", "(B, T, 1) if mask 
is None: mask = sequence_mask(lengths,", "optimizer = optim.Adam(model.get_trainable_parameters(), lr=hparams.initial_learning_rate, betas=( hparams.adam_beta1, hparams.adam_beta2), eps=hparams.adam_eps, weight_decay=hparams.weight_decay) if", "y[:, :, :priority_bin]) l1_loss = (1 - priority_w) * l1_loss", "grad_norm = torch.nn.utils.clip_grad_norm( model.get_trainable_parameters(), clip_thresh) optimizer.step() # Logs writer.add_scalar(\"loss\", float(loss.data[0]),", "+= 1 def save_checkpoint(model, optimizer, step, checkpoint_dir, epoch, train_seq2seq, train_postnet):", "- torch.log(1 - x + eps) def masked_mean(y, mask): #", "info=\"{}, {}, step={}\".format( hparams.builder, time_string(), global_step)) def prepare_spec_image(spectrogram): # [0,", "writer.add_image(\"Predicted linear spectrogram\", spectrogram, global_step) # Predicted audio signal signal", "l: l.decode(\"utf-8\").split(\"|\")[self.col], lines)) paths = list(map(lambda f: join(self.data_root, f), paths))", "if train_postnet: n_priority_freq = int(hparams.priority_freq / (fs * 0.5) *", "if attn is not None and attn.dim() == 4: for", "need to cast numpy.int64 to int self.frame_lengths = list(map(int, self.frame_lengths))", "\\\"{}\\\": {}\".format( hparams.preset, json.dumps(preset, indent=4))) _frontend = getattr(frontend, hparams.frontend) os.makedirs(checkpoint_dir,", "checkpoint_interval == 0: save_states( global_step, writer, mel_outputs, linear_outputs, attn, mel,", "r != 0: max_target_len += r - max_target_len % r", "permutate=True): self.lengths, self.sorted_indices = torch.sort(torch.LongTensor(lengths)) self.batch_size = batch_size if batch_group_size", "_frontend eval_output_dir = join(checkpoint_dir, \"eval\") os.makedirs(eval_output_dir, exist_ok=True) # hard coded", "train_postnet: suffix = \"_postnet\" m = model.postnet checkpoint_path = join(", "def __iter__(self): indices = self.sorted_indices.clone() batch_group_size = self.batch_group_size s, e", "say they have discovered a new 
particle.\", \"There's a way", "save_alignment(path, alignment) tag = \"eval_averaged_alignment_{}_{}\".format(idx, speaker_str) writer.add_image(tag, np.uint8(cm.viridis(np.flip(alignment, 1).T) *", "ismultispeaker else None # Create mask if we use masked", "Create mask if we use masked loss if hparams.masked_loss_weight >", "save files as well for now alignment_dir = join(checkpoint_dir, \"alignment_layer{}\".format(i", "y_batch, \\ (text_positions, frame_positions), done, target_lengths, speaker_ids def time_string(): return", "Combine losses if train_seq2seq and train_postnet: loss = mel_loss +", "mel_outputs is not None: mel_output = mel[idx].cpu().data.numpy() mel_output = prepare_spec_image(dv3.audio._denormalize(mel_output))", "for step, (x, input_lengths, mel, y, positions, done, target_lengths, speaker_ids)", "= getattr(dv3.lrschedule, hparams.lr_schedule) current_lr = lr_schedule_f( init_lr, global_step, **hparams.lr_schedule_kwargs) for", "downsample_step > 1: mel = mel[:, 0::downsample_step, :].contiguous() # Lengths", "Y # alias self.multi_speaker = X.file_data_source.multi_speaker def __getitem__(self, idx): if", "mel[:, 0::downsample_step, :].contiguous() # Lengths input_lengths = input_lengths.long().numpy() decoder_lengths =", "from matplotlib import cm from warnings import warn from dv3.hparams", "load_embedding = args[\"--load-embedding\"] checkpoint_restore_parts = args[\"--restore-parts\"] speaker_id = args[\"--speaker-id\"] speaker_id", "* 255) def eval_model(global_step, writer, model, checkpoint_dir, ismultispeaker): # harded", "] import dv3.synthesis synthesis._frontend = _frontend eval_output_dir = join(checkpoint_dir, \"eval\")", "data_loader, optimizer, writer, init_lr=0.002, checkpoint_dir=None, checkpoint_interval=None, nepochs=None, clip_thresh=1.0, train_seq2seq=True, train_postnet=True):", "+= attn_loss if global_step > 0 and global_step % checkpoint_interval", "= len(batch[0]) == 4 # Lengths input_lengths = [len(x[0]) for", 
"model.seq2seq( x, mel, text_positions=text_positions, frame_positions=frame_positions, input_lengths=input_lengths) # reshape mel_outputs =", "global_step = 0 global_epoch = 0 use_cuda = torch.cuda.is_available() if", "if clip_thresh > 0: grad_norm = torch.nn.utils.clip_grad_norm( model.get_trainable_parameters(), clip_thresh) optimizer.step()", "hparams.downsample_step multi_speaker = len(batch[0]) == 4 # Lengths input_lengths =", "None, filter by speaker_id self.speaker_id = speaker_id def collect_files(self): meta", "input_lengths=input_lengths) # reshape mel_outputs = mel_outputs.view(len(mel), -1, mel.size(-1)) linear_outputs =", "\"_seq2seq\" m = model.seq2seq elif train_postnet: suffix = \"_postnet\" m", "model.cuda() optimizer = optim.Adam(model.get_trainable_parameters(), lr=hparams.initial_learning_rate, betas=( hparams.adam_beta1, hparams.adam_beta2), eps=hparams.adam_eps, weight_decay=hparams.weight_decay)", "s += batch_group_size if s < len(indices): random.shuffle(indices[s:]) return iter(indices)", "10] if ismultispeaker else [None] for speaker_id in speaker_ids: speaker_str", "None: speaker_ids = list(map(lambda l: int(l.decode(\"utf-8\").split(\"|\")[-1]), lines)) # Filter by", "of style.\", \"President Trump met with other leaders at the", "use_memory_mask=hparams.use_memory_mask, trainable_positional_encodings=hparams.trainable_positional_encodings, force_monotonic_attention=hparams.force_monotonic_attention, use_decoder_state_for_postnet_input=hparams.use_decoder_state_for_postnet_input, max_positions=hparams.max_positions, speaker_embedding_weight_std=hparams.speaker_embedding_weight_std, freeze_embedding=hparams.freeze_embedding, window_ahead=hparams.window_ahead, window_backward=hparams.window_backward, key_projection=hparams.key_projection,", "for x in batch] max_target_len = max(target_lengths) if max_target_len %", "optimizer, reset_optimizer) if checkpoint_path is not None: load_checkpoint(checkpoint_path, model, optimizer,", "== 5 texts 
= list(map(lambda l: l.decode(\"utf-8\").split(\"|\")[3], lines)) if self.multi_speaker:", "* 32, len(self.lengths)) if batch_group_size % batch_size != 0: batch_group_size", "+= r - max_target_len % r assert max_target_len % r", "now alignment_dir = join(checkpoint_dir, \"alignment_layer{}\".format(i + 1)) os.makedirs(alignment_dir, exist_ok=True) path", "writer.add_scalar(\"done_loss\", float(done_loss.data[0]), global_step) writer.add_scalar(\"mel loss\", float(mel_loss.data[0]), global_step) writer.add_scalar(\"mel_l1_loss\", float(mel_l1_loss.data[0]), global_step)", "loss if hparams.masked_loss_weight > 0: # decoder output domain mask", "Restore part of the model. --log-event-path=<name> Log event path. --reset-optimizer", "= None self.multi_speaker = False # If not None, filter", "= SummaryWriter(log_dir=log_event_path) # Train! try: train(model, data_loader, optimizer, writer, init_lr=hparams.initial_learning_rate,", "e: warn(str(e)) pass dv3.audio.save_wav(signal, path) # Target mel spectrogram if", "e = 0, 0 for i in range(len(indices) // batch_group_size):", "= args[0] seq = _frontend.text_to_sequence(text, p=hparams.replace_pronunciation_prob) if self.multi_speaker: return np.asarray(seq,", "return loss / mask_.sum() def collate_fn(batch): \"\"\"Create batch\"\"\" r =", ":priority_bin], y[:, :, :priority_bin], mask=mask) \\ + (1 - w)", "model from checkpoint path if given. 
--checkpoint-seq2seq=<path> Restore seq2seq model", "plt.subplots() im = ax.imshow( alignment, aspect='auto', origin='lower', interpolation='none') fig.colorbar(im, ax=ax)", "checkpoints if checkpoint_postnet_path is not None: load_checkpoint(checkpoint_postnet_path, model.postnet, optimizer, reset_optimizer)", "norm\", grad_norm, global_step) writer.add_scalar(\"learning rate\", current_lr, global_step) global_step += 1", "exist_ok=True) path = join(alignment_dir, \"step{:09d}_alignment.png\".format(global_step)) alignment = attn.mean(0)[idx].cpu().data.numpy() save_alignment(path, alignment)", "(1 - w) * l1(y_hat, y) else: assert mask is", "seq = _frontend.text_to_sequence(text, p=hparams.replace_pronunciation_prob) if self.multi_speaker: return np.asarray(seq, dtype=np.int32), int(speaker_id)", "None, None # Apply model if train_seq2seq and train_postnet: mel_outputs,", "len(indices): random.shuffle(indices[s:]) return iter(indices) def __len__(self): return len(self.sorted_indices) class PyTorchDataset(object):", "time_string(): return datetime.now().strftime('%Y-%m-%d %H:%M') def save_alignment(path, attn): plot_alignment(attn.T, path, info=\"{},", "batch], dtype=np.int) text_positions = torch.LongTensor(text_positions) max_decoder_target_len = max_target_len // r", "self).__init__(data_root, 1, speaker_id) class LinearSpecDataSource(_NPYDataSource): def __init__(self, data_root, speaker_id=None): super(LinearSpecDataSource,", "speaker_ids: speaker_str = \"multispeaker{}\".format(speaker_id) if speaker_id is not None else", "+ (1 - w) * l1(y_hat, y) else: assert mask", "encoder_channels=hparams.encoder_channels, decoder_channels=hparams.decoder_channels, converter_channels=hparams.converter_channels, use_memory_mask=hparams.use_memory_mask, trainable_positional_encodings=hparams.trainable_positional_encodings, force_monotonic_attention=hparams.force_monotonic_attention, use_decoder_state_for_postnet_input=hparams.use_decoder_state_for_postnet_input, 
max_positions=hparams.max_positions, speaker_embedding_weight_std=hparams.speaker_embedding_weight_std, freeze_embedding=hparams.freeze_embedding,", "255), global_step) # Predicted mel spectrogram if mel_outputs is not", "checkpoint_interval=hparams.checkpoint_interval, nepochs=hparams.nepochs, clip_thresh=hparams.clip_thresh, train_seq2seq=train_seq2seq, train_postnet=train_postnet) except KeyboardInterrupt: save_checkpoint( model, optimizer,", "frame_positions), done, target_lengths, speaker_ids def time_string(): return datetime.now().strftime('%Y-%m-%d %H:%M') def", "grad_norm, global_step) writer.add_scalar(\"learning rate\", current_lr, global_step) global_step += 1 running_loss", "_frontend = getattr(frontend, hparams.frontend) os.makedirs(checkpoint_dir, exist_ok=True) # Input dataset definitions", "float( linear_binary_div.data[0]), global_step) if train_seq2seq and hparams.use_guided_attention: writer.add_scalar(\"attn_loss\", float(attn_loss.data[0]), global_step)", "mel_loss + done_loss elif train_postnet: loss = linear_loss # attention", "model\") else: assert False, \"must be specified wrong args\" #", "writer, init_lr=hparams.initial_learning_rate, checkpoint_dir=checkpoint_dir, checkpoint_interval=hparams.checkpoint_interval, nepochs=hparams.nepochs, clip_thresh=hparams.clip_thresh, train_seq2seq=train_seq2seq, train_postnet=train_postnet) except KeyboardInterrupt:", "reset_optimizer) if checkpoint_path is not None: load_checkpoint(checkpoint_path, model, optimizer, reset_optimizer)", "np.arange(len(indices[:e]) // self.batch_size) random.shuffle(perm) indices[:e] = indices[:e].view(-1, self.batch_size)[perm, :].view(-1) #", "if linear_outputs is not None: linear_output = y[idx].cpu().data.numpy() spectrogram =", "X = FileSourceDataset(TextDataSource(data_root, speaker_id)) Mel = FileSourceDataset(MelSpecDataSource(data_root, speaker_id)) Y =", "nnmnkwii.datasets import FileSourceDataset, FileDataSource from os.path import join, expanduser import", 
"= model.seq2seq elif train_postnet: suffix = \"_postnet\" m = model.postnet", "from torch.utils import data as data_utils from torch.autograd import Variable", "col self.frame_lengths = [] self.speaker_id = speaker_id def collect_files(self): meta", "from checkpoint. --speaker-id=<N> Use specific speaker of data in case", "np.array(speaker_ids) == self.speaker_id paths = list(np.array(paths)[indices]) self.frame_lengths = list(np.array(self.frame_lengths)[indices]) #", "Variable(mel), Variable(y) text_positions = Variable(text_positions) frame_positions = Variable(frame_positions) done =", "mel = mel[:, 0::downsample_step, :].contiguous() # Lengths input_lengths = input_lengths.long().numpy()", "is not None: speaker_ids = list(map(lambda l: int(l.decode(\"utf-8\").split(\"|\")[-1]), lines)) #", "w * masked_l1( y_hat[:, :, :priority_bin], y[:, :, :priority_bin], mask=mask)", "input_lengths = torch.LongTensor(input_lengths) target_lengths = torch.LongTensor(target_lengths) b = np.array([_pad_2d(x[1], max_target_len,", "path: {}\".format(log_event_path)) writer = SummaryWriter(log_dir=log_event_path) # Train! try: train(model, data_loader,", "* masked_l1(y_hat, y, mask=mask) + (1 - w) * l1(y_hat,", "self.Y = Y # alias self.multi_speaker = X.file_data_source.multi_speaker def __getitem__(self,", "and priority_w > 0: if w > 0: priority_loss =", "embedding from checkpoint. 
--speaker-id=<N> Use specific speaker of data in", "checkpoint_interval=None, nepochs=None, clip_thresh=1.0, train_seq2seq=True, train_postnet=True): if use_cuda: model = model.cuda()", "global_step)) try: writer.add_audio(\"Predicted audio signal\", signal, global_step, sample_rate=fs) except Exception", "priority_bin=None, priority_w=0): masked_l1 = MaskedL1Loss() l1 = nn.L1Loss() w =", "args[\"--train-postnet-only\"] # train both if not specified if not train_seq2seq", "= batch_size if batch_group_size is None: batch_group_size = min(batch_size *", "setup dataset = PyTorchDataset(X, Mel, Y) data_loader = data_utils.DataLoader( dataset,", "signal\", signal, global_step, sample_rate=fs) except Exception as e: warn(str(e)) pass", "def restore_parts(path, model): print(\"Restore part of the model from: {}\".format(path))", "= checkpoint[\"optimizer\"] if optimizer_state is not None: print(\"Load optimizer state", "sample_rate=fs) except Exception as e: warn(str(e)) pass def save_states(global_step, writer,", "max_target_len % r assert max_target_len % r == 0 if", "be specified wrong args\" # Override hyper parameters hparams.parse(args[\"--hparams\"]) print(hparams_debug_string())", "mel_loss + linear_loss + done_loss elif train_seq2seq: loss = mel_loss", "wrong args\" # Override hyper parameters hparams.parse(args[\"--hparams\"]) print(hparams_debug_string()) assert hparams.name", "decoder_target_mask = sequence_mask( target_lengths / (r * downsample_step), max_len=mel.size(1)).unsqueeze(-1) if", ":], target_mask, priority_bin=n_priority_freq, priority_w=hparams.priority_freq_weight) linear_loss = (1 - w) *", "+= loss.data[0] averaged_loss = running_loss / (len(data_loader)) writer.add_scalar(\"loss (per epoch)\",", "= torch.LongTensor(a) input_lengths = torch.LongTensor(input_lengths) target_lengths = torch.LongTensor(target_lengths) b =", "s + batch_group_size random.shuffle(indices[s:e]) # Permutate batches if self.permutate: perm", "decoder_lengths, 
attn.size(-2), g=hparams.guided_attention_sigma) soft_mask = Variable(torch.from_numpy(soft_mask)) soft_mask = soft_mask.cuda() if", "text_positions = torch.LongTensor(text_positions) max_decoder_target_len = max_target_len // r // downsample_step", "checkpoint. --speaker-id=<N> Use specific speaker of data in case for", "\"Some have accepted this as a miracle without any physical", "# Mel writer.add_image(\"(Eval) Predicted mel spectrogram text{}_{}\".format(idx, speaker_str), prepare_spec_image(mel), global_step)", "batch], dtype=np.float32) mel_batch = torch.FloatTensor(b) c = np.array([_pad_2d(x[2], max_target_len, b_pad=b_pad)", "target_lengths = done.cuda(), target_lengths.cuda() speaker_ids = speaker_ids.cuda() if ismultispeaker else", "loss.backward() if clip_thresh > 0: grad_norm = torch.nn.utils.clip_grad_norm( model.get_trainable_parameters(), clip_thresh)", "args[\"--checkpoint\"] checkpoint_seq2seq_path = args[\"--checkpoint-seq2seq\"] checkpoint_postnet_path = args[\"--checkpoint-postnet\"] load_embedding = args[\"--load-embedding\"]", "else: priority_loss = l1(y_hat[:, :, :priority_bin], y[:, :, :priority_bin]) l1_loss", "current_lr optimizer.zero_grad() # Used for Position encoding text_positions, frame_positions =", "[(b_pad, max_len - len(x) - b_pad), (0, 0)], mode=\"constant\", constant_values=0)", "= indices[:e].view(-1, self.batch_size)[perm, :].view(-1) # Handle last elements s +=", "speaker_ids = list(map(lambda l: int(l.decode(\"utf-8\").split(\"|\")[-1]), lines)) # Filter by speaker_id", "= [] self.speaker_id = speaker_id def collect_files(self): meta = join(self.data_root,", "!= 0: batch_group_size -= batch_group_size % batch_size self.batch_group_size = batch_group_size", "if info is not None: xlabel += '\\n\\n' + info", "= FileSourceDataset(MelSpecDataSource(data_root, speaker_id)) Y = FileSourceDataset(LinearSpecDataSource(data_root, speaker_id)) # Prepare sampler", "x + eps) def masked_mean(y, mask): # (B, T, D)", "Used for Position 
encoding text_positions, frame_positions = positions # Downsample", "f: join(self.data_root, f), paths)) if multi_speaker and self.speaker_id is not", "Permutate mini-batchs \"\"\" def __init__(self, lengths, batch_size=16, batch_group_size=None, permutate=True): self.lengths,", "downsample_step a = np.array([_pad(x[0], max_input_len) for x in batch], dtype=np.int)", "global_step) # Predicted audio signal signal = dv3.audio.inv_spectrogram(linear_output.T) signal /=", "dv3.audio.inv_spectrogram(linear_output.T) signal /= np.max(np.abs(signal)) path = join(checkpoint_dir, \"step{:09d}_predicted.wav\".format( global_step)) try:", "0: batch_group_size -= batch_group_size % batch_size self.batch_group_size = batch_group_size assert", "a miracle without any physical explanation.\", ] import dv3.synthesis synthesis._frontend", "global_step) if train_seq2seq: writer.add_scalar(\"done_loss\", float(done_loss.data[0]), global_step) writer.add_scalar(\"mel loss\", float(mel_loss.data[0]), global_step)", "tag = \"eval_averaged_alignment_{}_{}\".format(idx, speaker_str) writer.add_image(tag, np.uint8(cm.viridis(np.flip(alignment, 1).T) * 255), global_step)", "e: warn(str(e)) pass def save_states(global_step, writer, mel_outputs, linear_outputs, attn, mel,", "model): state = torch.load(path)[\"state_dict\"] key = \"seq2seq.encoder.embed_tokens.weight\" model.seq2seq.encoder.embed_tokens.weight.data = state[key]", "warn(str(e)) pass def save_states(global_step, writer, mel_outputs, linear_outputs, attn, mel, y,", "l.decode(\"utf-8\").split(\"|\")[self.col], lines)) paths = list(map(lambda f: join(self.data_root, f), paths)) if", "signal {}_{}\".format(idx, speaker_str), signal, global_step, sample_rate=fs) except Exception as e:", "checkpoint_path is not None: load_checkpoint(checkpoint_path, model, optimizer, reset_optimizer) # Load", "model from checkpoint path. 
--checkpoint-postnet=<path> Restore postnet model from checkpoint", "with preset \\\"{}\\\": {}\".format( hparams.preset, json.dumps(preset, indent=4))) _frontend = getattr(frontend,", "import join, expanduser import random import librosa.display from matplotlib import", "mode=\"constant\", constant_values=0) return x def plot_alignment(alignment, path, info=None): fig, ax", "data_loader, optimizer, writer, init_lr=hparams.initial_learning_rate, checkpoint_dir=checkpoint_dir, checkpoint_interval=hparams.checkpoint_interval, nepochs=hparams.nepochs, clip_thresh=hparams.clip_thresh, train_seq2seq=train_seq2seq, train_postnet=train_postnet)", "class PartialyRandomizedSimilarTimeLengthSampler(Sampler): \"\"\"Partially randmoized sampler 1. Sort by lengths 2.", "dtype=np.float32) for n in range(N): for t in range(T): W[n,", "plt.tight_layout() plt.savefig(path, format='png') plt.close() class TextDataSource(FileDataSource): def __init__(self, data_root, speaker_id=None):", "range(N): for t in range(T): W[n, t] = 1 -", "= mel.cuda() done, target_lengths = done.cuda(), target_lengths.cuda() speaker_ids = speaker_ids.cuda()", "s, e = 0, 0 for i in range(len(indices) //", "except KeyboardInterrupt: save_checkpoint( model, optimizer, global_step, checkpoint_dir, global_epoch, train_seq2seq, train_postnet)", "for tensorboard if log_event_path is None: log_event_path = \"log/run-test\" +", "f), paths)) if multi_speaker and self.speaker_id is not None: speaker_ids", "float(mel_binary_div.data[0]), global_step) if train_postnet: writer.add_scalar(\"linear_loss\", float(linear_loss.data[0]), global_step) writer.add_scalar(\"linear_l1_loss\", float(linear_l1_loss.data[0]), global_step)", "print(\"Loading embedding from {}\".format(load_embedding)) _load_embedding(load_embedding, model) # Setup summary writer", "writer, mel_outputs, linear_outputs, attn, mel, y, input_lengths, checkpoint_dir=None): print(\"Save intermediate", "train_seq2seq, train_postnet = True, True if 
train_seq2seq: print(\"Training seq2seq model\")", "a small patch and randomize it 3. Permutate mini-batchs \"\"\"", "= list(map(lambda f: join(self.data_root, f), paths)) if multi_speaker and self.speaker_id", "np.asarray(seq, dtype=np.int32) class _NPYDataSource(FileDataSource): def __init__(self, data_root, col, speaker_id=None): self.data_root", "if train_seq2seq and train_postnet: mel_outputs, linear_outputs, attn, done_hat = model(", "# Train! try: train(model, data_loader, optimizer, writer, init_lr=hparams.initial_learning_rate, checkpoint_dir=checkpoint_dir, checkpoint_interval=hparams.checkpoint_interval,", "batch_size == 0 self.permutate = permutate def __iter__(self): indices =", "i + 1)) save_alignment(path, alignment) # Save averaged alignment alignment_dir", "ax.imshow( alignment, aspect='auto', origin='lower', interpolation='none') fig.colorbar(im, ax=ax) xlabel = 'Decoder", "\"\"\"Trainining script for seq2seq text-to-speech synthesis model. usage: train.py [options]", "speaker_ids=speaker_ids, text_positions=text_positions, frame_positions=frame_positions, input_lengths=input_lengths) elif train_seq2seq: assert speaker_ids is None", "dataset = PyTorchDataset(X, Mel, Y) data_loader = data_utils.DataLoader( dataset, batch_size=hparams.batch_size,", "spectrogram-domain mask target_mask = sequence_mask( target_lengths, max_len=y.size(1)).unsqueeze(-1) else: target_mask =", "or train_postnet global global_step, global_epoch while global_epoch < nepochs: running_loss", "texts, speaker_ids else: return texts def collect_features(self, *args): if self.multi_speaker:", "path = join(eval_output_dir, \"step{:09d}_text{}_{}_alignment.png\".format( global_step, idx, speaker_str)) save_alignment(path, alignment) tag", "return texts return texts, speaker_ids else: return texts def collect_features(self,", "constant_values=0): return np.pad(seq, (0, max_len - len(seq)), mode='constant', constant_values=constant_values) def", "hparams.masked_loss_weight # L1 loss 
if w > 0: assert mask", "is None linear_outputs = model.postnet(mel) mel_outputs, attn, done_hat = None,", "ismultispeaker) # Update loss.backward() if clip_thresh > 0: grad_norm =", "else None if use_cuda: if train_seq2seq: x = x.cuda() text_positions", "= list(map(lambda l: l.decode(\"utf-8\").split(\"|\")[self.col], lines)) paths = list(map(lambda f: join(self.data_root,", "= torch.load(path)[\"state_dict\"] model_dict = model.state_dict() valid_state_dict = {k: v for", "i, alignment in enumerate(attn): alignment = alignment[idx].cpu().data.numpy() tag = \"alignment_layer{}\".format(i", "for x in batch], dtype=np.int) text_positions = torch.LongTensor(text_positions) max_decoder_target_len =", "downsample_step # Feed data x, mel, y = Variable(x), Variable(mel),", "= \"\" m = model elif train_seq2seq: suffix = \"_seq2seq\"", "multi_speaker: speaker_ids = torch.LongTensor([x[3] for x in batch]) else: speaker_ids", "target_mask = None, None # Apply model if train_seq2seq and", "intelligence that has never gone out of style.\", \"President Trump", "* g)) return W def guided_attentions(input_lengths, target_lengths, max_target_len, g=0.2): B", "usage: train.py [options] options: --data-root=<dir> Directory contains preprocessed features. 
--checkpoint-dir=<dir>", "from dv3.deepvoice3_pytorch import frontend, builder import dv3.audio import dv3.lrschedule import", "z = -y * y_hat_logits + torch.log(1 + torch.exp(y_hat_logits)) if", "constant_values=0) return x def plot_alignment(alignment, path, info=None): fig, ax =", "plt.xlabel(xlabel) plt.ylabel('Encoder timestep') plt.tight_layout() plt.savefig(path, format='png') plt.close() class TextDataSource(FileDataSource): def", "format='png') plt.close() class TextDataSource(FileDataSource): def __init__(self, data_root, speaker_id=None): self.data_root =", "list(map(lambda l: int(l.decode(\"utf-8\").split(\"|\")[-1]), lines)) # Filter by speaker_id # using", "from numba import jit from nnmnkwii.datasets import FileSourceDataset, FileDataSource from", "ax = plt.subplots() im = ax.imshow( alignment, aspect='auto', origin='lower', interpolation='none')", "lengths 2. Pick a small patch and randomize it 3.", "def train(model, data_loader, optimizer, writer, init_lr=0.002, checkpoint_dir=None, checkpoint_interval=None, nepochs=None, clip_thresh=1.0,", "Restore seq2seq model from checkpoint path. 
--checkpoint-postnet=<path> Restore postnet model", "= model.postnet(mel) mel_outputs, attn, done_hat = None, None, None #", "with other leaders at the Group of 20 conference.\", \"Generative", "- 1) input_length = input_lengths[idx] # Alignment # Multi-hop attention", "multi_speaker = len(batch[0]) == 4 # Lengths input_lengths = [len(x[0])", "eval_model(global_step, writer, model, checkpoint_dir, ismultispeaker) # Update loss.backward() if clip_thresh", "model): print(\"Restore part of the model from: {}\".format(path)) state =", "> 0: # decoder output domain mask decoder_target_mask = sequence_mask(", "self.batch_size)[perm, :].view(-1) # Handle last elements s += batch_group_size if", "if max_target_len % r != 0: max_target_len += r -", "l1(y_hat[:, :, :priority_bin], y[:, :, :priority_bin]) else: priority_loss = l1(y_hat[:,", "torch.LongTensor([x[3] for x in batch]) else: speaker_ids = None return", "for Position encoding text_positions, frame_positions = positions # Downsample mel", "global_step, idx, speaker_str)) dv3.audio.save_wav(signal, path) try: writer.add_audio(\"(Eval) Predicted audio signal", "spectrogram if mel_outputs is not None: mel_output = mel[idx].cpu().data.numpy() mel_output", "seq2seq text-to-speech synthesis model. 
usage: train.py [options] options: --data-root=<dir> Directory", "!= 0: max_target_len += downsample_step - max_target_len % downsample_step assert", "__init__(self, data_root, speaker_id=None): super(LinearSpecDataSource, self).__init__(data_root, 0, speaker_id) class PartialyRandomizedSimilarTimeLengthSampler(Sampler): \"\"\"Partially", "[len(x[0]) for x in batch] max_input_len = max(input_lengths) target_lengths =", "\"Please call Stella.\", \"Some have accepted this as a miracle", "list(np.array(self.frame_lengths)[indices]) # aha, need to cast numpy.int64 to int self.frame_lengths", "as e: warn(str(e)) pass dv3.audio.save_wav(signal, path) # Target mel spectrogram", "if checkpoint_postnet_path is not None: load_checkpoint(checkpoint_postnet_path, model.postnet, optimizer, reset_optimizer) if", "self.X = X self.Mel = Mel self.Y = Y #", "not None and priority_w > 0: if w > 0:", "SummaryWriter from matplotlib import cm from warnings import warn from", "target * mask_) return loss / mask_.sum() def collate_fn(batch): \"\"\"Create", "g=hparams.guided_attention_sigma) soft_mask = Variable(torch.from_numpy(soft_mask)) soft_mask = soft_mask.cuda() if use_cuda else", "= sequence_length.size(0) seq_range = torch.arange(0, max_len).long() seq_range_expand = seq_range.unsqueeze(0).expand(batch_size, max_len)", "_pad_2d(x, max_len, b_pad=0): x = np.pad(x, [(b_pad, max_len - len(x)", "texts = list(map(lambda l: l.decode(\"utf-8\").split(\"|\")[3], lines)) if self.multi_speaker: speaker_ids =", "= input_lengths[idx] # Alignment # Multi-hop attention if attn is", "= np.array([_pad(np.arange(1, len(x[0]) + 1), max_input_len) for x in batch],", "linear_loss # attention if train_seq2seq and hparams.use_guided_attention: soft_mask = guided_attentions(input_lengths,", "train_seq2seq=train_seq2seq, train_postnet=train_postnet) except KeyboardInterrupt: save_checkpoint( model, optimizer, global_step, checkpoint_dir, global_epoch,", "train_postnet: y = y.cuda() mel = mel.cuda() done, 
target_lengths =", "data_root self.speaker_ids = None self.multi_speaker = False # If not", "= max(target_lengths) if max_target_len % r != 0: max_target_len +=", "paths = list(np.array(paths)[indices]) self.frame_lengths = list(np.array(self.frame_lengths)[indices]) # aha, need to", "torch.FloatTensor(c) # text positions text_positions = np.array([_pad(np.arange(1, len(x[0]) + 1),", "= l1(y_hat, y) # Priority L1 loss if priority_bin is", "checkpoint:\", checkpoint_path) def build_model(): model = getattr(builder, hparams.builder)( n_speakers=hparams.n_speakers, speaker_embed_dim=hparams.speaker_embed_dim,", "= (1 - w) * linear_l1_loss + w * linear_binary_div", "# (B, T, D) mask_ = mask.expand_as(y) return (y *", "return (seq_range_expand < seq_length_expand).float() class MaskedL1Loss(nn.Module): def __init__(self): super(MaskedL1Loss, self).__init__()", "from tqdm import tqdm, trange from datetime import datetime #", "train_seq2seq = args[\"--train-seq2seq-only\"] train_postnet = args[\"--train-postnet-only\"] # train both if", "mel_batch = torch.FloatTensor(b) c = np.array([_pad_2d(x[2], max_target_len, b_pad=b_pad) for x", "= Variable(target_lengths) speaker_ids = Variable(speaker_ids) if ismultispeaker else None if", "from torch.autograd import Variable from torch import nn from torch", "Variable(text_positions) frame_positions = Variable(frame_positions) done = Variable(done) target_lengths = Variable(target_lengths)", "= False _frontend = None # to be set later", "sys from os.path import dirname, join from tqdm import tqdm,", "mel, y, input_lengths, checkpoint_dir) save_checkpoint( model, optimizer, global_step, checkpoint_dir, global_epoch,", "priority_loss # Binary divergence loss if hparams.binary_divergence_weight <= 0: binary_div", "eps) def masked_mean(y, mask): # (B, T, D) mask_ =", "to save model checkpoints [default: checkpoints]. 
--hparams=<parmas> Hyper parameters [default:", "- b_pad), (0, 0)], mode=\"constant\", constant_values=0) return x def plot_alignment(alignment,", "g=0.2): B = len(input_lengths) max_input_len = input_lengths.max() W = np.zeros((B,", "+ torch.exp(y_hat_logits)) if w > 0: binary_div = w *", "done, target_lengths = done.cuda(), target_lengths.cuda() speaker_ids = speaker_ids.cuda() if ismultispeaker", "y.cuda() mel = mel.cuda() done, target_lengths = done.cuda(), target_lengths.cuda() speaker_ids", "lengths=None, mask=None, max_len=None): if lengths is None and mask is", "= model.cuda() linear_dim = model.linear_dim r = hparams.outputs_per_step downsample_step =", "log_event_path = \"log/run-test\" + str(datetime.now()).replace(\" \", \"_\") print(\"Los event path:", "W = np.zeros((max_N, max_T), dtype=np.float32) for n in range(N): for", "import os from tensorboardX import SummaryWriter from matplotlib import cm", "w > 0: priority_loss = w * masked_l1( y_hat[:, :,", "states b_pad = r max_target_len += b_pad * downsample_step a", "global_step) def logit(x, eps=1e-8): return torch.log(x + eps) - torch.log(1", "x in batch] max_input_len = max(input_lengths) target_lengths = [len(x[1]) for", "dv3.audio.save_wav(signal, path) try: writer.add_audio(\"(Eval) Predicted audio signal {}_{}\".format(idx, speaker_str), signal,", "self.speaker_id = speaker_id def collect_files(self): meta = join(self.data_root, \"train.txt\") with", "Restore model from checkpoint path if given. 
--checkpoint-seq2seq=<path> Restore seq2seq", "to cast numpy.int64 to int self.frame_lengths = list(map(int, self.frame_lengths)) return", "to be set later def _pad(seq, max_len, constant_values=0): return np.pad(seq,", "train_seq2seq: mel_l1_loss, mel_binary_div = spec_loss( mel_outputs[:, :-r, :], mel[:, r:,", "Y = FileSourceDataset(LinearSpecDataSource(data_root, speaker_id)) # Prepare sampler frame_lengths = Mel.file_data_source.frame_lengths", "linear_outputs[:, :-r, :], y[:, r:, :], target_mask, priority_bin=n_priority_freq, priority_w=hparams.priority_freq_weight) linear_loss", "= optim.Adam(model.get_trainable_parameters(), lr=hparams.initial_learning_rate, betas=( hparams.adam_beta1, hparams.adam_beta2), eps=hparams.adam_eps, weight_decay=hparams.weight_decay) if checkpoint_restore_parts", "if train_postnet: writer.add_scalar(\"linear_loss\", float(linear_loss.data[0]), global_step) writer.add_scalar(\"linear_l1_loss\", float(linear_l1_loss.data[0]), global_step) writer.add_scalar(\"linear_binary_div_loss\", float(", "zero beginning padding # imitates initial decoder states b_pad =", "[len(x[1]) for x in batch] max_target_len = max(target_lengths) if max_target_len", "x in batch], dtype=np.float32) mel_batch = torch.FloatTensor(b) c = np.array([_pad_2d(x[2],", "for seq2seq text-to-speech synthesis model. 
usage: train.py [options] options: --data-root=<dir>", "epoch)\", averaged_loss, global_epoch) print(\"Loss: {}\".format(running_loss / (len(data_loader)))) global_epoch += 1", "save_alignment(path, alignment) # Save averaged alignment alignment_dir = join(checkpoint_dir, \"alignment_ave\")", "import pyplot as plt import sys import os from tensorboardX", "speaker_id # using multi-speaker dataset as a single speaker dataset", "speaker_id=None): super(LinearSpecDataSource, self).__init__(data_root, 0, speaker_id) class PartialyRandomizedSimilarTimeLengthSampler(Sampler): \"\"\"Partially randmoized sampler", "r assert max_target_len % r == 0 if max_target_len %", "frame positions s, e = 1, max_decoder_target_len + 1 #", "> 0: binary_div = w * masked_mean(z, mask) + (1", "w) * l1(y_hat[:, :, :priority_bin], y[:, :, :priority_bin]) else: priority_loss", "is not None: mel_output = mel_outputs[idx].cpu().data.numpy() mel_output = prepare_spec_image(dv3.audio._denormalize(mel_output)) writer.add_image(\"Predicted", "= positions # Downsample mel spectrogram if downsample_step > 1:", "> 0: writer.add_scalar(\"gradient norm\", grad_norm, global_step) writer.add_scalar(\"learning rate\", current_lr, global_step)", "global_epoch = checkpoint[\"global_epoch\"] return model def _load_embedding(path, model): state =", "from torch import optim import torch.backends.cudnn as cudnn from torch.utils", "Variable from torch import nn from torch import optim import", "provide either lengths or mask\") # (B, T, 1) if", "gone out of style.\", \"President Trump met with other leaders", "and hparams.use_guided_attention: soft_mask = guided_attentions(input_lengths, decoder_lengths, attn.size(-2), g=hparams.guided_attention_sigma) soft_mask =", "train_seq2seq and hparams.use_guided_attention: writer.add_scalar(\"attn_loss\", float(attn_loss.data[0]), global_step) if clip_thresh > 0:", "assert False, \"must be specified wrong args\" # Override hyper", "%H:%M') def save_alignment(path, attn): 
plot_alignment(attn.T, path, info=\"{}, {}, step={}\".format( hparams.builder,", "current_lr = init_lr binary_criterion = nn.BCELoss() assert train_seq2seq or train_postnet", "data as data_utils from torch.autograd import Variable from torch import", "4 or len(l) == 5 multi_speaker = len(l) == 5", "speaker_ids = [0, 1, 10] if ismultispeaker else [None] for", "model\") elif train_postnet: print(\"Training postnet model\") else: assert False, \"must", "float(done_loss.data[0]), global_step) writer.add_scalar(\"mel loss\", float(mel_loss.data[0]), global_step) writer.add_scalar(\"mel_l1_loss\", float(mel_l1_loss.data[0]), global_step) writer.add_scalar(\"mel_binary_div_loss\",", "def save_checkpoint(model, optimizer, step, checkpoint_dir, epoch, train_seq2seq, train_postnet): if train_seq2seq", "attn_loss if global_step > 0 and global_step % checkpoint_interval ==", "optimizer.state_dict() if hparams.save_optimizer_state else None torch.save({ \"state_dict\": m.state_dict(), \"optimizer\": optimizer_state,", "save model checkpoints [default: checkpoints]. 
--hparams=<parmas> Hyper parameters [default: ].", "loss\", float(mel_loss.data[0]), global_step) writer.add_scalar(\"mel_l1_loss\", float(mel_l1_loss.data[0]), global_step) writer.add_scalar(\"mel_binary_div_loss\", float(mel_binary_div.data[0]), global_step) if", "text in enumerate(texts): signal, alignment, _, mel = synthesis.tts( model,", "- 1), max_decoder_target_len, constant_values=1) for x in batch]) done =", "lines)) paths = list(map(lambda l: l.decode(\"utf-8\").split(\"|\")[self.col], lines)) paths = list(map(lambda", "return len(self.sorted_indices) class PyTorchDataset(object): def __init__(self, X, Mel, Y): self.X", "W = np.zeros((B, max_target_len, max_input_len), dtype=np.float32) for b in range(B):", "optimizer.step() # Logs writer.add_scalar(\"loss\", float(loss.data[0]), global_step) if train_seq2seq: writer.add_scalar(\"done_loss\", float(done_loss.data[0]),", "mel_outputs[:, :-r, :], mel[:, r:, :], decoder_target_mask) mel_loss = (1", "\"\" m = model elif train_seq2seq: suffix = \"_seq2seq\" m", "= max(input_lengths) target_lengths = [len(x[1]) for x in batch] max_target_len", "Set 0 for zero beginning padding # imitates initial decoder", "downsample_step == 0 # Set 0 for zero beginning padding", "None if use_cuda: if train_seq2seq: x = x.cuda() text_positions =", "\"global_step\": step, \"global_epoch\": epoch, }, checkpoint_path) print(\"Saved checkpoint:\", checkpoint_path) def", "r:, :] target_mask = target_mask[:, r:, :] else: decoder_target_mask, target_mask", "(per epoch)\", averaged_loss, global_epoch) print(\"Loss: {}\".format(running_loss / (len(data_loader)))) global_epoch +=", "signal, alignment, _, mel = synthesis.tts( model, text, p=0, speaker_id=speaker_id,", "not None: speaker_ids = list(map(lambda l: int(l.decode(\"utf-8\").split(\"|\")[-1]), lines)) # Filter", "particle.\", \"There's a way to measure the acute emotional intelligence", "for now alignment_dir = join(checkpoint_dir, \"alignment_layer{}\".format(i + 1)) 
os.makedirs(alignment_dir, exist_ok=True)", "hparams.adam_beta2), eps=hparams.adam_eps, weight_decay=hparams.weight_decay) if checkpoint_restore_parts is not None: restore_parts(checkpoint_restore_parts, model)", "print(\"dataloader_prepared\") # Model model = build_model() if use_cuda: model =", "self.frame_lengths = list( map(lambda l: int(l.decode(\"utf-8\").split(\"|\")[2]), lines)) paths = list(map(lambda", "class MaskedL1Loss(nn.Module): def __init__(self): super(MaskedL1Loss, self).__init__() self.criterion = nn.L1Loss(size_average=False) def", "torch.exp(y_hat_logits)) if w > 0: binary_div = w * masked_mean(z,", "alignment = attn.mean(0)[idx].cpu().data.numpy() save_alignment(path, alignment) tag = \"averaged_alignment\" writer.add_image(tag, np.uint8(cm.viridis(np.flip(alignment,", "speaker_id)) Mel = FileSourceDataset(MelSpecDataSource(data_root, speaker_id)) Y = FileSourceDataset(LinearSpecDataSource(data_root, speaker_id)) #", "import numpy as np from numba import jit from nnmnkwii.datasets", "self.permutate = permutate def __iter__(self): indices = self.sorted_indices.clone() batch_group_size =", "any physical explanation.\", ] import dv3.synthesis synthesis._frontend = _frontend eval_output_dir", "Log event path. --reset-optimizer Reset optimizer. 
--load-embedding=<path> Load embedding from", "np.max(np.abs(signal)) # Alignment path = join(eval_output_dir, \"step{:09d}_text{}_{}_alignment.png\".format( global_step, idx, speaker_str))", "return (y * mask_).sum() / mask_.sum() def spec_loss(y_hat, y, mask,", "# Downsample mel spectrogram if downsample_step > 1: mel =", "import dv3.synthesis synthesis._frontend = _frontend eval_output_dir = join(checkpoint_dir, \"eval\") os.makedirs(eval_output_dir,", "if train_seq2seq: print(\"Training seq2seq model\") elif train_postnet: print(\"Training postnet model\")", "fs = hparams.sample_rate global_step = 0 global_epoch = 0 use_cuda", "y, input_lengths, checkpoint_dir) save_checkpoint( model, optimizer, global_step, checkpoint_dir, global_epoch, train_seq2seq,", "* 0.5) * linear_dim) linear_l1_loss, linear_binary_div = spec_loss( linear_outputs[:, :-r,", "and attn.dim() == 4: for i, alignment in enumerate(attn): alignment", "binary_criterion(done_hat, done) # linear: if train_postnet: n_priority_freq = int(hparams.priority_freq /", "1: # spectrogram-domain mask target_mask = sequence_mask( target_lengths, max_len=y.size(1)).unsqueeze(-1) else:", "return W def train(model, data_loader, optimizer, writer, init_lr=0.002, checkpoint_dir=None, checkpoint_interval=None,", "linear_loss + done_loss elif train_seq2seq: loss = mel_loss + done_loss", "mel_outputs, attn, done_hat = None, None, None # Losses w", "mel_output = mel_outputs[idx].cpu().data.numpy() mel_output = prepare_spec_image(dv3.audio._denormalize(mel_output)) writer.add_image(\"Predicted mel spectrogram\", mel_output,", "n_speakers=hparams.n_speakers, speaker_embed_dim=hparams.speaker_embed_dim, n_vocab=_frontend.n_vocab, embed_dim=hparams.text_embed_dim, mel_dim=hparams.num_mels, linear_dim=hparams.fft_size // 2 + 1,", "0)], mode=\"constant\", constant_values=0) return x def plot_alignment(alignment, path, info=None): fig,", "% batch_size != 0: batch_group_size -= batch_group_size % batch_size 
self.batch_group_size", "<reponame>drat/Neural-Voice-Cloning-With-Few-Samples \"\"\"Trainining script for seq2seq text-to-speech synthesis model. usage: train.py", "max_decoder_target_len = max_target_len // r // downsample_step # frame positions", "as e: warn(str(e)) pass def save_states(global_step, writer, mel_outputs, linear_outputs, attn,", "done_loss = binary_criterion(done_hat, done) # linear: if train_postnet: n_priority_freq =", "data_root, col, speaker_id=None): self.data_root = data_root self.col = col self.frame_lengths", "= join(dirname(__file__), \"data\", \"ljspeech\") log_event_path = args[\"--log-event-path\"] reset_optimizer = args[\"--reset-optimizer\"]", "N - t / T)**2 / (2 * g *", "decoder states b_pad = r max_target_len += b_pad * downsample_step", "prepare_spec_image(mel), global_step) # Audio path = join(eval_output_dir, \"step{:09d}_text{}_{}_predicted.wav\".format( global_step, idx,", "downsample_step > 1: # spectrogram-domain mask target_mask = sequence_mask( target_lengths,", "assert max_target_len % r == 0 if max_target_len % downsample_step", "print(\"Save intermediate states at step {}\".format(global_step)) # idx = np.random.randint(0,", "FileSourceDataset(MelSpecDataSource(data_root, speaker_id)) Y = FileSourceDataset(LinearSpecDataSource(data_root, speaker_id)) # Prepare sampler frame_lengths", "priority_loss = l1(y_hat[:, :, :priority_bin], y[:, :, :priority_bin]) l1_loss =", "float(loss.data[0]), global_step) if train_seq2seq: writer.add_scalar(\"done_loss\", float(done_loss.data[0]), global_step) writer.add_scalar(\"mel loss\", float(mel_loss.data[0]),", "soft_mask = soft_mask.cuda() if use_cuda else soft_mask attn_loss = (attn", "in range(N): for t in range(T): W[n, t] = 1", "> 0 and global_step % checkpoint_interval == 0: save_states( global_step,", "docopt(__doc__) print(\"Command line args:\\n\", args) checkpoint_dir = args[\"--checkpoint-dir\"] checkpoint_path =", "files as well for now alignment_dir = join(checkpoint_dir, 
\"alignment_layer{}\".format(i +", "- max_target_len % r assert max_target_len % r == 0", "for x in batch] max_input_len = max(input_lengths) target_lengths = [len(x[1])", "* downsample_step a = np.array([_pad(x[0], max_input_len) for x in batch],", "(1 - w) * mel_l1_loss + w * mel_binary_div #", "# Target mel spectrogram if mel_outputs is not None: mel_output", "hparams.binary_divergence_weight # mel: if train_seq2seq: mel_l1_loss, mel_binary_div = spec_loss( mel_outputs[:,", "Logs writer.add_scalar(\"loss\", float(loss.data[0]), global_step) if train_seq2seq: writer.add_scalar(\"done_loss\", float(done_loss.data[0]), global_step) writer.add_scalar(\"mel", "Target mel spectrogram if mel_outputs is not None: mel_output =", ":priority_bin], y[:, :, :priority_bin]) l1_loss = (1 - priority_w) *", "multi-speaker dataset as a single speaker dataset indices = np.array(speaker_ids)", "= Y # alias self.multi_speaker = X.file_data_source.multi_speaker def __getitem__(self, idx):", "r max_target_len += b_pad * downsample_step a = np.array([_pad(x[0], max_input_len)", "data_root = args[\"--data-root\"] if data_root is None: data_root = join(dirname(__file__),", "either lengths or mask\") # (B, T, 1) if mask", "import json hparams.parse_json(json.dumps(preset)) print(\"Override hyper parameters with preset \\\"{}\\\": {}\".format(", "writer, model, checkpoint_dir, ismultispeaker): # harded coded texts = [", "train_seq2seq and train_postnet: loss = mel_loss + linear_loss + done_loss", "= int(hparams.priority_freq / (fs * 0.5) * linear_dim) linear_l1_loss, linear_binary_div", "k, v in state.items() if k in model_dict} model_dict.update(valid_state_dict) model.load_state_dict(model_dict)", "= prepare_spec_image(dv3.audio._denormalize(mel_output)) writer.add_image(\"Target mel spectrogram\", mel_output, global_step) # Target spectrogram", "if hparams.masked_loss_weight > 0: # decoder output domain mask decoder_target_mask", "0: save_states( global_step, writer, mel_outputs, 
linear_outputs, attn, mel, y, input_lengths,", "step, \"global_epoch\": epoch, }, checkpoint_path) print(\"Saved checkpoint:\", checkpoint_path) def build_model():", "-= batch_group_size % batch_size self.batch_group_size = batch_group_size assert batch_group_size %", "shift mask decoder_target_mask = decoder_target_mask[:, r:, :] target_mask = target_mask[:,", "b_pad * downsample_step a = np.array([_pad(x[0], max_input_len) for x in", "try: train(model, data_loader, optimizer, writer, init_lr=hparams.initial_learning_rate, checkpoint_dir=checkpoint_dir, checkpoint_interval=hparams.checkpoint_interval, nepochs=hparams.nepochs, clip_thresh=hparams.clip_thresh,", "target_mask[:, r:, :] else: decoder_target_mask, target_mask = None, None #", "sampler frame_lengths = Mel.file_data_source.frame_lengths sampler = PartialyRandomizedSimilarTimeLengthSampler( frame_lengths, batch_size=hparams.batch_size) #", "self.speaker_id texts = list(np.array(texts)[indices]) self.multi_speaker = False return texts return", "not None: load_checkpoint(checkpoint_postnet_path, model.postnet, optimizer, reset_optimizer) if checkpoint_seq2seq_path is not", "= args[\"--data-root\"] if data_root is None: data_root = join(dirname(__file__), \"data\",", "# Predicted audio signal signal = dv3.audio.inv_spectrogram(linear_output.T) signal /= np.max(np.abs(signal))", "c = np.array([_pad_2d(x[2], max_target_len, b_pad=b_pad) for x in batch], dtype=np.float32)", "> 0: priority_loss = w * masked_l1( y_hat[:, :, :priority_bin],", "int(hparams.priority_freq / (fs * 0.5) * linear_dim) linear_l1_loss, linear_binary_div =", "load_checkpoint(checkpoint_postnet_path, model.postnet, optimizer, reset_optimizer) if checkpoint_seq2seq_path is not None: load_checkpoint(checkpoint_seq2seq_path,", "# train both if not specified if not train_seq2seq and", "suffix = \"_seq2seq\" m = model.seq2seq elif train_postnet: suffix =", ":-r, :], y[:, r:, :], target_mask, priority_bin=n_priority_freq, 
priority_w=hparams.priority_freq_weight) linear_loss =", "running_loss = 0. for step, (x, input_lengths, mel, y, positions,", "(r * downsample_step), max_len=mel.size(1)).unsqueeze(-1) if downsample_step > 1: # spectrogram-domain", "use_decoder_state_for_postnet_input=hparams.use_decoder_state_for_postnet_input, max_positions=hparams.max_positions, speaker_embedding_weight_std=hparams.speaker_embedding_weight_std, freeze_embedding=hparams.freeze_embedding, window_ahead=hparams.window_ahead, window_backward=hparams.window_backward, key_projection=hparams.key_projection, value_projection=hparams.value_projection, ) return", "as np from numba import jit from nnmnkwii.datasets import FileSourceDataset,", "- w) * l1(y_hat[:, :, :priority_bin], y[:, :, :priority_bin]) else:", "reset_optimizer = args[\"--reset-optimizer\"] # Which model to be trained train_seq2seq", "join from tqdm import tqdm, trange from datetime import datetime", "Predicted mel spectrogram text{}_{}\".format(idx, speaker_str), prepare_spec_image(mel), global_step) # Audio path", "train_postnet: n_priority_freq = int(hparams.priority_freq / (fs * 0.5) * linear_dim)", "optimizer, global_step, checkpoint_dir, global_epoch, train_seq2seq, train_postnet) if global_step > 0", "== 0 # Set 0 for zero beginning padding #", "for t in range(T): W[n, t] = 1 - np.exp(-(n", "# imitates initial decoder states b_pad = r max_target_len +=", "\"single\" for idx, text in enumerate(texts): signal, alignment, _, mel", "attn.size(-2), g=hparams.guided_attention_sigma) soft_mask = Variable(torch.from_numpy(soft_mask)) soft_mask = soft_mask.cuda() if use_cuda", "is None: mask = sequence_mask(lengths, max_len).unsqueeze(-1) # (B, T, D)", "np.flip(spectrogram, axis=1) # flip against freq axis return np.uint8(cm.magma(spectrogram.T) *", "* mask_) return loss / mask_.sum() def collate_fn(batch): \"\"\"Create batch\"\"\"", "0 and global_step % checkpoint_interval == 0: save_states( global_step, writer,", "global_step, 
checkpoint_dir, global_epoch, train_seq2seq, train_postnet) if global_step > 0 and", "hparams.use_guided_attention: writer.add_scalar(\"attn_loss\", float(attn_loss.data[0]), global_step) if clip_thresh > 0: writer.add_scalar(\"gradient norm\",", "float(attn_loss.data[0]), global_step) if clip_thresh > 0: writer.add_scalar(\"gradient norm\", grad_norm, global_step)", "constant_values=constant_values) def _pad_2d(x, max_len, b_pad=0): x = np.pad(x, [(b_pad, max_len", "binary_div = Variable(y.data.new(1).zero_()) else: y_hat_logits = logit(y_hat) z = -y", "spectrogram\", spectrogram, global_step) # Predicted audio signal signal = dv3.audio.inv_spectrogram(linear_output.T)", "input_lengths = [len(x[0]) for x in batch] max_input_len = max(input_lengths)", "m = model.seq2seq elif train_postnet: suffix = \"_postnet\" m =", "downsample_step = hparams.downsample_step multi_speaker = len(batch[0]) == 4 # Lengths", "eval_model(global_step, writer, model, checkpoint_dir, ismultispeaker): # harded coded texts =", "ismultispeaker else None if use_cuda: if train_seq2seq: x = x.cuda()", "import cm from warnings import warn from dv3.hparams import hparams,", "path, info=\"{}, {}, step={}\".format( hparams.builder, time_string(), global_step)) def prepare_spec_image(spectrogram): #", "np.array([_pad_2d(x[1], max_target_len, b_pad=b_pad) for x in batch], dtype=np.float32) mel_batch =", "len(l) == 5 self.multi_speaker = len(l) == 5 texts =", "join(eval_output_dir, \"step{:09d}_text{}_{}_predicted.wav\".format( global_step, idx, speaker_str)) dv3.audio.save_wav(signal, path) try: writer.add_audio(\"(Eval) Predicted", "attn, done_hat = None, None, None # Losses w =", "idx, speaker_str)) dv3.audio.save_wav(signal, path) try: writer.add_audio(\"(Eval) Predicted audio signal {}_{}\".format(idx,", "def forward(self, input, target, lengths=None, mask=None, max_len=None): if lengths is", "if speaker_id is not None else None data_root = args[\"--data-root\"]", "linear_outputs, attn, mel, y, 
input_lengths, checkpoint_dir=None): print(\"Save intermediate states at", "if w > 0: priority_loss = w * masked_l1( y_hat[:,", "def __len__(self): return len(self.X) def sequence_mask(sequence_length, max_len=None): if max_len is", "if s < len(indices): random.shuffle(indices[s:]) return iter(indices) def __len__(self): return", "text = args[0] seq = _frontend.text_to_sequence(text, p=hparams.replace_pronunciation_prob) if self.multi_speaker: return", "print(\"Saved checkpoint:\", checkpoint_path) def build_model(): model = getattr(builder, hparams.builder)( n_speakers=hparams.n_speakers,", "if self.multi_speaker: text, speaker_id = args else: text = args[0]", "The deepvoice3 model from dv3.deepvoice3_pytorch import frontend, builder import dv3.audio", "batch] max_target_len = max(target_lengths) if max_target_len % r != 0:", "= done.cuda(), target_lengths.cuda() speaker_ids = speaker_ids.cuda() if ismultispeaker else None", "max_target_len, g).T return W def train(model, data_loader, optimizer, writer, init_lr=0.002,", "np.array([_pad(np.arange(1, len(x[0]) + 1), max_input_len) for x in batch], dtype=np.int)", "= input_lengths.max() W = np.zeros((B, max_target_len, max_input_len), dtype=np.float32) for b", "= binary_criterion(done_hat, done) # linear: if train_postnet: n_priority_freq = int(hparams.priority_freq", "seq_range_expand = seq_range.unsqueeze(0).expand(batch_size, max_len) seq_range_expand = Variable(seq_range_expand) if sequence_length.is_cuda: seq_range_expand", "t in range(T): W[n, t] = 1 - np.exp(-(n /", "if train_postnet: y = y.cuda() mel = mel.cuda() done, target_lengths", "r:, :], decoder_target_mask) mel_loss = (1 - w) * mel_l1_loss", "if __name__ == \"__main__\": args = docopt(__doc__) print(\"Command line args:\\n\",", "SummaryWriter(log_dir=log_event_path) # Train! 
try: train(model, data_loader, optimizer, writer, init_lr=hparams.initial_learning_rate, checkpoint_dir=checkpoint_dir,", "\"step{:09d}_text{}_{}_alignment.png\".format( global_step, idx, speaker_str)) save_alignment(path, alignment) tag = \"eval_averaged_alignment_{}_{}\".format(idx, speaker_str)", "emotional intelligence that has never gone out of style.\", \"President", "def __init__(self, data_root, speaker_id=None): self.data_root = data_root self.speaker_ids = None", "new particle.\", \"There's a way to measure the acute emotional", "else: return self.X[idx], self.Mel[idx], self.Y[idx] def __len__(self): return len(self.X) def", "not None else None data_root = args[\"--data-root\"] if data_root is", "None and mask is None: raise RuntimeError(\"Should provide either lengths", "frame_positions = positions # Downsample mel spectrogram if downsample_step >", "s, e = 1, max_decoder_target_len + 1 # if b_pad", "0 global_epoch = 0 use_cuda = torch.cuda.is_available() if use_cuda: cudnn.benchmark", "Override hyper parameters hparams.parse(args[\"--hparams\"]) print(hparams_debug_string()) assert hparams.name == \"deepvoice3\" #", "(1 - w) * z.mean() else: binary_div = z.mean() return", "* linear_l1_loss + w * linear_binary_div # Combine losses if", "elif train_seq2seq: loss = mel_loss + done_loss elif train_postnet: loss", "state.items() if k in model_dict} model_dict.update(valid_state_dict) model.load_state_dict(model_dict) if __name__ ==", "mel spectrogram if downsample_step > 1: mel = mel[:, 0::downsample_step,", "= _frontend.text_to_sequence(text, p=hparams.replace_pronunciation_prob) if self.multi_speaker: return np.asarray(seq, dtype=np.int32), int(speaker_id) else:", "def collect_files(self): meta = join(self.data_root, \"train.txt\") with open(meta, \"rb\") as", "* l1(y_hat[:, :, :priority_bin], y[:, :, :priority_bin]) else: priority_loss =", "= list(np.array(texts)[indices]) self.multi_speaker = False return texts return texts, speaker_ids", "expanduser 
import random import librosa.display from matplotlib import pyplot as", "lengths is None and mask is None: raise RuntimeError(\"Should provide", "join(dirname(__file__), \"data\", \"ljspeech\") log_event_path = args[\"--log-event-path\"] reset_optimizer = args[\"--reset-optimizer\"] #", "(y * mask_).sum() / mask_.sum() def spec_loss(y_hat, y, mask, priority_bin=None,", "= mel_loss + done_loss elif train_postnet: loss = linear_loss #", "texts = list(np.array(texts)[indices]) self.multi_speaker = False return texts return texts,", "is None l1_loss = l1(y_hat, y) # Priority L1 loss", "and Dataloader setup dataset = PyTorchDataset(X, Mel, Y) data_loader =", "0: priority_loss = w * masked_l1( y_hat[:, :, :priority_bin], y[:,", "return self.X[idx], self.Mel[idx], self.Y[idx] def __len__(self): return len(self.X) def sequence_mask(sequence_length,", "np.zeros((B, max_target_len, max_input_len), dtype=np.float32) for b in range(B): W[b] =", "train_seq2seq and hparams.use_guided_attention: soft_mask = guided_attentions(input_lengths, decoder_lengths, attn.size(-2), g=hparams.guided_attention_sigma) soft_mask", "train_postnet = args[\"--train-postnet-only\"] # train both if not specified if", "elements s += batch_group_size if s < len(indices): random.shuffle(indices[s:]) return", "s < len(indices): random.shuffle(indices[s:]) return iter(indices) def __len__(self): return len(self.sorted_indices)", "alignment[idx].cpu().data.numpy() tag = \"alignment_layer{}\".format(i + 1) writer.add_image(tag, np.uint8(cm.viridis(np.flip(alignment, 1).T) *", "logit(x, eps=1e-8): return torch.log(x + eps) - torch.log(1 - x", "__init__(self, data_root, speaker_id=None): self.data_root = data_root self.speaker_ids = None self.multi_speaker", "decoder_target_mask, target_mask = None, None # Apply model if train_seq2seq", "x def plot_alignment(alignment, path, info=None): fig, ax = plt.subplots() im", "mask_.sum() def spec_loss(y_hat, y, mask, priority_bin=None, priority_w=0): masked_l1 = 
MaskedL1Loss()", "data_root self.col = col self.frame_lengths = [] self.speaker_id = speaker_id", "path) try: writer.add_audio(\"(Eval) Predicted audio signal {}_{}\".format(idx, speaker_str), signal, global_step,", "if we use masked loss if hparams.masked_loss_weight > 0: #", "= synthesis.tts( model, text, p=0, speaker_id=speaker_id, fast=False) signal /= np.max(np.abs(signal))", "model.postnet, optimizer, reset_optimizer) if checkpoint_seq2seq_path is not None: load_checkpoint(checkpoint_seq2seq_path, model.seq2seq,", "None # Learning rate schedule if hparams.lr_schedule is not None:", "y_hat[:, :, :priority_bin], y[:, :, :priority_bin], mask=mask) \\ + (1", "global_step, writer, mel_outputs, linear_outputs, attn, mel, y, input_lengths, checkpoint_dir) save_checkpoint(", "# Priority L1 loss if priority_bin is not None and", "is not None # Learning rate schedule if hparams.lr_schedule is", "collect_features(self, *args): if self.multi_speaker: text, speaker_id = args else: text", "/ (len(data_loader)) writer.add_scalar(\"loss (per epoch)\", averaged_loss, global_epoch) print(\"Loss: {}\".format(running_loss /", "torch.nn.utils.clip_grad_norm( model.get_trainable_parameters(), clip_thresh) optimizer.step() # Logs writer.add_scalar(\"loss\", float(loss.data[0]), global_step) if", "permutate def __iter__(self): indices = self.sorted_indices.clone() batch_group_size = self.batch_group_size s,", "r - max_target_len % r assert max_target_len % r ==", "mel_binary_div = spec_loss( mel_outputs[:, :-r, :], mel[:, r:, :], decoder_target_mask)", "max_target_len % downsample_step != 0: max_target_len += downsample_step - max_target_len", "and mask is None: raise RuntimeError(\"Should provide either lengths or", "- np.min(spectrogram)) spectrogram = np.flip(spectrogram, axis=1) # flip against freq", "numpy.int64 to int self.frame_lengths = list(map(int, self.frame_lengths)) return paths def", "input_lengths=input_lengths) elif train_seq2seq: assert speaker_ids is None 
mel_outputs, attn, done_hat,", "int(speaker_id) else: return np.asarray(seq, dtype=np.int32) class _NPYDataSource(FileDataSource): def __init__(self, data_root,", "is None: data_root = join(dirname(__file__), \"data\", \"ljspeech\") log_event_path = args[\"--log-event-path\"]", "variational auto-encoder.\", \"Please call Stella.\", \"Some have accepted this as", "None: data_root = join(dirname(__file__), \"data\", \"ljspeech\") log_event_path = args[\"--log-event-path\"] reset_optimizer", "checkpoint path. --train-seq2seq-only Train only seq2seq model. --train-postnet-only Train only", "frame_positions = frame_positions.cuda() if train_postnet: y = y.cuda() mel =", "well for now alignment_dir = join(checkpoint_dir, \"alignment_layer{}\".format(i + 1)) os.makedirs(alignment_dir,", "= (attn * soft_mask).mean() loss += attn_loss if global_step >", "model, optimizer, reset_optimizer): global global_step global global_epoch print(\"Load checkpoint from:", "e - 1 frame_positions = torch.arange(s, e).long().unsqueeze(0).expand( len(batch), max_decoder_target_len) #", "True if train_seq2seq: print(\"Training seq2seq model\") elif train_postnet: print(\"Training postnet", "model, checkpoint_dir, ismultispeaker) # Update loss.backward() if clip_thresh > 0:", "+ batch_group_size random.shuffle(indices[s:e]) # Permutate batches if self.permutate: perm =", "done_hat, _ = model.seq2seq( x, mel, text_positions=text_positions, frame_positions=frame_positions, input_lengths=input_lengths) #", "\"_\") print(\"Los event path: {}\".format(log_event_path)) writer = SummaryWriter(log_dir=log_event_path) # Train!", "import nn from torch import optim import torch.backends.cudnn as cudnn", "= model.seq2seq( x, mel, text_positions=text_positions, frame_positions=frame_positions, input_lengths=input_lengths) # reshape mel_outputs", "is not None: print(\"Loading embedding from {}\".format(load_embedding)) _load_embedding(load_embedding, model) #", "and self.speaker_id is not None: speaker_ids 
= list(map(lambda l: int(l.decode(\"utf-8\").split(\"|\")[-1]),", "target_lengths.cuda() speaker_ids = speaker_ids.cuda() if ismultispeaker else None # Create", "hparams.use_guided_attention: soft_mask = guided_attentions(input_lengths, decoder_lengths, attn.size(-2), g=hparams.guided_attention_sigma) soft_mask = Variable(torch.from_numpy(soft_mask))", "embedding from {}\".format(load_embedding)) _load_embedding(load_embedding, model) # Setup summary writer for", "__init__(self): super(MaskedL1Loss, self).__init__() self.criterion = nn.L1Loss(size_average=False) def forward(self, input, target,", "import jit from nnmnkwii.datasets import FileSourceDataset, FileDataSource from os.path import", "= join(alignment_dir, \"step{:09d}_alignment.png\".format(global_step)) alignment = attn.mean(0)[idx].cpu().data.numpy() save_alignment(path, alignment) tag =", "model to be trained train_seq2seq = args[\"--train-seq2seq-only\"] train_postnet = args[\"--train-postnet-only\"]", "model.load_state_dict(model_dict) if __name__ == \"__main__\": args = docopt(__doc__) print(\"Command line", "save_checkpoint(model, optimizer, step, checkpoint_dir, epoch, train_seq2seq, train_postnet): if train_seq2seq and", "line args:\\n\", args) checkpoint_dir = args[\"--checkpoint-dir\"] checkpoint_path = args[\"--checkpoint\"] checkpoint_seq2seq_path", "x = np.pad(x, [(b_pad, max_len - len(x) - b_pad), (0,", "json.dumps(preset, indent=4))) _frontend = getattr(frontend, hparams.frontend) os.makedirs(checkpoint_dir, exist_ok=True) # Input", "max(target_lengths) if max_target_len % r != 0: max_target_len += r", "(1 - priority_w) * l1_loss + priority_w * priority_loss #", "hparams.adam_beta1, hparams.adam_beta2), eps=hparams.adam_eps, weight_decay=hparams.weight_decay) if checkpoint_restore_parts is not None: restore_parts(checkpoint_restore_parts,", "speaker_id) class LinearSpecDataSource(_NPYDataSource): def __init__(self, data_root, speaker_id=None): super(LinearSpecDataSource, 
self).__init__(data_root, 0,", "hparams.outputs_per_step downsample_step = hparams.downsample_step current_lr = init_lr binary_criterion = nn.BCELoss()", "elif train_seq2seq: suffix = \"_seq2seq\" m = model.seq2seq elif train_postnet:", "\"checkpoint_step{:09d}{}.pth\".format(global_step, suffix)) optimizer_state = optimizer.state_dict() if hparams.save_optimizer_state else None torch.save({", "restore_parts(checkpoint_restore_parts, model) # Load checkpoints if checkpoint_postnet_path is not None:", "0: binary_div = w * masked_mean(z, mask) + (1 -", "max_target_len, max_input_len), dtype=np.float32) for b in range(B): W[b] = guided_attention(input_lengths[b],", "path = join(alignment_dir, \"step{:09d}_alignment.png\".format(global_step)) alignment = attn.mean(0)[idx].cpu().data.numpy() save_alignment(path, alignment) tag", "np.array([_pad_2d(x[2], max_target_len, b_pad=b_pad) for x in batch], dtype=np.float32) y_batch =", "args[\"--speaker-id\"] speaker_id = int(speaker_id) if speaker_id is not None else", "nepochs=hparams.nepochs, clip_thresh=hparams.clip_thresh, train_seq2seq=train_seq2seq, train_postnet=train_postnet) except KeyboardInterrupt: save_checkpoint( model, optimizer, global_step,", "- w) * z.mean() else: binary_div = z.mean() return l1_loss,", "[default: ]. 
--checkpoint=<path> Restore model from checkpoint path if given.", "train_postnet=train_postnet) except KeyboardInterrupt: save_checkpoint( model, optimizer, global_step, checkpoint_dir, global_epoch, train_seq2seq,", "converter_channels=hparams.converter_channels, use_memory_mask=hparams.use_memory_mask, trainable_positional_encodings=hparams.trainable_positional_encodings, force_monotonic_attention=hparams.force_monotonic_attention, use_decoder_state_for_postnet_input=hparams.use_decoder_state_for_postnet_input, max_positions=hparams.max_positions, speaker_embedding_weight_std=hparams.speaker_embedding_weight_std, freeze_embedding=hparams.freeze_embedding, window_ahead=hparams.window_ahead, window_backward=hparams.window_backward,", "Reset optimizer. --load-embedding=<path> Load embedding from checkpoint. --speaker-id=<N> Use specific", "b_pad=b_pad) for x in batch], dtype=np.float32) y_batch = torch.FloatTensor(c) #", "suffix = \"_postnet\" m = model.postnet checkpoint_path = join( checkpoint_dir,", "= FileSourceDataset(LinearSpecDataSource(data_root, speaker_id)) # Prepare sampler frame_lengths = Mel.file_data_source.frame_lengths sampler", "model def _load_embedding(path, model): state = torch.load(path)[\"state_dict\"] key = \"seq2seq.encoder.embed_tokens.weight\"", "\"\"\"Partially randmoized sampler 1. Sort by lengths 2. 
Pick a", "y[:, :, :priority_bin]) else: priority_loss = l1(y_hat[:, :, :priority_bin], y[:,", "= mask.expand_as(input) loss = self.criterion(input * mask_, target * mask_)", "= Variable(y.data.new(1).zero_()) else: y_hat_logits = logit(y_hat) z = -y *", "Losses w = hparams.binary_divergence_weight # mel: if train_seq2seq: mel_l1_loss, mel_binary_div", "= checkpoint[\"global_epoch\"] return model def _load_embedding(path, model): state = torch.load(path)[\"state_dict\"]", "y_batch = torch.FloatTensor(c) # text positions text_positions = np.array([_pad(np.arange(1, len(x[0])", "# Permutate batches if self.permutate: perm = np.arange(len(indices[:e]) // self.batch_size)", "lr_schedule_f( init_lr, global_step, **hparams.lr_schedule_kwargs) for param_group in optimizer.param_groups: param_group['lr'] =", "1: mel = mel[:, 0::downsample_step, :].contiguous() # Lengths input_lengths =", "== 5 self.multi_speaker = len(l) == 5 texts = list(map(lambda", "dataset indices = np.array(speaker_ids) == self.speaker_id paths = list(np.array(paths)[indices]) self.frame_lengths", "# [0, 1] spectrogram = (spectrogram - np.min(spectrogram)) / (np.max(spectrogram)", "- w) * l1(y_hat, y) else: assert mask is None", "+ 1, r=hparams.outputs_per_step, downsample_step=hparams.downsample_step, padding_idx=hparams.padding_idx, dropout=hparams.dropout, kernel_size=hparams.kernel_size, encoder_channels=hparams.encoder_channels, decoder_channels=hparams.decoder_channels, converter_channels=hparams.converter_channels,", "= mel_loss + linear_loss + done_loss elif train_seq2seq: loss =", "= hparams.binary_divergence_weight # mel: if train_seq2seq: mel_l1_loss, mel_binary_div = spec_loss(", "X.file_data_source.multi_speaker def __getitem__(self, idx): if self.multi_speaker: text, speaker_id = self.X[idx]", "intermediate states at step {}\".format(global_step)) # idx = np.random.randint(0, len(input_lengths))", "in batch], dtype=np.float32) y_batch = torch.FloatTensor(c) # text positions 
text_positions", "and global_step % hparams.eval_interval == 0: eval_model(global_step, writer, model, checkpoint_dir,", "--help Show this help message and exit \"\"\" from docopt", "# If not None, filter by speaker_id self.speaker_id = speaker_id", "global_step, sample_rate=fs) except Exception as e: warn(str(e)) pass dv3.audio.save_wav(signal, path)", "nepochs: running_loss = 0. for step, (x, input_lengths, mel, y,", "loss = mel_loss + done_loss elif train_postnet: loss = linear_loss", "event path: {}\".format(log_event_path)) writer = SummaryWriter(log_dir=log_event_path) # Train! try: train(model,", "decoder_target_mask) mel_loss = (1 - w) * mel_l1_loss + w", "True, True if train_seq2seq: print(\"Training seq2seq model\") elif train_postnet: print(\"Training", "self.col = col self.frame_lengths = [] self.speaker_id = speaker_id def", "in range(len(indices) // batch_group_size): s = i * batch_group_size e", "mel[idx].cpu().data.numpy() mel_output = prepare_spec_image(dv3.audio._denormalize(mel_output)) writer.add_image(\"Target mel spectrogram\", mel_output, global_step) #", "__name__ == \"__main__\": args = docopt(__doc__) print(\"Command line args:\\n\", args)", "is not None: restore_parts(checkpoint_restore_parts, model) # Load checkpoints if checkpoint_postnet_path", "mel_outputs.view(len(mel), -1, mel.size(-1)) linear_outputs = None elif train_postnet: assert speaker_ids", "mask = sequence_mask(lengths, max_len).unsqueeze(-1) # (B, T, D) mask_ =", "# L1 loss if w > 0: assert mask is", "= lr_schedule_f( init_lr, global_step, **hparams.lr_schedule_kwargs) for param_group in optimizer.param_groups: param_group['lr']", "batch_size=hparams.batch_size) # Dataset and Dataloader setup dataset = PyTorchDataset(X, Mel,", "= 0 use_cuda = torch.cuda.is_available() if use_cuda: cudnn.benchmark = False", "z.mean() return l1_loss, binary_div @jit(nopython=True) def guided_attention(N, max_N, T, max_T,", "target_lengths / (r * downsample_step), 
max_len=mel.size(1)).unsqueeze(-1) if downsample_step > 1:", "hparams, hparams_debug_string fs = hparams.sample_rate global_step = 0 global_epoch =", "--train-postnet-only Train only postnet model. --restore-parts=<path> Restore part of the", "loss = self.criterion(input * mask_, target * mask_) return loss", "if batch_group_size is None: batch_group_size = min(batch_size * 32, len(self.lengths))", "global_step) # Predicted mel spectrogram if mel_outputs is not None:", "loss.data[0] averaged_loss = running_loss / (len(data_loader)) writer.add_scalar(\"loss (per epoch)\", averaged_loss,", "self.sorted_indices = torch.sort(torch.LongTensor(lengths)) self.batch_size = batch_size if batch_group_size is None:", "from: {}\".format(path)) state = torch.load(path)[\"state_dict\"] model_dict = model.state_dict() valid_state_dict =", "dataset definitions X = FileSourceDataset(TextDataSource(data_root, speaker_id)) Mel = FileSourceDataset(MelSpecDataSource(data_root, speaker_id))", "> 0: assert mask is not None l1_loss = w", "summary writer for tensorboard if log_event_path is None: log_event_path =", "None # Apply model if train_seq2seq and train_postnet: mel_outputs, linear_outputs,", "for speaker_id in speaker_ids: speaker_str = \"multispeaker{}\".format(speaker_id) if speaker_id is", "Restore postnet model from checkpoint path. --train-seq2seq-only Train only seq2seq", "* batch_group_size e = s + batch_group_size random.shuffle(indices[s:e]) # Permutate", "beginning padding # imitates initial decoder states b_pad = r", "suffix)) optimizer_state = optimizer.state_dict() if hparams.save_optimizer_state else None torch.save({ \"state_dict\":", "loss = linear_loss # attention if train_seq2seq and hparams.use_guided_attention: soft_mask", "* 255), global_step) # Predicted mel spectrogram if mel_outputs is", "while global_epoch < nepochs: running_loss = 0. 
for step, (x,", "= r max_target_len += b_pad * downsample_step a = np.array([_pad(x[0],", "5 self.multi_speaker = len(l) == 5 texts = list(map(lambda l:", "train_seq2seq, train_postnet) if global_step > 0 and global_step % hparams.eval_interval", "nn.L1Loss() w = hparams.masked_loss_weight # L1 loss if w >", "r == 0 if max_target_len % downsample_step != 0: max_target_len", "% batch_size == 0 self.permutate = permutate def __iter__(self): indices", "y) else: assert mask is None l1_loss = l1(y_hat, y)", "model.seq2seq, optimizer, reset_optimizer) if checkpoint_path is not None: load_checkpoint(checkpoint_path, model,", "in enumerate(texts): signal, alignment, _, mel = synthesis.tts( model, text,", "mel, y = Variable(x), Variable(mel), Variable(y) text_positions = Variable(text_positions) frame_positions", "checkpoint_seq2seq_path is not None: load_checkpoint(checkpoint_seq2seq_path, model.seq2seq, optimizer, reset_optimizer) if checkpoint_path", "Permutate batches if self.permutate: perm = np.arange(len(indices[:e]) // self.batch_size) random.shuffle(perm)", "range(B): W[b] = guided_attention(input_lengths[b], max_input_len, target_lengths[b], max_target_len, g).T return W", "* l1(y_hat, y) else: assert mask is None l1_loss =", "max_len = sequence_length.data.max() batch_size = sequence_length.size(0) seq_range = torch.arange(0, max_len).long()", "or len(l) == 5 self.multi_speaker = len(l) == 5 texts", "y[idx].cpu().data.numpy() spectrogram = prepare_spec_image(dv3.audio._denormalize(linear_output)) writer.add_image(\"Target linear spectrogram\", spectrogram, global_step) def", "spectrogram\", spectrogram, global_step) def logit(x, eps=1e-8): return torch.log(x + eps)", "None: mask = sequence_mask(lengths, max_len).unsqueeze(-1) # (B, T, D) mask_", "self.batch_group_size s, e = 0, 0 for i in range(len(indices)", "[None] for speaker_id in speaker_ids: speaker_str = \"multispeaker{}\".format(speaker_id) if speaker_id", "mel, speaker_ids=speaker_ids, 
text_positions=text_positions, frame_positions=frame_positions, input_lengths=input_lengths) elif train_seq2seq: assert speaker_ids is", "max_input_len) for x in batch], dtype=np.int) text_positions = torch.LongTensor(text_positions) max_decoder_target_len", "= max_target_len // r // downsample_step # frame positions s,", "args[\"--restore-parts\"] speaker_id = args[\"--speaker-id\"] speaker_id = int(speaker_id) if speaker_id is", "as f: lines = f.readlines() l = lines[0].decode(\"utf-8\").split(\"|\") assert len(l)", "init_lr binary_criterion = nn.BCELoss() assert train_seq2seq or train_postnet global global_step,", "w) * linear_l1_loss + w * linear_binary_div # Combine losses", "% r != 0: max_target_len += r - max_target_len %", "= mel_outputs[idx].cpu().data.numpy() mel_output = prepare_spec_image(dv3.audio._denormalize(mel_output)) writer.add_image(\"Predicted mel spectrogram\", mel_output, global_step)", "% r assert max_target_len % r == 0 if max_target_len", "g * g)) return W def guided_attentions(input_lengths, target_lengths, max_target_len, g=0.2):", "mask.expand_as(input) loss = self.criterion(input * mask_, target * mask_) return", "speaker_str) writer.add_image(tag, np.uint8(cm.viridis(np.flip(alignment, 1).T) * 255), global_step) # Mel writer.add_image(\"(Eval)", "= optimizer.state_dict() if hparams.save_optimizer_state else None torch.save({ \"state_dict\": m.state_dict(), \"optimizer\":", "train_seq2seq: x = x.cuda() text_positions = text_positions.cuda() frame_positions = frame_positions.cuda()", "args[0] seq = _frontend.text_to_sequence(text, p=hparams.replace_pronunciation_prob) if self.multi_speaker: return np.asarray(seq, dtype=np.int32),", "coded texts = [ \"Scientists at the CERN laboratory say", "plt.savefig(path, format='png') plt.close() class TextDataSource(FileDataSource): def __init__(self, data_root, speaker_id=None): self.data_root", "mask) + (1 - w) * z.mean() else: binary_div =", "is not None: linear_output = 
linear_outputs[idx].cpu().data.numpy() spectrogram = prepare_spec_image(dv3.audio._denormalize(linear_output)) writer.add_image(\"Predicted", "build_model(): model = getattr(builder, hparams.builder)( n_speakers=hparams.n_speakers, speaker_embed_dim=hparams.speaker_embed_dim, n_vocab=_frontend.n_vocab, embed_dim=hparams.text_embed_dim, mel_dim=hparams.num_mels,", "torch.LongTensor(input_lengths) target_lengths = torch.LongTensor(target_lengths) b = np.array([_pad_2d(x[1], max_target_len, b_pad=b_pad) for", "this help message and exit \"\"\" from docopt import docopt", "None mel_outputs, attn, done_hat, _ = model.seq2seq( x, mel, text_positions=text_positions,", "y[:, :, :priority_bin], mask=mask) \\ + (1 - w) *", "spectrogram, global_step) # Predicted audio signal signal = dv3.audio.inv_spectrogram(linear_output.T) signal", "join(self.data_root, \"train.txt\") with open(meta, \"rb\") as f: lines = f.readlines()", "r:, :] else: decoder_target_mask, target_mask = None, None # Apply", "+ 1)) save_alignment(path, alignment) # Save averaged alignment alignment_dir =", "multi_speaker and self.speaker_id is not None: speaker_ids = list(map(lambda l:", "linear_output = y[idx].cpu().data.numpy() spectrogram = prepare_spec_image(dv3.audio._denormalize(linear_output)) writer.add_image(\"Target linear spectrogram\", spectrogram,", "os.path import dirname, join from tqdm import tqdm, trange from", "freeze_embedding=hparams.freeze_embedding, window_ahead=hparams.window_ahead, window_backward=hparams.window_backward, key_projection=hparams.key_projection, value_projection=hparams.value_projection, ) return model def load_checkpoint(path,", "from os.path import join, expanduser import random import librosa.display from", "done_hat = model( x, mel, speaker_ids=speaker_ids, text_positions=text_positions, frame_positions=frame_positions, input_lengths=input_lengths) elif", "Lengths input_lengths = input_lengths.long().numpy() decoder_lengths = target_lengths.long().numpy() // r //", 
"linear_l1_loss + w * linear_binary_div # Combine losses if train_seq2seq", "enumerate(texts): signal, alignment, _, mel = synthesis.tts( model, text, p=0,", "downsample_step = hparams.downsample_step current_lr = init_lr binary_criterion = nn.BCELoss() assert", "self.frame_lengths = [] self.speaker_id = speaker_id def collect_files(self): meta =", "max_T), dtype=np.float32) for n in range(N): for t in range(T):", "train_seq2seq and train_postnet: suffix = \"\" m = model elif", "a single speaker dataset indices = np.array(speaker_ids) == self.speaker_id paths", "domain mask decoder_target_mask = sequence_mask( target_lengths / (r * downsample_step),", "linear_output = linear_outputs[idx].cpu().data.numpy() spectrogram = prepare_spec_image(dv3.audio._denormalize(linear_output)) writer.add_image(\"Predicted linear spectrogram\", spectrogram,", "hparams.lr_schedule) current_lr = lr_schedule_f( init_lr, global_step, **hparams.lr_schedule_kwargs) for param_group in", "= Variable(x), Variable(mel), Variable(y) text_positions = Variable(text_positions) frame_positions = Variable(frame_positions)", "args = docopt(__doc__) print(\"Command line args:\\n\", args) checkpoint_dir = args[\"--checkpoint-dir\"]", "done, target_lengths, speaker_ids) \\ in tqdm(enumerate(data_loader)): model.train() ismultispeaker = speaker_ids", "+= '\\n\\n' + info plt.xlabel(xlabel) plt.ylabel('Encoder timestep') plt.tight_layout() plt.savefig(path, format='png')", "self.frame_lengths = list(np.array(self.frame_lengths)[indices]) # aha, need to cast numpy.int64 to", "{}\".format(path)) checkpoint = torch.load(path) model.load_state_dict(checkpoint[\"state_dict\"]) if not reset_optimizer: optimizer_state =", "< nepochs: running_loss = 0. 
for step, (x, input_lengths, mel,", "interpolation='none') fig.colorbar(im, ax=ax) xlabel = 'Decoder timestep' if info is", "masked_mean(y, mask): # (B, T, D) mask_ = mask.expand_as(y) return", "= hparams.downsample_step current_lr = init_lr binary_criterion = nn.BCELoss() assert train_seq2seq", "/ mask_.sum() def spec_loss(y_hat, y, mask, priority_bin=None, priority_w=0): masked_l1 =", "r // downsample_step - 1), max_decoder_target_len, constant_values=1) for x in", "load_checkpoint(path, model, optimizer, reset_optimizer): global global_step global global_epoch print(\"Load checkpoint", "torch.load(path) model.load_state_dict(checkpoint[\"state_dict\"]) if not reset_optimizer: optimizer_state = checkpoint[\"optimizer\"] if optimizer_state", "Exception as e: warn(str(e)) pass dv3.audio.save_wav(signal, path) # Target mel", "train_postnet=True): if use_cuda: model = model.cuda() linear_dim = model.linear_dim r", "global_step, idx, speaker_str)) save_alignment(path, alignment) tag = \"eval_averaged_alignment_{}_{}\".format(idx, speaker_str) writer.add_image(tag,", "mel_batch, y_batch, \\ (text_positions, frame_positions), done, target_lengths, speaker_ids def time_string():", "'Decoder timestep' if info is not None: xlabel += '\\n\\n'", "z.mean() else: binary_div = z.mean() return l1_loss, binary_div @jit(nopython=True) def", "collate_fn(batch): \"\"\"Create batch\"\"\" r = hparams.outputs_per_step downsample_step = hparams.downsample_step multi_speaker", "0: binary_div = Variable(y.data.new(1).zero_()) else: y_hat_logits = logit(y_hat) z =", "global_step % checkpoint_interval == 0: save_states( global_step, writer, mel_outputs, linear_outputs,", "if b_pad > 0: # s, e = s -", "len(self.lengths)) if batch_group_size % batch_size != 0: batch_group_size -= batch_group_size", "specific speaker of data in case for multi-speaker datasets. 
-h,", "model.cuda() linear_dim = model.linear_dim r = hparams.outputs_per_step downsample_step = hparams.downsample_step", "= mel[:, 0::downsample_step, :].contiguous() # Lengths input_lengths = input_lengths.long().numpy() decoder_lengths", "masked_mean(z, mask) + (1 - w) * z.mean() else: binary_div", "if train_seq2seq and train_postnet: suffix = \"\" m = model", "global global_step global global_epoch print(\"Load checkpoint from: {}\".format(path)) checkpoint =", "torch.FloatTensor(done).unsqueeze(-1) if multi_speaker: speaker_ids = torch.LongTensor([x[3] for x in batch])", "nn.BCELoss() assert train_seq2seq or train_postnet global global_step, global_epoch while global_epoch", "> 1: # spectrogram-domain mask target_mask = sequence_mask( target_lengths, max_len=y.size(1)).unsqueeze(-1)", ":] else: decoder_target_mask, target_mask = None, None # Apply model", "sequence_mask( target_lengths / (r * downsample_step), max_len=mel.size(1)).unsqueeze(-1) if downsample_step >", "max_len - len(seq)), mode='constant', constant_values=constant_values) def _pad_2d(x, max_len, b_pad=0): x", "writer.add_image(tag, np.uint8(cm.viridis(np.flip(alignment, 1).T) * 255), global_step) # save files as", "dtype=np.float32) y_batch = torch.FloatTensor(c) # text positions text_positions = np.array([_pad(np.arange(1,", "model, checkpoint_dir, ismultispeaker): # harded coded texts = [ \"Scientists", "0: writer.add_scalar(\"gradient norm\", grad_norm, global_step) writer.add_scalar(\"learning rate\", current_lr, global_step) global_step", "collect_files(self): meta = join(self.data_root, \"train.txt\") with open(meta, \"rb\") as f:", "PyTorchDataset(X, Mel, Y) data_loader = data_utils.DataLoader( dataset, batch_size=hparams.batch_size, num_workers=hparams.num_workers, sampler=sampler,", "// downsample_step # Feed data x, mel, y = Variable(x),", "= checkpoint[\"global_step\"] global_epoch = checkpoint[\"global_epoch\"] return model def _load_embedding(path, model):", "f: lines = 
f.readlines() l = lines[0].decode(\"utf-8\").split(\"|\") assert len(l) ==", "--checkpoint=<path> Restore model from checkpoint path if given. --checkpoint-seq2seq=<path> Restore", "X self.Mel = Mel self.Y = Y # alias self.multi_speaker", "# Apply model if train_seq2seq and train_postnet: mel_outputs, linear_outputs, attn,", "timestep' if info is not None: xlabel += '\\n\\n' +", "1).T) * 255), global_step) # save files as well for", "be set later def _pad(seq, max_len, constant_values=0): return np.pad(seq, (0,", "states at step {}\".format(global_step)) # idx = np.random.randint(0, len(input_lengths)) idx", "mel, y, input_lengths, checkpoint_dir=None): print(\"Save intermediate states at step {}\".format(global_step))", "join(alignment_dir, \"step{:09d}_layer_{}_alignment.png\".format( global_step, i + 1)) save_alignment(path, alignment) # Save", "dv3.synthesis synthesis._frontend = _frontend eval_output_dir = join(checkpoint_dir, \"eval\") os.makedirs(eval_output_dir, exist_ok=True)", "mel = mel.cuda() done, target_lengths = done.cuda(), target_lengths.cuda() speaker_ids =", "restore_parts(path, model): print(\"Restore part of the model from: {}\".format(path)) state", "= args[\"--restore-parts\"] speaker_id = args[\"--speaker-id\"] speaker_id = int(speaker_id) if speaker_id", "Dataloader setup dataset = PyTorchDataset(X, Mel, Y) data_loader = data_utils.DataLoader(", "writer.add_scalar(\"gradient norm\", grad_norm, global_step) writer.add_scalar(\"learning rate\", current_lr, global_step) global_step +=", "if load_embedding is not None: print(\"Loading embedding from {}\".format(load_embedding)) _load_embedding(load_embedding,", "int(l.decode(\"utf-8\").split(\"|\")[2]), lines)) paths = list(map(lambda l: l.decode(\"utf-8\").split(\"|\")[self.col], lines)) paths =", "max_target_len += downsample_step - max_target_len % downsample_step assert max_target_len %", "return texts def collect_features(self, *args): if self.multi_speaker: text, speaker_id =", 
"downsample_step # frame positions s, e = 1, max_decoder_target_len +", "speaker of data in case for multi-speaker datasets. -h, --help", "0: # s, e = s - 1, e -", "datetime # The deepvoice3 model from dv3.deepvoice3_pytorch import frontend, builder", "None else \"single\" for idx, text in enumerate(texts): signal, alignment,", "running_loss / (len(data_loader)) writer.add_scalar(\"loss (per epoch)\", averaged_loss, global_epoch) print(\"Loss: {}\".format(running_loss", "have discovered a new particle.\", \"There's a way to measure", "key_projection=hparams.key_projection, value_projection=hparams.value_projection, ) return model def load_checkpoint(path, model, optimizer, reset_optimizer):", "if checkpoint_seq2seq_path is not None: load_checkpoint(checkpoint_seq2seq_path, model.seq2seq, optimizer, reset_optimizer) if", "list( map(lambda l: int(l.decode(\"utf-8\").split(\"|\")[2]), lines)) paths = list(map(lambda l: l.decode(\"utf-8\").split(\"|\")[self.col],", "model. usage: train.py [options] options: --data-root=<dir> Directory contains preprocessed features.", "= w * masked_l1(y_hat, y, mask=mask) + (1 - w)", "weight_decay=hparams.weight_decay) if checkpoint_restore_parts is not None: restore_parts(checkpoint_restore_parts, model) # Load", "y, input_lengths, checkpoint_dir=None): print(\"Save intermediate states at step {}\".format(global_step)) #", "self.data_root = data_root self.col = col self.frame_lengths = [] self.speaker_id", "- w) * mel_l1_loss + w * mel_binary_div # done:", "batch_group_size assert batch_group_size % batch_size == 0 self.permutate = permutate", "_frontend.text_to_sequence(text, p=hparams.replace_pronunciation_prob) if self.multi_speaker: return np.asarray(seq, dtype=np.int32), int(speaker_id) else: return", "raise RuntimeError(\"Should provide either lengths or mask\") # (B, T,", "# harded coded texts = [ \"Scientists at the CERN", "e = 1, max_decoder_target_len + 1 # if b_pad >", "self.criterion(input * mask_, target * mask_) return 
loss / mask_.sum()", "Variable(y) text_positions = Variable(text_positions) frame_positions = Variable(frame_positions) done = Variable(done)", "= list(map(lambda l: l.decode(\"utf-8\").split(\"|\")[3], lines)) if self.multi_speaker: speaker_ids = list(map(lambda", "y, mask=mask) + (1 - w) * l1(y_hat, y) else:", "\"step{:09d}_text{}_{}_predicted.wav\".format( global_step, idx, speaker_str)) dv3.audio.save_wav(signal, path) try: writer.add_audio(\"(Eval) Predicted audio", "optimizer, step, checkpoint_dir, epoch, train_seq2seq, train_postnet): if train_seq2seq and train_postnet:", "accepted this as a miracle without any physical explanation.\", ]", "(2 * g * g)) return W def guided_attentions(input_lengths, target_lengths,", "1, 10] if ismultispeaker else [None] for speaker_id in speaker_ids:", "Mel self.Y = Y # alias self.multi_speaker = X.file_data_source.multi_speaker def", "if ismultispeaker else None # Create mask if we use", "assert len(l) == 4 or len(l) == 5 self.multi_speaker =", "is None: batch_group_size = min(batch_size * 32, len(self.lengths)) if batch_group_size", "tag = \"averaged_alignment\" writer.add_image(tag, np.uint8(cm.viridis(np.flip(alignment, 1).T) * 255), global_step) #", "Y): self.X = X self.Mel = Mel self.Y = Y", "0, 0 for i in range(len(indices) // batch_group_size): s =", "{}\".format(running_loss / (len(data_loader)))) global_epoch += 1 def save_checkpoint(model, optimizer, step,", "= X self.Mel = Mel self.Y = Y # alias", "x.cuda() text_positions = text_positions.cuda() frame_positions = frame_positions.cuda() if train_postnet: y", "text{}_{}\".format(idx, speaker_str), prepare_spec_image(mel), global_step) # Audio path = join(eval_output_dir, \"step{:09d}_text{}_{}_predicted.wav\".format(", "_load_embedding(load_embedding, model) # Setup summary writer for tensorboard if log_event_path", "False, \"must be specified wrong args\" # Override hyper parameters", "self.multi_speaker: text, speaker_id = self.X[idx] return text, self.Mel[idx], 
self.Y[idx], speaker_id", "positions text_positions = np.array([_pad(np.arange(1, len(x[0]) + 1), max_input_len) for x", "exist_ok=True) # Input dataset definitions X = FileSourceDataset(TextDataSource(data_root, speaker_id)) Mel", "param_group['lr'] = current_lr optimizer.zero_grad() # Used for Position encoding text_positions,", "indices[:e].view(-1, self.batch_size)[perm, :].view(-1) # Handle last elements s += batch_group_size", "soft_mask = guided_attentions(input_lengths, decoder_lengths, attn.size(-2), g=hparams.guided_attention_sigma) soft_mask = Variable(torch.from_numpy(soft_mask)) soft_mask", "- w) * linear_l1_loss + w * linear_binary_div # Combine", "= join( checkpoint_dir, \"checkpoint_step{:09d}{}.pth\".format(global_step, suffix)) optimizer_state = optimizer.state_dict() if hparams.save_optimizer_state", "batch]) done = torch.FloatTensor(done).unsqueeze(-1) if multi_speaker: speaker_ids = torch.LongTensor([x[3] for", "_ = model.seq2seq( x, mel, text_positions=text_positions, frame_positions=frame_positions, input_lengths=input_lengths) # reshape", "# Setup summary writer for tensorboard if log_event_path is None:", "valid_state_dict = {k: v for k, v in state.items() if", "xlabel = 'Decoder timestep' if info is not None: xlabel", "json hparams.parse_json(json.dumps(preset)) print(\"Override hyper parameters with preset \\\"{}\\\": {}\".format( hparams.preset,", "self.Mel[idx], self.Y[idx], speaker_id else: return self.X[idx], self.Mel[idx], self.Y[idx] def __len__(self):", "single speaker dataset if self.speaker_id is not None: indices =", "embedding if load_embedding is not None: print(\"Loading embedding from {}\".format(load_embedding))", "kernel_size=hparams.kernel_size, encoder_channels=hparams.encoder_channels, decoder_channels=hparams.decoder_channels, converter_channels=hparams.converter_channels, use_memory_mask=hparams.use_memory_mask, trainable_positional_encodings=hparams.trainable_positional_encodings, 
force_monotonic_attention=hparams.force_monotonic_attention, use_decoder_state_for_postnet_input=hparams.use_decoder_state_for_postnet_input, max_positions=hparams.max_positions, speaker_embedding_weight_std=hparams.speaker_embedding_weight_std,", "= np.array([_pad(x[0], max_input_len) for x in batch], dtype=np.int) x_batch =", "randomize it 3. Permutate mini-batchs \"\"\" def __init__(self, lengths, batch_size=16,", "hparams.save_optimizer_state else None torch.save({ \"state_dict\": m.state_dict(), \"optimizer\": optimizer_state, \"global_step\": step,", "speaker_id=None): self.data_root = data_root self.col = col self.frame_lengths = []", "self.batch_group_size = batch_group_size assert batch_group_size % batch_size == 0 self.permutate", "/ (r * downsample_step), max_len=mel.size(1)).unsqueeze(-1) if downsample_step > 1: #", "batch_size=hparams.batch_size, num_workers=hparams.num_workers, sampler=sampler, collate_fn=collate_fn, pin_memory=hparams.pin_memory) print(\"dataloader_prepared\") # Model model =", "linear_outputs is not None: linear_output = linear_outputs[idx].cpu().data.numpy() spectrogram = prepare_spec_image(dv3.audio._denormalize(linear_output))", "assert mask is None l1_loss = l1(y_hat, y) # Priority", "indices = self.sorted_indices.clone() batch_group_size = self.batch_group_size s, e = 0,", "embed_dim=hparams.text_embed_dim, mel_dim=hparams.num_mels, linear_dim=hparams.fft_size // 2 + 1, r=hparams.outputs_per_step, downsample_step=hparams.downsample_step, padding_idx=hparams.padding_idx,", "train_seq2seq: print(\"Training seq2seq model\") elif train_postnet: print(\"Training postnet model\") else:", "for zero beginning padding # imitates initial decoder states b_pad", "Lengths input_lengths = [len(x[0]) for x in batch] max_input_len =", "class TextDataSource(FileDataSource): def __init__(self, data_root, speaker_id=None): self.data_root = data_root self.speaker_ids", "* mask_).sum() / mask_.sum() def spec_loss(y_hat, y, mask, priority_bin=None, 
priority_w=0):", "global_epoch += 1 def save_checkpoint(model, optimizer, step, checkpoint_dir, epoch, train_seq2seq,", "sequence_length.unsqueeze(1) \\ .expand_as(seq_range_expand) return (seq_range_expand < seq_length_expand).float() class MaskedL1Loss(nn.Module): def", "# Create mask if we use masked loss if hparams.masked_loss_weight", "global_epoch print(\"Load checkpoint from: {}\".format(path)) checkpoint = torch.load(path) model.load_state_dict(checkpoint[\"state_dict\"]) if", "= target_lengths.long().numpy() // r // downsample_step # Feed data x,", "done = torch.FloatTensor(done).unsqueeze(-1) if multi_speaker: speaker_ids = torch.LongTensor([x[3] for x", "target_lengths, max_len=y.size(1)).unsqueeze(-1) else: target_mask = decoder_target_mask # shift mask decoder_target_mask", "exist_ok=True) path = join(alignment_dir, \"step{:09d}_layer_{}_alignment.png\".format( global_step, i + 1)) save_alignment(path,", "in range(T): W[n, t] = 1 - np.exp(-(n / N", "path = join(eval_output_dir, \"step{:09d}_text{}_{}_predicted.wav\".format( global_step, idx, speaker_str)) dv3.audio.save_wav(signal, path) try:", "mask decoder_target_mask = sequence_mask( target_lengths / (r * downsample_step), max_len=mel.size(1)).unsqueeze(-1)", "% r == 0 if max_target_len % downsample_step != 0:", "frame_lengths = Mel.file_data_source.frame_lengths sampler = PartialyRandomizedSimilarTimeLengthSampler( frame_lengths, batch_size=hparams.batch_size) # Dataset", "path = join(checkpoint_dir, \"step{:09d}_predicted.wav\".format( global_step)) try: writer.add_audio(\"Predicted audio signal\", signal,", "if train_seq2seq: x = x.cuda() text_positions = text_positions.cuda() frame_positions =", "load_embedding is not None: print(\"Loading embedding from {}\".format(load_embedding)) _load_embedding(load_embedding, model)", "input_lengths.long().numpy() decoder_lengths = target_lengths.long().numpy() // r // downsample_step # Feed", "= len(l) == 5 texts = list(map(lambda l: 
l.decode(\"utf-8\").split(\"|\")[3], lines))", "speaker_id self.speaker_id = speaker_id def collect_files(self): meta = join(self.data_root, \"train.txt\")", "np.array([_pad(np.zeros(len(x[1]) // r // downsample_step - 1), max_decoder_target_len, constant_values=1) for", "writer.add_scalar(\"attn_loss\", float(attn_loss.data[0]), global_step) if clip_thresh > 0: writer.add_scalar(\"gradient norm\", grad_norm,", "= int(speaker_id) if speaker_id is not None else None data_root", "__getitem__(self, idx): if self.multi_speaker: text, speaker_id = self.X[idx] return text,", "checkpoint_dir=None, checkpoint_interval=None, nepochs=None, clip_thresh=1.0, train_seq2seq=True, train_postnet=True): if use_cuda: model =", "s, e = s - 1, e - 1 frame_positions", "indices = np.array(speaker_ids) == self.speaker_id paths = list(np.array(paths)[indices]) self.frame_lengths =", "Exception as e: warn(str(e)) pass def save_states(global_step, writer, mel_outputs, linear_outputs,", "synthesis model. usage: train.py [options] options: --data-root=<dir> Directory contains preprocessed", "% downsample_step == 0 # Set 0 for zero beginning", "import datetime # The deepvoice3 model from dv3.deepvoice3_pytorch import frontend,", "with open(meta, \"rb\") as f: lines = f.readlines() l =", "global_step += 1 running_loss += loss.data[0] averaged_loss = running_loss /", "== \"deepvoice3\" # Presets if hparams.preset is not None and", "spectrogram text{}_{}\".format(idx, speaker_str), prepare_spec_image(mel), global_step) # Audio path = join(eval_output_dir,", "import torch from torch.utils import data as data_utils from torch.autograd", "= data_utils.DataLoader( dataset, batch_size=hparams.batch_size, num_workers=hparams.num_workers, sampler=sampler, collate_fn=collate_fn, pin_memory=hparams.pin_memory) print(\"dataloader_prepared\") #", "Stella.\", \"Some have accepted this as a miracle without any", "\"alignment_layer{}\".format(i + 1)) os.makedirs(alignment_dir, exist_ok=True) path = 
join(alignment_dir, \"step{:09d}_layer_{}_alignment.png\".format( global_step,", "priority_bin is not None and priority_w > 0: if w", "tqdm import tqdm, trange from datetime import datetime # The", "= speaker_id def collect_files(self): meta = join(self.data_root, \"train.txt\") with open(meta,", "// r // downsample_step - 1), max_decoder_target_len, constant_values=1) for x", "= 0, 0 for i in range(len(indices) // batch_group_size): s", "(seq_range_expand < seq_length_expand).float() class MaskedL1Loss(nn.Module): def __init__(self): super(MaskedL1Loss, self).__init__() self.criterion", "- max_target_len % downsample_step assert max_target_len % downsample_step == 0", "max_len, b_pad=0): x = np.pad(x, [(b_pad, max_len - len(x) -", "current_lr, global_step) global_step += 1 running_loss += loss.data[0] averaged_loss =", "> 0: grad_norm = torch.nn.utils.clip_grad_norm( model.get_trainable_parameters(), clip_thresh) optimizer.step() # Logs", "max_len is None: max_len = sequence_length.data.max() batch_size = sequence_length.size(0) seq_range", "= torch.LongTensor(input_lengths) target_lengths = torch.LongTensor(target_lengths) b = np.array([_pad_2d(x[1], max_target_len, b_pad=b_pad)", "train both if not specified if not train_seq2seq and not", "measure the acute emotional intelligence that has never gone out", "1).T) * 255), global_step) # Predicted mel spectrogram if mel_outputs", "tqdm, trange from datetime import datetime # The deepvoice3 model", "# Update loss.backward() if clip_thresh > 0: grad_norm = torch.nn.utils.clip_grad_norm(", "for k, v in state.items() if k in model_dict} model_dict.update(valid_state_dict)", "hparams.binary_divergence_weight <= 0: binary_div = Variable(y.data.new(1).zero_()) else: y_hat_logits = logit(y_hat)", "in enumerate(attn): alignment = alignment[idx].cpu().data.numpy() tag = \"alignment_layer{}\".format(i + 1)", "0: assert mask is not None l1_loss = w *", "= alignment[idx].cpu().data.numpy() tag = \"alignment_layer{}\".format(i + 
1) writer.add_image(tag, np.uint8(cm.viridis(np.flip(alignment, 1).T)", "def eval_model(global_step, writer, model, checkpoint_dir, ismultispeaker): # harded coded texts", "in state.items() if k in model_dict} model_dict.update(valid_state_dict) model.load_state_dict(model_dict) if __name__", "/ (len(data_loader)))) global_epoch += 1 def save_checkpoint(model, optimizer, step, checkpoint_dir,", "model, optimizer, global_step, checkpoint_dir, global_epoch, train_seq2seq, train_postnet) if global_step >", "global global_step, global_epoch while global_epoch < nepochs: running_loss = 0.", "args) checkpoint_dir = args[\"--checkpoint-dir\"] checkpoint_path = args[\"--checkpoint\"] checkpoint_seq2seq_path = args[\"--checkpoint-seq2seq\"]", "if ismultispeaker else None if use_cuda: if train_seq2seq: x =", "not None: restore_parts(checkpoint_restore_parts, model) # Load checkpoints if checkpoint_postnet_path is", "// downsample_step - 1), max_decoder_target_len, constant_values=1) for x in batch])", "torch.autograd import Variable from torch import nn from torch import", "decoder_target_mask = decoder_target_mask[:, r:, :] target_mask = target_mask[:, r:, :]", "args[\"--checkpoint-postnet\"] load_embedding = args[\"--load-embedding\"] checkpoint_restore_parts = args[\"--restore-parts\"] speaker_id = args[\"--speaker-id\"]", ":, :priority_bin]) else: priority_loss = l1(y_hat[:, :, :priority_bin], y[:, :,", "dv3.audio import dv3.lrschedule import torch from torch.utils import data as", "parameters with preset \\\"{}\\\": {}\".format( hparams.preset, json.dumps(preset, indent=4))) _frontend =", "return texts, speaker_ids else: return texts def collect_features(self, *args): if", "batch_size self.batch_group_size = batch_group_size assert batch_group_size % batch_size == 0", "ismultispeaker else [None] for speaker_id in speaker_ids: speaker_str = \"multispeaker{}\".format(speaker_id)", "target_lengths[b], max_target_len, g).T return W def train(model, data_loader, optimizer, 
writer,", "conference.\", \"Generative adversarial network or variational auto-encoder.\", \"Please call Stella.\",", "checkpoint[\"global_step\"] global_epoch = checkpoint[\"global_epoch\"] return model def _load_embedding(path, model): state", "y) # Priority L1 loss if priority_bin is not None", "= self.criterion(input * mask_, target * mask_) return loss /", "mel spectrogram text{}_{}\".format(idx, speaker_str), prepare_spec_image(mel), global_step) # Audio path =", "Predicted mel spectrogram if mel_outputs is not None: mel_output =", "= nn.L1Loss(size_average=False) def forward(self, input, target, lengths=None, mask=None, max_len=None): if", "self.Mel = Mel self.Y = Y # alias self.multi_speaker =", "\"Generative adversarial network or variational auto-encoder.\", \"Please call Stella.\", \"Some", "\\ + (1 - w) * l1(y_hat[:, :, :priority_bin], y[:,", "mel.cuda() done, target_lengths = done.cuda(), target_lengths.cuda() speaker_ids = speaker_ids.cuda() if", "batch_size = sequence_length.size(0) seq_range = torch.arange(0, max_len).long() seq_range_expand = seq_range.unsqueeze(0).expand(batch_size,", "sequence_mask(sequence_length, max_len=None): if max_len is None: max_len = sequence_length.data.max() batch_size", "# Used for Position encoding text_positions, frame_positions = positions #", "else: assert False, \"must be specified wrong args\" # Override", "speaker_id else: return self.X[idx], self.Mel[idx], self.Y[idx] def __len__(self): return len(self.X)", "by lengths 2. 
Pick a small patch and randomize it", "if max_len is None: max_len = sequence_length.data.max() batch_size = sequence_length.size(0)", "import Sampler import numpy as np from numba import jit", "0 and global_step % hparams.eval_interval == 0: eval_model(global_step, writer, model,", "// r // downsample_step # frame positions s, e =", "= hparams.masked_loss_weight # L1 loss if w > 0: assert", "not None: lr_schedule_f = getattr(dv3.lrschedule, hparams.lr_schedule) current_lr = lr_schedule_f( init_lr,", "import librosa.display from matplotlib import pyplot as plt import sys", "has never gone out of style.\", \"President Trump met with", "def __init__(self, X, Mel, Y): self.X = X self.Mel =", "= join(checkpoint_dir, \"alignment_layer{}\".format(i + 1)) os.makedirs(alignment_dir, exist_ok=True) path = join(alignment_dir,", "speaker_id = int(speaker_id) if speaker_id is not None else None", "mel_outputs = mel_outputs.view(len(mel), -1, mel.size(-1)) linear_outputs = None elif train_postnet:", "v for k, v in state.items() if k in model_dict}", "model.load_state_dict(checkpoint[\"state_dict\"]) if not reset_optimizer: optimizer_state = checkpoint[\"optimizer\"] if optimizer_state is", "\"eval\") os.makedirs(eval_output_dir, exist_ok=True) # hard coded speaker_ids = [0, 1,", "idx, speaker_str)) save_alignment(path, alignment) tag = \"eval_averaged_alignment_{}_{}\".format(idx, speaker_str) writer.add_image(tag, np.uint8(cm.viridis(np.flip(alignment,", "mask_.sum() def collate_fn(batch): \"\"\"Create batch\"\"\" r = hparams.outputs_per_step downsample_step =", "import warn from dv3.hparams import hparams, hparams_debug_string fs = hparams.sample_rate", "= np.array(speaker_ids) == self.speaker_id paths = list(np.array(paths)[indices]) self.frame_lengths = list(np.array(self.frame_lengths)[indices])", "model\") train_seq2seq, train_postnet = True, True if train_seq2seq: print(\"Training seq2seq", "train_postnet: suffix = \"\" m = model elif train_seq2seq: suffix", "= _frontend 
eval_output_dir = join(checkpoint_dir, \"eval\") os.makedirs(eval_output_dir, exist_ok=True) # hard", "= Variable(torch.from_numpy(soft_mask)) soft_mask = soft_mask.cuda() if use_cuda else soft_mask attn_loss", "target_lengths = torch.LongTensor(target_lengths) b = np.array([_pad_2d(x[1], max_target_len, b_pad=b_pad) for x", "spectrogram = np.flip(spectrogram, axis=1) # flip against freq axis return", "PyTorchDataset(object): def __init__(self, X, Mel, Y): self.X = X self.Mel", "X, Mel, Y): self.X = X self.Mel = Mel self.Y", "checkpoint_dir) save_checkpoint( model, optimizer, global_step, checkpoint_dir, global_epoch, train_seq2seq, train_postnet) if", "priority_w * priority_loss # Binary divergence loss if hparams.binary_divergence_weight <=", "b_pad > 0: # s, e = s - 1,", "priority_w=0): masked_l1 = MaskedL1Loss() l1 = nn.L1Loss() w = hparams.masked_loss_weight", "args[\"--checkpoint-dir\"] checkpoint_path = args[\"--checkpoint\"] checkpoint_seq2seq_path = args[\"--checkpoint-seq2seq\"] checkpoint_postnet_path = args[\"--checkpoint-postnet\"]", "if ismultispeaker else [None] for speaker_id in speaker_ids: speaker_str =", "< seq_length_expand).float() class MaskedL1Loss(nn.Module): def __init__(self): super(MaskedL1Loss, self).__init__() self.criterion =", "from torch.utils.data.sampler import Sampler import numpy as np from numba", "mask_, target * mask_) return loss / mask_.sum() def collate_fn(batch):", "if optimizer_state is not None: print(\"Load optimizer state from {}\".format(path))", "join(eval_output_dir, \"step{:09d}_text{}_{}_alignment.png\".format( global_step, idx, speaker_str)) save_alignment(path, alignment) tag = \"eval_averaged_alignment_{}_{}\".format(idx,", "speaker_id is not None else None data_root = args[\"--data-root\"] if", "= build_model() if use_cuda: model = model.cuda() optimizer = optim.Adam(model.get_trainable_parameters(),", "global_step) # save files as well for now alignment_dir =", "if self.speaker_id is not None: indices = 
np.array(speaker_ids) == self.speaker_id", "0: max_target_len += downsample_step - max_target_len % downsample_step assert max_target_len", "dtype=np.float32) for b in range(B): W[b] = guided_attention(input_lengths[b], max_input_len, target_lengths[b],", "import data as data_utils from torch.utils.data.sampler import Sampler import numpy", "b_pad), (0, 0)], mode=\"constant\", constant_values=0) return x def plot_alignment(alignment, path,", "# alias self.multi_speaker = X.file_data_source.multi_speaker def __getitem__(self, idx): if self.multi_speaker:", "= MaskedL1Loss() l1 = nn.L1Loss() w = hparams.masked_loss_weight # L1", "assert train_seq2seq or train_postnet global global_step, global_epoch while global_epoch <", "and train_postnet: mel_outputs, linear_outputs, attn, done_hat = model( x, mel,", "not None: print(\"Load optimizer state from {}\".format(path)) optimizer.load_state_dict(checkpoint[\"optimizer\"]) global_step =", "to be trained train_seq2seq = args[\"--train-seq2seq-only\"] train_postnet = args[\"--train-postnet-only\"] #", "use_cuda: model = model.cuda() optimizer = optim.Adam(model.get_trainable_parameters(), lr=hparams.initial_learning_rate, betas=( hparams.adam_beta1,", "# reshape mel_outputs = mel_outputs.view(len(mel), -1, mel.size(-1)) linear_outputs = None", "return np.asarray(seq, dtype=np.int32) class _NPYDataSource(FileDataSource): def __init__(self, data_root, col, speaker_id=None):", "# done: if train_seq2seq: done_loss = binary_criterion(done_hat, done) # linear:", "lines)) # Filter by speaker_id # using multi-speaker dataset as", "= FileSourceDataset(TextDataSource(data_root, speaker_id)) Mel = FileSourceDataset(MelSpecDataSource(data_root, speaker_id)) Y = FileSourceDataset(LinearSpecDataSource(data_root,", "--checkpoint-dir=<dir> Directory where to save model checkpoints [default: checkpoints]. 
--hparams=<parmas>", "// self.batch_size) random.shuffle(perm) indices[:e] = indices[:e].view(-1, self.batch_size)[perm, :].view(-1) # Handle", "None # Create mask if we use masked loss if", "fast=False) signal /= np.max(np.abs(signal)) # Alignment path = join(eval_output_dir, \"step{:09d}_text{}_{}_alignment.png\".format(", "mel, text_positions=text_positions, frame_positions=frame_positions, input_lengths=input_lengths) # reshape mel_outputs = mel_outputs.view(len(mel), -1,", "x, mel, y = Variable(x), Variable(mel), Variable(y) text_positions = Variable(text_positions)", "# Losses w = hparams.binary_divergence_weight # mel: if train_seq2seq: mel_l1_loss,", "l = lines[0].decode(\"utf-8\").split(\"|\") assert len(l) == 4 or len(l) ==", "Prepare sampler frame_lengths = Mel.file_data_source.frame_lengths sampler = PartialyRandomizedSimilarTimeLengthSampler( frame_lengths, batch_size=hparams.batch_size)", "5 multi_speaker = len(l) == 5 self.frame_lengths = list( map(lambda", "global_step) if train_postnet: writer.add_scalar(\"linear_loss\", float(linear_loss.data[0]), global_step) writer.add_scalar(\"linear_l1_loss\", float(linear_l1_loss.data[0]), global_step) writer.add_scalar(\"linear_binary_div_loss\",", "clip_thresh=hparams.clip_thresh, train_seq2seq=train_seq2seq, train_postnet=train_postnet) except KeyboardInterrupt: save_checkpoint( model, optimizer, global_step, checkpoint_dir,", "= docopt(__doc__) print(\"Command line args:\\n\", args) checkpoint_dir = args[\"--checkpoint-dir\"] checkpoint_path", "hparams.lr_schedule is not None: lr_schedule_f = getattr(dv3.lrschedule, hparams.lr_schedule) current_lr =", "self.multi_speaker = X.file_data_source.multi_speaker def __getitem__(self, idx): if self.multi_speaker: text, speaker_id", "{}\".format( hparams.preset, json.dumps(preset, indent=4))) _frontend = getattr(frontend, hparams.frontend) os.makedirs(checkpoint_dir, exist_ok=True)", "load_checkpoint(checkpoint_path, model, optimizer, reset_optimizer) # Load 
embedding if load_embedding is", "tensorboardX import SummaryWriter from matplotlib import cm from warnings import", "not train_postnet: print(\"Training whole model\") train_seq2seq, train_postnet = True, True", "optimizer, reset_optimizer): global global_step global global_epoch print(\"Load checkpoint from: {}\".format(path))", "Model model = build_model() if use_cuda: model = model.cuda() optimizer", "r = hparams.outputs_per_step downsample_step = hparams.downsample_step multi_speaker = len(batch[0]) ==", "as a single speaker dataset if self.speaker_id is not None:", "r:, :], target_mask, priority_bin=n_priority_freq, priority_w=hparams.priority_freq_weight) linear_loss = (1 - w)", "random.shuffle(indices[s:e]) # Permutate batches if self.permutate: perm = np.arange(len(indices[:e]) //", "checkpoint_path = args[\"--checkpoint\"] checkpoint_seq2seq_path = args[\"--checkpoint-seq2seq\"] checkpoint_postnet_path = args[\"--checkpoint-postnet\"] load_embedding", "(attn * soft_mask).mean() loss += attn_loss if global_step > 0", "join( checkpoint_dir, \"checkpoint_step{:09d}{}.pth\".format(global_step, suffix)) optimizer_state = optimizer.state_dict() if hparams.save_optimizer_state else", "(B, T, D) mask_ = mask.expand_as(y) return (y * mask_).sum()", "checkpoints]. --hparams=<parmas> Hyper parameters [default: ]. 
--checkpoint=<path> Restore model from", "eps=hparams.adam_eps, weight_decay=hparams.weight_decay) if checkpoint_restore_parts is not None: restore_parts(checkpoint_restore_parts, model) #", "* 255), global_step) # Mel writer.add_image(\"(Eval) Predicted mel spectrogram text{}_{}\".format(idx,", "None: load_checkpoint(checkpoint_postnet_path, model.postnet, optimizer, reset_optimizer) if checkpoint_seq2seq_path is not None:", "soft_mask attn_loss = (attn * soft_mask).mean() loss += attn_loss if", "not None: print(\"Loading embedding from {}\".format(load_embedding)) _load_embedding(load_embedding, model) # Setup", "4 # Lengths input_lengths = [len(x[0]) for x in batch]", "map(lambda l: int(l.decode(\"utf-8\").split(\"|\")[2]), lines)) paths = list(map(lambda l: l.decode(\"utf-8\").split(\"|\")[self.col], lines))", "# decoder output domain mask decoder_target_mask = sequence_mask( target_lengths /", "# Handle last elements s += batch_group_size if s <", "np.random.randint(0, len(input_lengths)) idx = min(1, len(input_lengths) - 1) input_length =", "hparams.preset, json.dumps(preset, indent=4))) _frontend = getattr(frontend, hparams.frontend) os.makedirs(checkpoint_dir, exist_ok=True) #", "[ \"Scientists at the CERN laboratory say they have discovered", "# frame positions s, e = 1, max_decoder_target_len + 1", "{}\".format(path)) optimizer.load_state_dict(checkpoint[\"optimizer\"]) global_step = checkpoint[\"global_step\"] global_epoch = checkpoint[\"global_epoch\"] return model", "def collect_features(self, *args): if self.multi_speaker: text, speaker_id = args else:", "train_seq2seq: suffix = \"_seq2seq\" m = model.seq2seq elif train_postnet: suffix", "checkpoint_restore_parts = args[\"--restore-parts\"] speaker_id = args[\"--speaker-id\"] speaker_id = int(speaker_id) if", "Y) data_loader = data_utils.DataLoader( dataset, batch_size=hparams.batch_size, num_workers=hparams.num_workers, sampler=sampler, collate_fn=collate_fn, pin_memory=hparams.pin_memory)", "model = 
model.cuda() linear_dim = model.linear_dim r = hparams.outputs_per_step downsample_step", "batches if self.permutate: perm = np.arange(len(indices[:e]) // self.batch_size) random.shuffle(perm) indices[:e]", "clip_thresh > 0: grad_norm = torch.nn.utils.clip_grad_norm( model.get_trainable_parameters(), clip_thresh) optimizer.step() #", "print(\"Load checkpoint from: {}\".format(path)) checkpoint = torch.load(path) model.load_state_dict(checkpoint[\"state_dict\"]) if not", "batch_group_size random.shuffle(indices[s:e]) # Permutate batches if self.permutate: perm = np.arange(len(indices[:e])", "binary_div = w * masked_mean(z, mask) + (1 - w)", "= \"averaged_alignment\" writer.add_image(tag, np.uint8(cm.viridis(np.flip(alignment, 1).T) * 255), global_step) # Predicted", "done flags done = np.array([_pad(np.zeros(len(x[1]) // r // downsample_step -", "global_step > 0 and global_step % hparams.eval_interval == 0: eval_model(global_step,", "def sequence_mask(sequence_length, max_len=None): if max_len is None: max_len = sequence_length.data.max()", "seq_range = torch.arange(0, max_len).long() seq_range_expand = seq_range.unsqueeze(0).expand(batch_size, max_len) seq_range_expand =", "not None: mel_output = mel[idx].cpu().data.numpy() mel_output = prepare_spec_image(dv3.audio._denormalize(mel_output)) writer.add_image(\"Target mel", "for multi-speaker datasets. 
-h, --help Show this help message and", "attn): plot_alignment(attn.T, path, info=\"{}, {}, step={}\".format( hparams.builder, time_string(), global_step)) def", "= {k: v for k, v in state.items() if k", "// 2 + 1, r=hparams.outputs_per_step, downsample_step=hparams.downsample_step, padding_idx=hparams.padding_idx, dropout=hparams.dropout, kernel_size=hparams.kernel_size, encoder_channels=hparams.encoder_channels,", "writer.add_scalar(\"mel loss\", float(mel_loss.data[0]), global_step) writer.add_scalar(\"mel_l1_loss\", float(mel_l1_loss.data[0]), global_step) writer.add_scalar(\"mel_binary_div_loss\", float(mel_binary_div.data[0]), global_step)", "Target spectrogram if linear_outputs is not None: linear_output = y[idx].cpu().data.numpy()", "0: # decoder output domain mask decoder_target_mask = sequence_mask( target_lengths", "+ 1) writer.add_image(tag, np.uint8(cm.viridis(np.flip(alignment, 1).T) * 255), global_step) # save", "len(self.sorted_indices) class PyTorchDataset(object): def __init__(self, X, Mel, Y): self.X =", "# Alignment # Multi-hop attention if attn is not None", "mel spectrogram\", mel_output, global_step) # Predicted spectrogram if linear_outputs is", "masked_l1 = MaskedL1Loss() l1 = nn.L1Loss() w = hparams.masked_loss_weight #", "Downsample mel spectrogram if downsample_step > 1: mel = mel[:,", "writer, init_lr=0.002, checkpoint_dir=None, checkpoint_interval=None, nepochs=None, clip_thresh=1.0, train_seq2seq=True, train_postnet=True): if use_cuda:", "TextDataSource(FileDataSource): def __init__(self, data_root, speaker_id=None): self.data_root = data_root self.speaker_ids =", "else: return np.asarray(seq, dtype=np.int32) class _NPYDataSource(FileDataSource): def __init__(self, data_root, col,", "dataset as a single speaker dataset indices = np.array(speaker_ids) ==", "hparams.builder, time_string(), global_step)) def prepare_spec_image(spectrogram): # [0, 1] spectrogram =", "of 20 conference.\", \"Generative adversarial network or variational 
auto-encoder.\", \"Please", "def masked_mean(y, mask): # (B, T, D) mask_ = mask.expand_as(y)", "mask_ = mask.expand_as(y) return (y * mask_).sum() / mask_.sum() def", "+ eps) def masked_mean(y, mask): # (B, T, D) mask_", "pin_memory=hparams.pin_memory) print(\"dataloader_prepared\") # Model model = build_model() if use_cuda: model", "- priority_w) * l1_loss + priority_w * priority_loss # Binary", "= model.cuda() optimizer = optim.Adam(model.get_trainable_parameters(), lr=hparams.initial_learning_rate, betas=( hparams.adam_beta1, hparams.adam_beta2), eps=hparams.adam_eps,", "is not None and hparams.preset != \"\": preset = hparams.presets[hparams.preset]", "value_projection=hparams.value_projection, ) return model def load_checkpoint(path, model, optimizer, reset_optimizer): global", "done_loss elif train_seq2seq: loss = mel_loss + done_loss elif train_postnet:", "1 running_loss += loss.data[0] averaged_loss = running_loss / (len(data_loader)) writer.add_scalar(\"loss", "Predicted spectrogram if linear_outputs is not None: linear_output = linear_outputs[idx].cpu().data.numpy()", "- len(seq)), mode='constant', constant_values=constant_values) def _pad_2d(x, max_len, b_pad=0): x =", "\\ (text_positions, frame_positions), done, target_lengths, speaker_ids def time_string(): return datetime.now().strftime('%Y-%m-%d", "seq_length_expand = sequence_length.unsqueeze(1) \\ .expand_as(seq_range_expand) return (seq_range_expand < seq_length_expand).float() class", ":], mel[:, r:, :], decoder_target_mask) mel_loss = (1 - w)", "x in batch]) else: speaker_ids = None return x_batch, input_lengths,", "join(checkpoint_dir, \"alignment_layer{}\".format(i + 1)) os.makedirs(alignment_dir, exist_ok=True) path = join(alignment_dir, \"step{:09d}_layer_{}_alignment.png\".format(", "__len__(self): return len(self.X) def sequence_mask(sequence_length, max_len=None): if max_len is None:", "model. --log-event-path=<name> Log event path. --reset-optimizer Reset optimizer. 
--load-embedding=<path> Load", "= (spectrogram - np.min(spectrogram)) / (np.max(spectrogram) - np.min(spectrogram)) spectrogram =", "global_step) # Predicted spectrogram if linear_outputs is not None: linear_output", "# attention if train_seq2seq and hparams.use_guided_attention: soft_mask = guided_attentions(input_lengths, decoder_lengths,", "checkpoint_dir, global_epoch, train_seq2seq, train_postnet) if global_step > 0 and global_step", "+ eps) - torch.log(1 - x + eps) def masked_mean(y,", "return x_batch, input_lengths, mel_batch, y_batch, \\ (text_positions, frame_positions), done, target_lengths,", "and exit \"\"\" from docopt import docopt import sys from", "= np.random.randint(0, len(input_lengths)) idx = min(1, len(input_lengths) - 1) input_length", "nn.L1Loss(size_average=False) def forward(self, input, target, lengths=None, mask=None, max_len=None): if lengths", "docopt import docopt import sys from os.path import dirname, join", "= np.zeros((max_N, max_T), dtype=np.float32) for n in range(N): for t", "join(checkpoint_dir, \"eval\") os.makedirs(eval_output_dir, exist_ok=True) # hard coded speaker_ids = [0,", "logit(y_hat) z = -y * y_hat_logits + torch.log(1 + torch.exp(y_hat_logits))", "l: int(l.decode(\"utf-8\").split(\"|\")[2]), lines)) paths = list(map(lambda l: l.decode(\"utf-8\").split(\"|\")[self.col], lines)) paths", "plt.close() class TextDataSource(FileDataSource): def __init__(self, data_root, speaker_id=None): self.data_root = data_root", "rate schedule if hparams.lr_schedule is not None: lr_schedule_f = getattr(dv3.lrschedule,", "l1_loss = w * masked_l1(y_hat, y, mask=mask) + (1 -", ":priority_bin], mask=mask) \\ + (1 - w) * l1(y_hat[:, :,", "model from dv3.deepvoice3_pytorch import frontend, builder import dv3.audio import dv3.lrschedule", "if linear_outputs is not None: linear_output = linear_outputs[idx].cpu().data.numpy() spectrogram =", "= x.cuda() text_positions = text_positions.cuda() frame_positions = frame_positions.cuda() if 
train_postnet:", "= sequence_mask( target_lengths, max_len=y.size(1)).unsqueeze(-1) else: target_mask = decoder_target_mask # shift", "soft_mask = Variable(torch.from_numpy(soft_mask)) soft_mask = soft_mask.cuda() if use_cuda else soft_mask", "model_dict.update(valid_state_dict) model.load_state_dict(model_dict) if __name__ == \"__main__\": args = docopt(__doc__) print(\"Command", "0 use_cuda = torch.cuda.is_available() if use_cuda: cudnn.benchmark = False _frontend", "max_N, T, max_T, g): W = np.zeros((max_N, max_T), dtype=np.float32) for", "x = x.cuda() text_positions = text_positions.cuda() frame_positions = frame_positions.cuda() if", "0 # Set 0 for zero beginning padding # imitates", "texts def collect_features(self, *args): if self.multi_speaker: text, speaker_id = args", "--data-root=<dir> Directory contains preprocessed features. --checkpoint-dir=<dir> Directory where to save", "class MelSpecDataSource(_NPYDataSource): def __init__(self, data_root, speaker_id=None): super(MelSpecDataSource, self).__init__(data_root, 1, speaker_id)", "for i, alignment in enumerate(attn): alignment = alignment[idx].cpu().data.numpy() tag =", "\"step{:09d}_layer_{}_alignment.png\".format( global_step, i + 1)) save_alignment(path, alignment) # Save averaged", "return l1_loss, binary_div @jit(nopython=True) def guided_attention(N, max_N, T, max_T, g):", "= Mel.file_data_source.frame_lengths sampler = PartialyRandomizedSimilarTimeLengthSampler( frame_lengths, batch_size=hparams.batch_size) # Dataset and", "return np.load(path) class MelSpecDataSource(_NPYDataSource): def __init__(self, data_root, speaker_id=None): super(MelSpecDataSource, self).__init__(data_root,", "self.multi_speaker: speaker_ids = list(map(lambda l: int(l.decode(\"utf-8\").split(\"|\")[-1]), lines)) # Filter by", "alias self.multi_speaker = X.file_data_source.multi_speaker def __getitem__(self, idx): if self.multi_speaker: text,", "train_postnet global global_step, global_epoch while global_epoch < nepochs: 
running_loss =", "postnet model\") else: assert False, \"must be specified wrong args\"", "idx = min(1, len(input_lengths) - 1) input_length = input_lengths[idx] #", "Alignment path = join(eval_output_dir, \"step{:09d}_text{}_{}_alignment.png\".format( global_step, idx, speaker_str)) save_alignment(path, alignment)", "float(mel_loss.data[0]), global_step) writer.add_scalar(\"mel_l1_loss\", float(mel_l1_loss.data[0]), global_step) writer.add_scalar(\"mel_binary_div_loss\", float(mel_binary_div.data[0]), global_step) if train_postnet:", "spectrogram if downsample_step > 1: mel = mel[:, 0::downsample_step, :].contiguous()", "# Target spectrogram if linear_outputs is not None: linear_output =", "= torch.LongTensor([x[3] for x in batch]) else: speaker_ids = None", "np.uint8(cm.viridis(np.flip(alignment, 1).T) * 255), global_step) # Predicted mel spectrogram if", "LinearSpecDataSource(_NPYDataSource): def __init__(self, data_root, speaker_id=None): super(LinearSpecDataSource, self).__init__(data_root, 0, speaker_id) class", "w) * mel_l1_loss + w * mel_binary_div # done: if", "model) # Setup summary writer for tensorboard if log_event_path is", "checkpoint = torch.load(path) model.load_state_dict(checkpoint[\"state_dict\"]) if not reset_optimizer: optimizer_state = checkpoint[\"optimizer\"]", "not None: load_checkpoint(checkpoint_path, model, optimizer, reset_optimizer) # Load embedding if", "train_postnet: print(\"Training whole model\") train_seq2seq, train_postnet = True, True if", "eval_output_dir = join(checkpoint_dir, \"eval\") os.makedirs(eval_output_dir, exist_ok=True) # hard coded speaker_ids", "= mask.expand_as(y) return (y * mask_).sum() / mask_.sum() def spec_loss(y_hat,", "= join(alignment_dir, \"step{:09d}_layer_{}_alignment.png\".format( global_step, i + 1)) save_alignment(path, alignment) #", "or mask\") # (B, T, 1) if mask is None:", "255) def eval_model(global_step, writer, model, checkpoint_dir, ismultispeaker): # harded coded", "spectrogram if 
linear_outputs is not None: linear_output = y[idx].cpu().data.numpy() spectrogram", "if not specified if not train_seq2seq and not train_postnet: print(\"Training", "train_seq2seq and not train_postnet: print(\"Training whole model\") train_seq2seq, train_postnet =", "seq_range_expand.cuda() seq_length_expand = sequence_length.unsqueeze(1) \\ .expand_as(seq_range_expand) return (seq_range_expand < seq_length_expand).float()", "+ w * linear_binary_div # Combine losses if train_seq2seq and", "spec_loss( linear_outputs[:, :-r, :], y[:, r:, :], target_mask, priority_bin=n_priority_freq, priority_w=hparams.priority_freq_weight)", "set later def _pad(seq, max_len, constant_values=0): return np.pad(seq, (0, max_len", "mel_binary_div # done: if train_seq2seq: done_loss = binary_criterion(done_hat, done) #", "n_vocab=_frontend.n_vocab, embed_dim=hparams.text_embed_dim, mel_dim=hparams.num_mels, linear_dim=hparams.fft_size // 2 + 1, r=hparams.outputs_per_step, downsample_step=hparams.downsample_step,", "spec_loss( mel_outputs[:, :-r, :], mel[:, r:, :], decoder_target_mask) mel_loss =", "schedule if hparams.lr_schedule is not None: lr_schedule_f = getattr(dv3.lrschedule, hparams.lr_schedule)", "save_checkpoint( model, optimizer, global_step, checkpoint_dir, global_epoch, train_seq2seq, train_postnet) if global_step", "mel_outputs[idx].cpu().data.numpy() mel_output = prepare_spec_image(dv3.audio._denormalize(mel_output)) writer.add_image(\"Predicted mel spectrogram\", mel_output, global_step) #", "in case for multi-speaker datasets. 
-h, --help Show this help", "global_step % hparams.eval_interval == 0: eval_model(global_step, writer, model, checkpoint_dir, ismultispeaker)", "= mel[idx].cpu().data.numpy() mel_output = prepare_spec_image(dv3.audio._denormalize(mel_output)) writer.add_image(\"Target mel spectrogram\", mel_output, global_step)", "or variational auto-encoder.\", \"Please call Stella.\", \"Some have accepted this", "data_root = join(dirname(__file__), \"data\", \"ljspeech\") log_event_path = args[\"--log-event-path\"] reset_optimizer =", "torch.arange(s, e).long().unsqueeze(0).expand( len(batch), max_decoder_target_len) # done flags done = np.array([_pad(np.zeros(len(x[1])", "if batch_group_size % batch_size != 0: batch_group_size -= batch_group_size %", "forward(self, input, target, lengths=None, mask=None, max_len=None): if lengths is None", "/ mask_.sum() def collate_fn(batch): \"\"\"Create batch\"\"\" r = hparams.outputs_per_step downsample_step", "n in range(N): for t in range(T): W[n, t] =", "time_string(), global_step)) def prepare_spec_image(spectrogram): # [0, 1] spectrogram = (spectrogram", "= args[\"--log-event-path\"] reset_optimizer = args[\"--reset-optimizer\"] # Which model to be", "s - 1, e - 1 frame_positions = torch.arange(s, e).long().unsqueeze(0).expand(", "self).__init__() self.criterion = nn.L1Loss(size_average=False) def forward(self, input, target, lengths=None, mask=None,", "+= 1 running_loss += loss.data[0] averaged_loss = running_loss / (len(data_loader))", "max_len=None): if lengths is None and mask is None: raise", "\"There's a way to measure the acute emotional intelligence that", "FileSourceDataset, FileDataSource from os.path import join, expanduser import random import", "Variable(frame_positions) done = Variable(done) target_lengths = Variable(target_lengths) speaker_ids = Variable(speaker_ids)", "hyper parameters hparams.parse(args[\"--hparams\"]) print(hparams_debug_string()) assert hparams.name == \"deepvoice3\" # Presets", "mask_) return loss / 
mask_.sum() def collate_fn(batch): \"\"\"Create batch\"\"\" r", "audio signal {}_{}\".format(idx, speaker_str), signal, global_step, sample_rate=fs) except Exception as", "if use_cuda: model = model.cuda() linear_dim = model.linear_dim r =", "= speaker_ids.cuda() if ismultispeaker else None # Create mask if", "np from numba import jit from nnmnkwii.datasets import FileSourceDataset, FileDataSource", "train_seq2seq, train_postnet): if train_seq2seq and train_postnet: suffix = \"\" m", "if self.multi_speaker: speaker_ids = list(map(lambda l: int(l.decode(\"utf-8\").split(\"|\")[-1]), lines)) # Filter", "torch.cuda.is_available() if use_cuda: cudnn.benchmark = False _frontend = None #", "y = Variable(x), Variable(mel), Variable(y) text_positions = Variable(text_positions) frame_positions =", "len(l) == 5 texts = list(map(lambda l: l.decode(\"utf-8\").split(\"|\")[3], lines)) if", "model from checkpoint path. --train-seq2seq-only Train only seq2seq model. --train-postnet-only", ":, :priority_bin], y[:, :, :priority_bin]) else: priority_loss = l1(y_hat[:, :,", "join(alignment_dir, \"step{:09d}_alignment.png\".format(global_step)) alignment = attn.mean(0)[idx].cpu().data.numpy() save_alignment(path, alignment) tag = \"averaged_alignment\"", "% hparams.eval_interval == 0: eval_model(global_step, writer, model, checkpoint_dir, ismultispeaker) #", "speaker_id) class PartialyRandomizedSimilarTimeLengthSampler(Sampler): \"\"\"Partially randmoized sampler 1. Sort by lengths", "0. 
for step, (x, input_lengths, mel, y, positions, done, target_lengths,", "max_len=None): if max_len is None: max_len = sequence_length.data.max() batch_size =", "enumerate(attn): alignment = alignment[idx].cpu().data.numpy() tag = \"alignment_layer{}\".format(i + 1) writer.add_image(tag,", "l1_loss + priority_w * priority_loss # Binary divergence loss if", "prepare_spec_image(dv3.audio._denormalize(linear_output)) writer.add_image(\"Predicted linear spectrogram\", spectrogram, global_step) # Predicted audio signal", "checkpoint_path) print(\"Saved checkpoint:\", checkpoint_path) def build_model(): model = getattr(builder, hparams.builder)(", "trange from datetime import datetime # The deepvoice3 model from", "global_epoch = 0 use_cuda = torch.cuda.is_available() if use_cuda: cudnn.benchmark =", "-1, mel.size(-1)) linear_outputs = None elif train_postnet: assert speaker_ids is", "done_loss elif train_postnet: loss = linear_loss # attention if train_seq2seq", "definitions X = FileSourceDataset(TextDataSource(data_root, speaker_id)) Mel = FileSourceDataset(MelSpecDataSource(data_root, speaker_id)) Y", "Variable(seq_range_expand) if sequence_length.is_cuda: seq_range_expand = seq_range_expand.cuda() seq_length_expand = sequence_length.unsqueeze(1) \\", "as plt import sys import os from tensorboardX import SummaryWriter", "mel[:, r:, :], decoder_target_mask) mel_loss = (1 - w) *", "nepochs=None, clip_thresh=1.0, train_seq2seq=True, train_postnet=True): if use_cuda: model = model.cuda() linear_dim", "= join(eval_output_dir, \"step{:09d}_text{}_{}_alignment.png\".format( global_step, idx, speaker_str)) save_alignment(path, alignment) tag =", "specified if not train_seq2seq and not train_postnet: print(\"Training whole model\")", "Directory contains preprocessed features. 
--checkpoint-dir=<dir> Directory where to save model", "W[n, t] = 1 - np.exp(-(n / N - t", "hparams.downsample_step current_lr = init_lr binary_criterion = nn.BCELoss() assert train_seq2seq or", "0.5) * linear_dim) linear_l1_loss, linear_binary_div = spec_loss( linear_outputs[:, :-r, :],", "features. --checkpoint-dir=<dir> Directory where to save model checkpoints [default: checkpoints].", "= None, None, None # Losses w = hparams.binary_divergence_weight #", "__len__(self): return len(self.sorted_indices) class PyTorchDataset(object): def __init__(self, X, Mel, Y):", "class PyTorchDataset(object): def __init__(self, X, Mel, Y): self.X = X", "min(1, len(input_lengths) - 1) input_length = input_lengths[idx] # Alignment #", "data_root is None: data_root = join(dirname(__file__), \"data\", \"ljspeech\") log_event_path =", "{k: v for k, v in state.items() if k in", "speaker_ids is None linear_outputs = model.postnet(mel) mel_outputs, attn, done_hat =", "torch.LongTensor(target_lengths) b = np.array([_pad_2d(x[1], max_target_len, b_pad=b_pad) for x in batch],", "writer.add_scalar(\"mel_l1_loss\", float(mel_l1_loss.data[0]), global_step) writer.add_scalar(\"mel_binary_div_loss\", float(mel_binary_div.data[0]), global_step) if train_postnet: writer.add_scalar(\"linear_loss\", float(linear_loss.data[0]),", "from nnmnkwii.datasets import FileSourceDataset, FileDataSource from os.path import join, expanduser", "if train_seq2seq and hparams.use_guided_attention: writer.add_scalar(\"attn_loss\", float(attn_loss.data[0]), global_step) if clip_thresh >", "= torch.sort(torch.LongTensor(lengths)) self.batch_size = batch_size if batch_group_size is None: batch_group_size", "of the model. --log-event-path=<name> Log event path. 
--reset-optimizer Reset optimizer.", "return np.asarray(seq, dtype=np.int32), int(speaker_id) else: return np.asarray(seq, dtype=np.int32) class _NPYDataSource(FileDataSource):", "max_len) seq_range_expand = Variable(seq_range_expand) if sequence_length.is_cuda: seq_range_expand = seq_range_expand.cuda() seq_length_expand", "x in batch], dtype=np.int) text_positions = torch.LongTensor(text_positions) max_decoder_target_len = max_target_len", "else: binary_div = z.mean() return l1_loss, binary_div @jit(nopython=True) def guided_attention(N,", "\\ .expand_as(seq_range_expand) return (seq_range_expand < seq_length_expand).float() class MaskedL1Loss(nn.Module): def __init__(self):", "priority_w=hparams.priority_freq_weight) linear_loss = (1 - w) * linear_l1_loss + w", "batch_group_size e = s + batch_group_size random.shuffle(indices[s:e]) # Permutate batches", "writer.add_image(\"Target linear spectrogram\", spectrogram, global_step) def logit(x, eps=1e-8): return torch.log(x", "auto-encoder.\", \"Please call Stella.\", \"Some have accepted this as a", "# text positions text_positions = np.array([_pad(np.arange(1, len(x[0]) + 1), max_input_len)", "nn from torch import optim import torch.backends.cudnn as cudnn from", "= args[\"--checkpoint-dir\"] checkpoint_path = args[\"--checkpoint\"] checkpoint_seq2seq_path = args[\"--checkpoint-seq2seq\"] checkpoint_postnet_path =", "use_cuda: model = model.cuda() linear_dim = model.linear_dim r = hparams.outputs_per_step", "loss if w > 0: assert mask is not None", "not None, filter by speaker_id self.speaker_id = speaker_id def collect_files(self):", "batch_group_size is None: batch_group_size = min(batch_size * 32, len(self.lengths)) if", "def __len__(self): return len(self.sorted_indices) class PyTorchDataset(object): def __init__(self, X, Mel,", "matplotlib import pyplot as plt import sys import os from", "else None data_root = args[\"--data-root\"] if data_root is None: data_root", "np.uint8(cm.magma(spectrogram.T) * 255) def 
eval_model(global_step, writer, model, checkpoint_dir, ismultispeaker): #", "import SummaryWriter from matplotlib import cm from warnings import warn", "= list(np.array(self.frame_lengths)[indices]) # aha, need to cast numpy.int64 to int", "hparams.eval_interval == 0: eval_model(global_step, writer, model, checkpoint_dir, ismultispeaker) # Update", "None and attn.dim() == 4: for i, alignment in enumerate(attn):", "prepare_spec_image(spectrogram): # [0, 1] spectrogram = (spectrogram - np.min(spectrogram)) /", "None: load_checkpoint(checkpoint_path, model, optimizer, reset_optimizer) # Load embedding if load_embedding", "mel_outputs, linear_outputs, attn, mel, y, input_lengths, checkpoint_dir) save_checkpoint( model, optimizer,", "None # to be set later def _pad(seq, max_len, constant_values=0):", "255), global_step) # Mel writer.add_image(\"(Eval) Predicted mel spectrogram text{}_{}\".format(idx, speaker_str),", "[options] options: --data-root=<dir> Directory contains preprocessed features. 
--checkpoint-dir=<dir> Directory where", "w * masked_mean(z, mask) + (1 - w) * z.mean()", "optimizer, writer, init_lr=hparams.initial_learning_rate, checkpoint_dir=checkpoint_dir, checkpoint_interval=hparams.checkpoint_interval, nepochs=hparams.nepochs, clip_thresh=hparams.clip_thresh, train_seq2seq=train_seq2seq, train_postnet=train_postnet) except", "else: decoder_target_mask, target_mask = None, None # Apply model if", "= hparams.sample_rate global_step = 0 global_epoch = 0 use_cuda =", "loss if priority_bin is not None and priority_w > 0:", "paths def collect_features(self, path): return np.load(path) class MelSpecDataSource(_NPYDataSource): def __init__(self,", "def __init__(self, data_root, speaker_id=None): super(LinearSpecDataSource, self).__init__(data_root, 0, speaker_id) class PartialyRandomizedSimilarTimeLengthSampler(Sampler):", "Dataset and Dataloader setup dataset = PyTorchDataset(X, Mel, Y) data_loader", "5 self.frame_lengths = list( map(lambda l: int(l.decode(\"utf-8\").split(\"|\")[2]), lines)) paths =", "args[\"--train-seq2seq-only\"] train_postnet = args[\"--train-postnet-only\"] # train both if not specified", "target_lengths, speaker_ids def time_string(): return datetime.now().strftime('%Y-%m-%d %H:%M') def save_alignment(path, attn):", "else soft_mask attn_loss = (attn * soft_mask).mean() loss += attn_loss", "if downsample_step > 1: # spectrogram-domain mask target_mask = sequence_mask(", "MelSpecDataSource(_NPYDataSource): def __init__(self, data_root, speaker_id=None): super(MelSpecDataSource, self).__init__(data_root, 1, speaker_id) class", "None: linear_output = y[idx].cpu().data.numpy() spectrogram = prepare_spec_image(dv3.audio._denormalize(linear_output)) writer.add_image(\"Target linear spectrogram\",", "return model def _load_embedding(path, model): state = torch.load(path)[\"state_dict\"] key =", "padding # imitates initial decoder states b_pad = r max_target_len", "FileDataSource from os.path import join, expanduser import random 
import librosa.display", "clip_thresh=1.0, train_seq2seq=True, train_postnet=True): if use_cuda: model = model.cuda() linear_dim =", "list(np.array(texts)[indices]) self.multi_speaker = False return texts return texts, speaker_ids else:", "import sys import os from tensorboardX import SummaryWriter from matplotlib", "model.postnet checkpoint_path = join( checkpoint_dir, \"checkpoint_step{:09d}{}.pth\".format(global_step, suffix)) optimizer_state = optimizer.state_dict()", "in speaker_ids: speaker_str = \"multispeaker{}\".format(speaker_id) if speaker_id is not None", "max(input_lengths) target_lengths = [len(x[1]) for x in batch] max_target_len =", "{}_{}\".format(idx, speaker_str), signal, global_step, sample_rate=fs) except Exception as e: warn(str(e))", "\"Scientists at the CERN laboratory say they have discovered a", "= -y * y_hat_logits + torch.log(1 + torch.exp(y_hat_logits)) if w", "= np.array(speaker_ids) == self.speaker_id texts = list(np.array(texts)[indices]) self.multi_speaker = False", "+ priority_w * priority_loss # Binary divergence loss if hparams.binary_divergence_weight", "iter(indices) def __len__(self): return len(self.sorted_indices) class PyTorchDataset(object): def __init__(self, X,", "k in model_dict} model_dict.update(valid_state_dict) model.load_state_dict(model_dict) if __name__ == \"__main__\": args", "w = hparams.binary_divergence_weight # mel: if train_seq2seq: mel_l1_loss, mel_binary_div =", "b_pad=b_pad) for x in batch], dtype=np.float32) mel_batch = torch.FloatTensor(b) c", "l1_loss = (1 - priority_w) * l1_loss + priority_w *", "+ 1 # if b_pad > 0: # s, e", "speaker_id)) # Prepare sampler frame_lengths = Mel.file_data_source.frame_lengths sampler = PartialyRandomizedSimilarTimeLengthSampler(", "# linear: if train_postnet: n_priority_freq = int(hparams.priority_freq / (fs *", "from {}\".format(path)) optimizer.load_state_dict(checkpoint[\"optimizer\"]) global_step = checkpoint[\"global_step\"] global_epoch = 
checkpoint[\"global_epoch\"] return", "len(l) == 5 multi_speaker = len(l) == 5 self.frame_lengths =", "= \"multispeaker{}\".format(speaker_id) if speaker_id is not None else \"single\" for", "numpy as np from numba import jit from nnmnkwii.datasets import", "== self.speaker_id paths = list(np.array(paths)[indices]) self.frame_lengths = list(np.array(self.frame_lengths)[indices]) # aha,", "is None mel_outputs, attn, done_hat, _ = model.seq2seq( x, mel,", "len(l) == 4 or len(l) == 5 multi_speaker = len(l)", "/ (2 * g * g)) return W def guided_attentions(input_lengths,", "warnings import warn from dv3.hparams import hparams, hparams_debug_string fs =", "window_backward=hparams.window_backward, key_projection=hparams.key_projection, value_projection=hparams.value_projection, ) return model def load_checkpoint(path, model, optimizer,", "dv3.lrschedule import torch from torch.utils import data as data_utils from", "batch_group_size): s = i * batch_group_size e = s +", "harded coded texts = [ \"Scientists at the CERN laboratory", "max_len - len(x) - b_pad), (0, 0)], mode=\"constant\", constant_values=0) return", "batch], dtype=np.int) x_batch = torch.LongTensor(a) input_lengths = torch.LongTensor(input_lengths) target_lengths =", "prepare_spec_image(dv3.audio._denormalize(linear_output)) writer.add_image(\"Target linear spectrogram\", spectrogram, global_step) def logit(x, eps=1e-8): return", ":priority_bin], y[:, :, :priority_bin]) else: priority_loss = l1(y_hat[:, :, :priority_bin],", "1).T) * 255), global_step) # Mel writer.add_image(\"(Eval) Predicted mel spectrogram", "path, info=None): fig, ax = plt.subplots() im = ax.imshow( alignment,", "* 255), global_step) # save files as well for now", "= (1 - w) * mel_l1_loss + w * mel_binary_div", "3. 
Permutate mini-batchs \"\"\" def __init__(self, lengths, batch_size=16, batch_group_size=None, permutate=True):", "Input dataset definitions X = FileSourceDataset(TextDataSource(data_root, speaker_id)) Mel = FileSourceDataset(MelSpecDataSource(data_root,", "{}\".format(path)) state = torch.load(path)[\"state_dict\"] model_dict = model.state_dict() valid_state_dict = {k:", "max_len=y.size(1)).unsqueeze(-1) else: target_mask = decoder_target_mask # shift mask decoder_target_mask =", "getattr(dv3.lrschedule, hparams.lr_schedule) current_lr = lr_schedule_f( init_lr, global_step, **hparams.lr_schedule_kwargs) for param_group", "parameters hparams.parse(args[\"--hparams\"]) print(hparams_debug_string()) assert hparams.name == \"deepvoice3\" # Presets if", "(spectrogram - np.min(spectrogram)) / (np.max(spectrogram) - np.min(spectrogram)) spectrogram = np.flip(spectrogram,", "r // downsample_step # frame positions s, e = 1,", "batch_size != 0: batch_group_size -= batch_group_size % batch_size self.batch_group_size =", "FileSourceDataset(TextDataSource(data_root, speaker_id)) Mel = FileSourceDataset(MelSpecDataSource(data_root, speaker_id)) Y = FileSourceDataset(LinearSpecDataSource(data_root, speaker_id))", "L1 loss if priority_bin is not None and priority_w >", "text_positions = Variable(text_positions) frame_positions = Variable(frame_positions) done = Variable(done) target_lengths", "out of style.\", \"President Trump met with other leaders at", "args[\"--load-embedding\"] checkpoint_restore_parts = args[\"--restore-parts\"] speaker_id = args[\"--speaker-id\"] speaker_id = int(speaker_id)", "optimizer, reset_optimizer) if checkpoint_seq2seq_path is not None: load_checkpoint(checkpoint_seq2seq_path, model.seq2seq, optimizer,", "mask_ = mask.expand_as(input) loss = self.criterion(input * mask_, target *", "max_input_len, target_lengths[b], max_target_len, g).T return W def train(model, data_loader, optimizer,", "using multi-speaker dataset as a single speaker dataset 
indices =", "decoder_channels=hparams.decoder_channels, converter_channels=hparams.converter_channels, use_memory_mask=hparams.use_memory_mask, trainable_positional_encodings=hparams.trainable_positional_encodings, force_monotonic_attention=hparams.force_monotonic_attention, use_decoder_state_for_postnet_input=hparams.use_decoder_state_for_postnet_input, max_positions=hparams.max_positions, speaker_embedding_weight_std=hparams.speaker_embedding_weight_std, freeze_embedding=hparams.freeze_embedding, window_ahead=hparams.window_ahead,", "# Binary divergence loss if hparams.binary_divergence_weight <= 0: binary_div =", "batch_group_size -= batch_group_size % batch_size self.batch_group_size = batch_group_size assert batch_group_size", "# Which model to be trained train_seq2seq = args[\"--train-seq2seq-only\"] train_postnet", "= \"eval_averaged_alignment_{}_{}\".format(idx, speaker_str) writer.add_image(tag, np.uint8(cm.viridis(np.flip(alignment, 1).T) * 255), global_step) #", "KeyboardInterrupt: save_checkpoint( model, optimizer, global_step, checkpoint_dir, global_epoch, train_seq2seq, train_postnet) print(\"Finished\")", "small patch and randomize it 3. 
Permutate mini-batchs \"\"\" def", "seq_range_expand = seq_range_expand.cuda() seq_length_expand = sequence_length.unsqueeze(1) \\ .expand_as(seq_range_expand) return (seq_range_expand", "text_positions=text_positions, frame_positions=frame_positions, input_lengths=input_lengths) elif train_seq2seq: assert speaker_ids is None mel_outputs,", "dtype=np.int32), int(speaker_id) else: return np.asarray(seq, dtype=np.int32) class _NPYDataSource(FileDataSource): def __init__(self,", "optimizer, writer, init_lr=0.002, checkpoint_dir=None, checkpoint_interval=None, nepochs=None, clip_thresh=1.0, train_seq2seq=True, train_postnet=True): if", "list(np.array(paths)[indices]) self.frame_lengths = list(np.array(self.frame_lengths)[indices]) # aha, need to cast numpy.int64", "// downsample_step # frame positions s, e = 1, max_decoder_target_len", "hparams.parse_json(json.dumps(preset)) print(\"Override hyper parameters with preset \\\"{}\\\": {}\".format( hparams.preset, json.dumps(preset,", "global_step = checkpoint[\"global_step\"] global_epoch = checkpoint[\"global_epoch\"] return model def _load_embedding(path,", "trainable_positional_encodings=hparams.trainable_positional_encodings, force_monotonic_attention=hparams.force_monotonic_attention, use_decoder_state_for_postnet_input=hparams.use_decoder_state_for_postnet_input, max_positions=hparams.max_positions, speaker_embedding_weight_std=hparams.speaker_embedding_weight_std, freeze_embedding=hparams.freeze_embedding, window_ahead=hparams.window_ahead, window_backward=hparams.window_backward, key_projection=hparams.key_projection, value_projection=hparams.value_projection,", "y_hat_logits + torch.log(1 + torch.exp(y_hat_logits)) if w > 0: binary_div", "range(len(indices) // batch_group_size): s = i * batch_group_size e =", "= getattr(builder, hparams.builder)( n_speakers=hparams.n_speakers, speaker_embed_dim=hparams.speaker_embed_dim, n_vocab=_frontend.n_vocab, embed_dim=hparams.text_embed_dim, mel_dim=hparams.num_mels, 
linear_dim=hparams.fft_size //", "batch_group_size % batch_size != 0: batch_group_size -= batch_group_size % batch_size", "If not None, filter by speaker_id self.speaker_id = speaker_id def", "of the model from: {}\".format(path)) state = torch.load(path)[\"state_dict\"] model_dict =", "hyper parameters with preset \\\"{}\\\": {}\".format( hparams.preset, json.dumps(preset, indent=4))) _frontend", "lines[0].decode(\"utf-8\").split(\"|\") assert len(l) == 4 or len(l) == 5 self.multi_speaker", "checkpoint_postnet_path = args[\"--checkpoint-postnet\"] load_embedding = args[\"--load-embedding\"] checkpoint_restore_parts = args[\"--restore-parts\"] speaker_id", "# Load embedding if load_embedding is not None: print(\"Loading embedding", "done, target_lengths, speaker_ids def time_string(): return datetime.now().strftime('%Y-%m-%d %H:%M') def save_alignment(path,", "else [None] for speaker_id in speaker_ids: speaker_str = \"multispeaker{}\".format(speaker_id) if", "use masked loss if hparams.masked_loss_weight > 0: # decoder output", "paths)) if multi_speaker and self.speaker_id is not None: speaker_ids =", "linear_binary_div # Combine losses if train_seq2seq and train_postnet: loss =", "np.zeros((max_N, max_T), dtype=np.float32) for n in range(N): for t in", "downsample_step assert max_target_len % downsample_step == 0 # Set 0", "\"optimizer\": optimizer_state, \"global_step\": step, \"global_epoch\": epoch, }, checkpoint_path) print(\"Saved checkpoint:\",", "if train_seq2seq and train_postnet: loss = mel_loss + linear_loss +", "def prepare_spec_image(spectrogram): # [0, 1] spectrogram = (spectrogram - np.min(spectrogram))", "Update loss.backward() if clip_thresh > 0: grad_norm = torch.nn.utils.clip_grad_norm( model.get_trainable_parameters(),", "min(batch_size * 32, len(self.lengths)) if batch_group_size % batch_size != 0:", "postnet model from checkpoint path. 
--train-seq2seq-only Train only seq2seq model.", "1 # if b_pad > 0: # s, e =", "numba import jit from nnmnkwii.datasets import FileSourceDataset, FileDataSource from os.path", "os.makedirs(alignment_dir, exist_ok=True) path = join(alignment_dir, \"step{:09d}_alignment.png\".format(global_step)) alignment = attn.mean(0)[idx].cpu().data.numpy() save_alignment(path,", "later def _pad(seq, max_len, constant_values=0): return np.pad(seq, (0, max_len -", "max_target_len // r // downsample_step # frame positions s, e", "join(checkpoint_dir, \"step{:09d}_predicted.wav\".format( global_step)) try: writer.add_audio(\"Predicted audio signal\", signal, global_step, sample_rate=fs)", "max_target_len % downsample_step == 0 # Set 0 for zero", "global_step) if clip_thresh > 0: writer.add_scalar(\"gradient norm\", grad_norm, global_step) writer.add_scalar(\"learning", "the model from: {}\".format(path)) state = torch.load(path)[\"state_dict\"] model_dict = model.state_dict()", "= dv3.audio.inv_spectrogram(linear_output.T) signal /= np.max(np.abs(signal)) path = join(checkpoint_dir, \"step{:09d}_predicted.wav\".format( global_step))", "hparams.preset != \"\": preset = hparams.presets[hparams.preset] import json hparams.parse_json(json.dumps(preset)) print(\"Override", "= self.batch_group_size s, e = 0, 0 for i in", "def plot_alignment(alignment, path, info=None): fig, ax = plt.subplots() im =", "MaskedL1Loss(nn.Module): def __init__(self): super(MaskedL1Loss, self).__init__() self.criterion = nn.L1Loss(size_average=False) def forward(self,", "len(l) == 5 self.frame_lengths = list( map(lambda l: int(l.decode(\"utf-8\").split(\"|\")[2]), lines))", "init_lr, global_step, **hparams.lr_schedule_kwargs) for param_group in optimizer.param_groups: param_group['lr'] = current_lr", "* priority_loss # Binary divergence loss if hparams.binary_divergence_weight <= 0:", "priority_bin=n_priority_freq, priority_w=hparams.priority_freq_weight) linear_loss = (1 - w) * linear_l1_loss +", "// r // 
downsample_step # Feed data x, mel, y", "contains preprocessed features. --checkpoint-dir=<dir> Directory where to save model checkpoints", "current_lr = lr_schedule_f( init_lr, global_step, **hparams.lr_schedule_kwargs) for param_group in optimizer.param_groups:", "import docopt import sys from os.path import dirname, join from", "= l1(y_hat[:, :, :priority_bin], y[:, :, :priority_bin]) l1_loss = (1", "speaker dataset if self.speaker_id is not None: indices = np.array(speaker_ids)", "load_checkpoint(checkpoint_seq2seq_path, model.seq2seq, optimizer, reset_optimizer) if checkpoint_path is not None: load_checkpoint(checkpoint_path,", "is not None: lr_schedule_f = getattr(dv3.lrschedule, hparams.lr_schedule) current_lr = lr_schedule_f(", "= seq_range.unsqueeze(0).expand(batch_size, max_len) seq_range_expand = Variable(seq_range_expand) if sequence_length.is_cuda: seq_range_expand =", "= torch.FloatTensor(b) c = np.array([_pad_2d(x[2], max_target_len, b_pad=b_pad) for x in", "T, D) mask_ = mask.expand_as(input) loss = self.criterion(input * mask_,", "given. --checkpoint-seq2seq=<path> Restore seq2seq model from checkpoint path. 
--checkpoint-postnet=<path> Restore", "mask=mask) + (1 - w) * l1(y_hat, y) else: assert", "in batch] max_input_len = max(input_lengths) target_lengths = [len(x[1]) for x", "axis=1) # flip against freq axis return np.uint8(cm.magma(spectrogram.T) * 255)", "and not train_postnet: print(\"Training whole model\") train_seq2seq, train_postnet = True,", "self.batch_size = batch_size if batch_group_size is None: batch_group_size = min(batch_size", "positions # Downsample mel spectrogram if downsample_step > 1: mel", "if self.multi_speaker: return np.asarray(seq, dtype=np.int32), int(speaker_id) else: return np.asarray(seq, dtype=np.int32)", "/= np.max(np.abs(signal)) path = join(checkpoint_dir, \"step{:09d}_predicted.wav\".format( global_step)) try: writer.add_audio(\"Predicted audio", "== 4 or len(l) == 5 multi_speaker = len(l) ==", "# Audio path = join(eval_output_dir, \"step{:09d}_text{}_{}_predicted.wav\".format( global_step, idx, speaker_str)) dv3.audio.save_wav(signal,", "dirname, join from tqdm import tqdm, trange from datetime import", "speaker_ids is not None # Learning rate schedule if hparams.lr_schedule", "in batch]) else: speaker_ids = None return x_batch, input_lengths, mel_batch,", "checkpoint[\"optimizer\"] if optimizer_state is not None: print(\"Load optimizer state from", "= linear_loss # attention if train_seq2seq and hparams.use_guided_attention: soft_mask =", "sequence_mask( target_lengths, max_len=y.size(1)).unsqueeze(-1) else: target_mask = decoder_target_mask # shift mask", "reset_optimizer) # Load embedding if load_embedding is not None: print(\"Loading", "0: eval_model(global_step, writer, model, checkpoint_dir, ismultispeaker) # Update loss.backward() if", "\"global_epoch\": epoch, }, checkpoint_path) print(\"Saved checkpoint:\", checkpoint_path) def build_model(): model", "speaker_id def collect_files(self): meta = join(self.data_root, \"train.txt\") with open(meta, \"rb\")", "downsample_step != 0: max_target_len += downsample_step - 
max_target_len % downsample_step", "is not None else \"single\" for idx, text in enumerate(texts):", "train_postnet: mel_outputs, linear_outputs, attn, done_hat = model( x, mel, speaker_ids=speaker_ids,", "os.makedirs(eval_output_dir, exist_ok=True) # hard coded speaker_ids = [0, 1, 10]", "train_seq2seq: assert speaker_ids is None mel_outputs, attn, done_hat, _ =", "= speaker_ids is not None # Learning rate schedule if", "CERN laboratory say they have discovered a new particle.\", \"There's", "model, text, p=0, speaker_id=speaker_id, fast=False) signal /= np.max(np.abs(signal)) # Alignment", "if checkpoint_path is not None: load_checkpoint(checkpoint_path, model, optimizer, reset_optimizer) #", "x in batch] max_target_len = max(target_lengths) if max_target_len % r", "e).long().unsqueeze(0).expand( len(batch), max_decoder_target_len) # done flags done = np.array([_pad(np.zeros(len(x[1]) //", "checkpoints [default: checkpoints]. --hparams=<parmas> Hyper parameters [default: ]. --checkpoint=<path> Restore", "= PyTorchDataset(X, Mel, Y) data_loader = data_utils.DataLoader( dataset, batch_size=hparams.batch_size, num_workers=hparams.num_workers,", "timestep') plt.tight_layout() plt.savefig(path, format='png') plt.close() class TextDataSource(FileDataSource): def __init__(self, data_root,", "0 for zero beginning padding # imitates initial decoder states", "m = model elif train_seq2seq: suffix = \"_seq2seq\" m =", "RuntimeError(\"Should provide either lengths or mask\") # (B, T, 1)", "if hparams.binary_divergence_weight <= 0: binary_div = Variable(y.data.new(1).zero_()) else: y_hat_logits =", "torch.load(path)[\"state_dict\"] model_dict = model.state_dict() valid_state_dict = {k: v for k,", "\"\": preset = hparams.presets[hparams.preset] import json hparams.parse_json(json.dumps(preset)) print(\"Override hyper parameters", "lines)) paths = list(map(lambda f: join(self.data_root, f), paths)) if multi_speaker", "[default: checkpoints]. 
--hparams=<parmas> Hyper parameters [default: ]. --checkpoint=<path> Restore model", "if max_target_len % downsample_step != 0: max_target_len += downsample_step -", "checkpoint_dir, \"checkpoint_step{:09d}{}.pth\".format(global_step, suffix)) optimizer_state = optimizer.state_dict() if hparams.save_optimizer_state else None", "if not reset_optimizer: optimizer_state = checkpoint[\"optimizer\"] if optimizer_state is not", "optimizer.param_groups: param_group['lr'] = current_lr optimizer.zero_grad() # Used for Position encoding", "global_step) writer.add_scalar(\"mel loss\", float(mel_loss.data[0]), global_step) writer.add_scalar(\"mel_l1_loss\", float(mel_l1_loss.data[0]), global_step) writer.add_scalar(\"mel_binary_div_loss\", float(mel_binary_div.data[0]),", "int(speaker_id) if speaker_id is not None else None data_root =", "checkpoint path. --checkpoint-postnet=<path> Restore postnet model from checkpoint path. --train-seq2seq-only", "def collate_fn(batch): \"\"\"Create batch\"\"\" r = hparams.outputs_per_step downsample_step = hparams.downsample_step", "assert mask is not None l1_loss = w * masked_l1(y_hat,", "\"President Trump met with other leaders at the Group of", "None: mel_output = mel_outputs[idx].cpu().data.numpy() mel_output = prepare_spec_image(dv3.audio._denormalize(mel_output)) writer.add_image(\"Predicted mel spectrogram\",", "None: raise RuntimeError(\"Should provide either lengths or mask\") # (B,", "prepare_spec_image(dv3.audio._denormalize(mel_output)) writer.add_image(\"Predicted mel spectrogram\", mel_output, global_step) # Predicted spectrogram if", "= Variable(text_positions) frame_positions = Variable(frame_positions) done = Variable(done) target_lengths =", "% checkpoint_interval == 0: save_states( global_step, writer, mel_outputs, linear_outputs, attn,", "> 1: mel = mel[:, 0::downsample_step, :].contiguous() # Lengths input_lengths", "# Logs writer.add_scalar(\"loss\", float(loss.data[0]), global_step) if train_seq2seq: 
writer.add_scalar(\"done_loss\", float(done_loss.data[0]), global_step)", "self.X[idx] return text, self.Mel[idx], self.Y[idx], speaker_id else: return self.X[idx], self.Mel[idx],", "data_root, speaker_id=None): super(LinearSpecDataSource, self).__init__(data_root, 0, speaker_id) class PartialyRandomizedSimilarTimeLengthSampler(Sampler): \"\"\"Partially randmoized", "\"step{:09d}_predicted.wav\".format( global_step)) try: writer.add_audio(\"Predicted audio signal\", signal, global_step, sample_rate=fs) except", "train_postnet: loss = linear_loss # attention if train_seq2seq and hparams.use_guided_attention:", "= [ \"Scientists at the CERN laboratory say they have", "T, 1) if mask is None: mask = sequence_mask(lengths, max_len).unsqueeze(-1)", "w) * l1(y_hat, y) else: assert mask is None l1_loss", "r=hparams.outputs_per_step, downsample_step=hparams.downsample_step, padding_idx=hparams.padding_idx, dropout=hparams.dropout, kernel_size=hparams.kernel_size, encoder_channels=hparams.encoder_channels, decoder_channels=hparams.decoder_channels, converter_channels=hparams.converter_channels, use_memory_mask=hparams.use_memory_mask, trainable_positional_encodings=hparams.trainable_positional_encodings,", "model_dict = model.state_dict() valid_state_dict = {k: v for k, v", "info plt.xlabel(xlabel) plt.ylabel('Encoder timestep') plt.tight_layout() plt.savefig(path, format='png') plt.close() class TextDataSource(FileDataSource):", "def __getitem__(self, idx): if self.multi_speaker: text, speaker_id = self.X[idx] return", "* soft_mask).mean() loss += attn_loss if global_step > 0 and", "speaker_ids = speaker_ids.cuda() if ismultispeaker else None # Create mask", "= prepare_spec_image(dv3.audio._denormalize(mel_output)) writer.add_image(\"Predicted mel spectrogram\", mel_output, global_step) # Predicted spectrogram", "loss += attn_loss if global_step > 0 and global_step %", "discovered a new particle.\", \"There's a way to measure the", "% downsample_step assert max_target_len % 
downsample_step == 0 # Set", "that has never gone out of style.\", \"President Trump met", "target_lengths, max_target_len, g=0.2): B = len(input_lengths) max_input_len = input_lengths.max() W", "attn, mel, y, input_lengths, checkpoint_dir) save_checkpoint( model, optimizer, global_step, checkpoint_dir,", "else \"single\" for idx, text in enumerate(texts): signal, alignment, _,", "optimizer.zero_grad() # Used for Position encoding text_positions, frame_positions = positions", "4 or len(l) == 5 self.multi_speaker = len(l) == 5", "only seq2seq model. --train-postnet-only Train only postnet model. --restore-parts=<path> Restore", "Position encoding text_positions, frame_positions = positions # Downsample mel spectrogram", "= [len(x[1]) for x in batch] max_target_len = max(target_lengths) if", "writer.add_scalar(\"linear_loss\", float(linear_loss.data[0]), global_step) writer.add_scalar(\"linear_l1_loss\", float(linear_l1_loss.data[0]), global_step) writer.add_scalar(\"linear_binary_div_loss\", float( linear_binary_div.data[0]), global_step)", "= guided_attention(input_lengths[b], max_input_len, target_lengths[b], max_target_len, g).T return W def train(model,", "attention if train_seq2seq and hparams.use_guided_attention: soft_mask = guided_attentions(input_lengths, decoder_lengths, attn.size(-2),", "target_mask = target_mask[:, r:, :] else: decoder_target_mask, target_mask = None,", "\\ in tqdm(enumerate(data_loader)): model.train() ismultispeaker = speaker_ids is not None", "step={}\".format( hparams.builder, time_string(), global_step)) def prepare_spec_image(spectrogram): # [0, 1] spectrogram", "(1 - w) * l1(y_hat[:, :, :priority_bin], y[:, :, :priority_bin])", "* linear_dim) linear_l1_loss, linear_binary_div = spec_loss( linear_outputs[:, :-r, :], y[:,", "super(MelSpecDataSource, self).__init__(data_root, 1, speaker_id) class LinearSpecDataSource(_NPYDataSource): def __init__(self, data_root, speaker_id=None):", "= args[\"--speaker-id\"] speaker_id = 
int(speaker_id) if speaker_id is not None", "return datetime.now().strftime('%Y-%m-%d %H:%M') def save_alignment(path, attn): plot_alignment(attn.T, path, info=\"{}, {},", "lines)) if self.multi_speaker: speaker_ids = list(map(lambda l: int(l.decode(\"utf-8\").split(\"|\")[-1]), lines)) #", "\"step{:09d}_alignment.png\".format(global_step)) alignment = attn.mean(0)[idx].cpu().data.numpy() save_alignment(path, alignment) tag = \"averaged_alignment\" writer.add_image(tag,", "= data_root self.col = col self.frame_lengths = [] self.speaker_id =", "Setup summary writer for tensorboard if log_event_path is None: log_event_path", "# Filter by speaker_id # using multi-speaker dataset as a", "= torch.load(path)[\"state_dict\"] key = \"seq2seq.encoder.embed_tokens.weight\" model.seq2seq.encoder.embed_tokens.weight.data = state[key] # https://discuss.pytorch.org/t/how-to-load-part-of-pre-trained-model/1113/3", "if train_seq2seq and hparams.use_guided_attention: soft_mask = guided_attentions(input_lengths, decoder_lengths, attn.size(-2), g=hparams.guided_attention_sigma)", "= None return x_batch, input_lengths, mel_batch, y_batch, \\ (text_positions, frame_positions),", "guided_attentions(input_lengths, decoder_lengths, attn.size(-2), g=hparams.guided_attention_sigma) soft_mask = Variable(torch.from_numpy(soft_mask)) soft_mask = soft_mask.cuda()", ":, :priority_bin]) l1_loss = (1 - priority_w) * l1_loss +", "preset = hparams.presets[hparams.preset] import json hparams.parse_json(json.dumps(preset)) print(\"Override hyper parameters with", "as well for now alignment_dir = join(checkpoint_dir, \"alignment_layer{}\".format(i + 1))", "model.train() ismultispeaker = speaker_ids is not None # Learning rate", "max_len=mel.size(1)).unsqueeze(-1) if downsample_step > 1: # spectrogram-domain mask target_mask =", "assert hparams.name == \"deepvoice3\" # Presets if hparams.preset is not", "only postnet model. --restore-parts=<path> Restore part of the model. 
--log-event-path=<name>", "indices[:e] = indices[:e].view(-1, self.batch_size)[perm, :].view(-1) # Handle last elements s", "postnet model. --restore-parts=<path> Restore part of the model. --log-event-path=<name> Log", "- 1, e - 1 frame_positions = torch.arange(s, e).long().unsqueeze(0).expand( len(batch),", "options: --data-root=<dir> Directory contains preprocessed features. --checkpoint-dir=<dir> Directory where to", "plt.ylabel('Encoder timestep') plt.tight_layout() plt.savefig(path, format='png') plt.close() class TextDataSource(FileDataSource): def __init__(self,", "1. Sort by lengths 2. Pick a small patch and", "def __init__(self, lengths, batch_size=16, batch_group_size=None, permutate=True): self.lengths, self.sorted_indices = torch.sort(torch.LongTensor(lengths))", "print(\"Los event path: {}\".format(log_event_path)) writer = SummaryWriter(log_dir=log_event_path) # Train! try:", "averaged_loss = running_loss / (len(data_loader)) writer.add_scalar(\"loss (per epoch)\", averaged_loss, global_epoch)", "linear_l1_loss, linear_binary_div = spec_loss( linear_outputs[:, :-r, :], y[:, r:, :],", "global_epoch) print(\"Loss: {}\".format(running_loss / (len(data_loader)))) global_epoch += 1 def save_checkpoint(model,", "= \"seq2seq.encoder.embed_tokens.weight\" model.seq2seq.encoder.embed_tokens.weight.data = state[key] # https://discuss.pytorch.org/t/how-to-load-part-of-pre-trained-model/1113/3 def restore_parts(path, model):", "from {}\".format(load_embedding)) _load_embedding(load_embedding, model) # Setup summary writer for tensorboard", "state from {}\".format(path)) optimizer.load_state_dict(checkpoint[\"optimizer\"]) global_step = checkpoint[\"global_step\"] global_epoch = checkpoint[\"global_epoch\"]", "torch.log(x + eps) - torch.log(1 - x + eps) def", "alignment_dir = join(checkpoint_dir, \"alignment_ave\") os.makedirs(alignment_dir, exist_ok=True) path = join(alignment_dir, \"step{:09d}_alignment.png\".format(global_step))", ":, :priority_bin], 
mask=mask) \\ + (1 - w) * l1(y_hat[:,", "text_positions.cuda() frame_positions = frame_positions.cuda() if train_postnet: y = y.cuda() mel", "signal signal = dv3.audio.inv_spectrogram(linear_output.T) signal /= np.max(np.abs(signal)) path = join(checkpoint_dir,", "Train only postnet model. --restore-parts=<path> Restore part of the model.", "== 4: for i, alignment in enumerate(attn): alignment = alignment[idx].cpu().data.numpy()", "soft_mask).mean() loss += attn_loss if global_step > 0 and global_step", "writer.add_scalar(\"linear_l1_loss\", float(linear_l1_loss.data[0]), global_step) writer.add_scalar(\"linear_binary_div_loss\", float( linear_binary_div.data[0]), global_step) if train_seq2seq and", "if mel_outputs is not None: mel_output = mel_outputs[idx].cpu().data.numpy() mel_output =", "is not None l1_loss = w * masked_l1(y_hat, y, mask=mask)", "attn.mean(0)[idx].cpu().data.numpy() save_alignment(path, alignment) tag = \"averaged_alignment\" writer.add_image(tag, np.uint8(cm.viridis(np.flip(alignment, 1).T) *", "speaker_ids def time_string(): return datetime.now().strftime('%Y-%m-%d %H:%M') def save_alignment(path, attn): plot_alignment(attn.T,", "not None: xlabel += '\\n\\n' + info plt.xlabel(xlabel) plt.ylabel('Encoder timestep')", "import random import librosa.display from matplotlib import pyplot as plt", "= model.state_dict() valid_state_dict = {k: v for k, v in", "np.pad(seq, (0, max_len - len(seq)), mode='constant', constant_values=constant_values) def _pad_2d(x, max_len,", "# s, e = s - 1, e - 1", "- t / T)**2 / (2 * g * g))", "Load embedding if load_embedding is not None: print(\"Loading embedding from", "texts = [ \"Scientists at the CERN laboratory say they", "y, positions, done, target_lengths, speaker_ids) \\ in tqdm(enumerate(data_loader)): model.train() ismultispeaker", "state = torch.load(path)[\"state_dict\"] key = \"seq2seq.encoder.embed_tokens.weight\" model.seq2seq.encoder.embed_tokens.weight.data = state[key] #", "self.multi_speaker: 
return np.asarray(seq, dtype=np.int32), int(speaker_id) else: return np.asarray(seq, dtype=np.int32) class", "cm from warnings import warn from dv3.hparams import hparams, hparams_debug_string", "pass dv3.audio.save_wav(signal, path) # Target mel spectrogram if mel_outputs is", "hard coded speaker_ids = [0, 1, 10] if ismultispeaker else", "window_ahead=hparams.window_ahead, window_backward=hparams.window_backward, key_projection=hparams.key_projection, value_projection=hparams.value_projection, ) return model def load_checkpoint(path, model,", "= list(map(int, self.frame_lengths)) return paths def collect_features(self, path): return np.load(path)", "0: max_target_len += r - max_target_len % r assert max_target_len", "log_event_path = args[\"--log-event-path\"] reset_optimizer = args[\"--reset-optimizer\"] # Which model to", "= y[idx].cpu().data.numpy() spectrogram = prepare_spec_image(dv3.audio._denormalize(linear_output)) writer.add_image(\"Target linear spectrogram\", spectrogram, global_step)", "= torch.cuda.is_available() if use_cuda: cudnn.benchmark = False _frontend = None", "l1(y_hat, y) else: assert mask is None l1_loss = l1(y_hat,", "from checkpoint path if given. 
--checkpoint-seq2seq=<path> Restore seq2seq model from", "None: xlabel += '\\n\\n' + info plt.xlabel(xlabel) plt.ylabel('Encoder timestep') plt.tight_layout()", "mel_output = mel[idx].cpu().data.numpy() mel_output = prepare_spec_image(dv3.audio._denormalize(mel_output)) writer.add_image(\"Target mel spectrogram\", mel_output,", "1, e - 1 frame_positions = torch.arange(s, e).long().unsqueeze(0).expand( len(batch), max_decoder_target_len)", "= torch.load(path) model.load_state_dict(checkpoint[\"state_dict\"]) if not reset_optimizer: optimizer_state = checkpoint[\"optimizer\"] if", "global_step)) def prepare_spec_image(spectrogram): # [0, 1] spectrogram = (spectrogram -", "sample_rate=fs) except Exception as e: warn(str(e)) pass dv3.audio.save_wav(signal, path) #", "# Presets if hparams.preset is not None and hparams.preset !=", "None data_root = args[\"--data-root\"] if data_root is None: data_root =", "np.pad(x, [(b_pad, max_len - len(x) - b_pad), (0, 0)], mode=\"constant\",", "synthesis._frontend = _frontend eval_output_dir = join(checkpoint_dir, \"eval\") os.makedirs(eval_output_dir, exist_ok=True) #", "FileSourceDataset(LinearSpecDataSource(data_root, speaker_id)) # Prepare sampler frame_lengths = Mel.file_data_source.frame_lengths sampler =", "self.criterion = nn.L1Loss(size_average=False) def forward(self, input, target, lengths=None, mask=None, max_len=None):", "2. Pick a small patch and randomize it 3. 
Permutate", "batch_group_size % batch_size self.batch_group_size = batch_group_size assert batch_group_size % batch_size", "len(batch), max_decoder_target_len) # done flags done = np.array([_pad(np.zeros(len(x[1]) // r", "global_step, i + 1)) save_alignment(path, alignment) # Save averaged alignment", "def spec_loss(y_hat, y, mask, priority_bin=None, priority_w=0): masked_l1 = MaskedL1Loss() l1", "x in batch]) done = torch.FloatTensor(done).unsqueeze(-1) if multi_speaker: speaker_ids =", "l1_loss, binary_div @jit(nopython=True) def guided_attention(N, max_N, T, max_T, g): W", "global_step > 0 and global_step % checkpoint_interval == 0: save_states(", "= permutate def __iter__(self): indices = self.sorted_indices.clone() batch_group_size = self.batch_group_size", "mask is None: mask = sequence_mask(lengths, max_len).unsqueeze(-1) # (B, T,", "torch.arange(0, max_len).long() seq_range_expand = seq_range.unsqueeze(0).expand(batch_size, max_len) seq_range_expand = Variable(seq_range_expand) if", "@jit(nopython=True) def guided_attention(N, max_N, T, max_T, g): W = np.zeros((max_N,", "writer, mel_outputs, linear_outputs, attn, mel, y, input_lengths, checkpoint_dir) save_checkpoint( model,", "data_root, speaker_id=None): self.data_root = data_root self.speaker_ids = None self.multi_speaker =", "train_seq2seq: loss = mel_loss + done_loss elif train_postnet: loss =", "= join(checkpoint_dir, \"alignment_ave\") os.makedirs(alignment_dir, exist_ok=True) path = join(alignment_dir, \"step{:09d}_alignment.png\".format(global_step)) alignment", "= init_lr binary_criterion = nn.BCELoss() assert train_seq2seq or train_postnet global", "docopt import sys from os.path import dirname, join from tqdm", "synthesis.tts( model, text, p=0, speaker_id=speaker_id, fast=False) signal /= np.max(np.abs(signal)) #", "output domain mask decoder_target_mask = sequence_mask( target_lengths / (r *", "model = getattr(builder, hparams.builder)( n_speakers=hparams.n_speakers, 
speaker_embed_dim=hparams.speaker_embed_dim, n_vocab=_frontend.n_vocab, embed_dim=hparams.text_embed_dim, mel_dim=hparams.num_mels, linear_dim=hparams.fft_size", "datetime import datetime # The deepvoice3 model from dv3.deepvoice3_pytorch import", "return x def plot_alignment(alignment, path, info=None): fig, ax = plt.subplots()", "g): W = np.zeros((max_N, max_T), dtype=np.float32) for n in range(N):", "mask is not None l1_loss = w * masked_l1(y_hat, y,", "== 0 if max_target_len % downsample_step != 0: max_target_len +=", "global_epoch, train_seq2seq, train_postnet) if global_step > 0 and global_step %", "1, max_decoder_target_len + 1 # if b_pad > 0: #", "mel_l1_loss + w * mel_binary_div # done: if train_seq2seq: done_loss", "speaker_ids) \\ in tqdm(enumerate(data_loader)): model.train() ismultispeaker = speaker_ids is not", "linear_dim=hparams.fft_size // 2 + 1, r=hparams.outputs_per_step, downsample_step=hparams.downsample_step, padding_idx=hparams.padding_idx, dropout=hparams.dropout, kernel_size=hparams.kernel_size,", "dv3.hparams import hparams, hparams_debug_string fs = hparams.sample_rate global_step = 0", "except Exception as e: warn(str(e)) pass dv3.audio.save_wav(signal, path) # Target", "len(self.X) def sequence_mask(sequence_length, max_len=None): if max_len is None: max_len =", "1) if mask is None: mask = sequence_mask(lengths, max_len).unsqueeze(-1) #", "perm = np.arange(len(indices[:e]) // self.batch_size) random.shuffle(perm) indices[:e] = indices[:e].view(-1, self.batch_size)[perm,", "checkpoint_path = join( checkpoint_dir, \"checkpoint_step{:09d}{}.pth\".format(global_step, suffix)) optimizer_state = optimizer.state_dict() if", "priority_loss = w * masked_l1( y_hat[:, :, :priority_bin], y[:, :,", "mel spectrogram if mel_outputs is not None: mel_output = mel[idx].cpu().data.numpy()", "= y.cuda() mel = mel.cuda() done, target_lengths = done.cuda(), target_lengths.cuda()", "train.py [options] options: --data-root=<dir> Directory contains preprocessed 
features. --checkpoint-dir=<dir> Directory", "= PartialyRandomizedSimilarTimeLengthSampler( frame_lengths, batch_size=hparams.batch_size) # Dataset and Dataloader setup dataset", "texts return texts, speaker_ids else: return texts def collect_features(self, *args):", "# flip against freq axis return np.uint8(cm.magma(spectrogram.T) * 255) def", "frontend, builder import dv3.audio import dv3.lrschedule import torch from torch.utils", "dataset if self.speaker_id is not None: indices = np.array(speaker_ids) ==", "max_target_len % downsample_step assert max_target_len % downsample_step == 0 #", "not None # Learning rate schedule if hparams.lr_schedule is not", "p=hparams.replace_pronunciation_prob) if self.multi_speaker: return np.asarray(seq, dtype=np.int32), int(speaker_id) else: return np.asarray(seq,", "os.path import join, expanduser import random import librosa.display from matplotlib", "linear_outputs = None elif train_postnet: assert speaker_ids is None linear_outputs", "# Override hyper parameters hparams.parse(args[\"--hparams\"]) print(hparams_debug_string()) assert hparams.name == \"deepvoice3\"", "writer.add_audio(\"Predicted audio signal\", signal, global_step, sample_rate=fs) except Exception as e:", "state[key] # https://discuss.pytorch.org/t/how-to-load-part-of-pre-trained-model/1113/3 def restore_parts(path, model): print(\"Restore part of the", "# shift mask decoder_target_mask = decoder_target_mask[:, r:, :] target_mask =", "None else None data_root = args[\"--data-root\"] if data_root is None:", "def save_alignment(path, attn): plot_alignment(attn.T, path, info=\"{}, {}, step={}\".format( hparams.builder, time_string(),", "= prepare_spec_image(dv3.audio._denormalize(linear_output)) writer.add_image(\"Predicted linear spectrogram\", spectrogram, global_step) # Predicted audio", "Variable(torch.from_numpy(soft_mask)) soft_mask = soft_mask.cuda() if use_cuda else soft_mask attn_loss =", "path): return np.load(path) class 
MelSpecDataSource(_NPYDataSource): def __init__(self, data_root, speaker_id=None): super(MelSpecDataSource,", "Mel, Y): self.X = X self.Mel = Mel self.Y =", "sequence_length.is_cuda: seq_range_expand = seq_range_expand.cuda() seq_length_expand = sequence_length.unsqueeze(1) \\ .expand_as(seq_range_expand) return", "they have discovered a new particle.\", \"There's a way to", "writer.add_image(\"Predicted mel spectrogram\", mel_output, global_step) # Predicted spectrogram if linear_outputs", "train_postnet: print(\"Training postnet model\") else: assert False, \"must be specified", "PartialyRandomizedSimilarTimeLengthSampler(Sampler): \"\"\"Partially randmoized sampler 1. Sort by lengths 2. Pick", "speaker_id is not None else \"single\" for idx, text in", "= ax.imshow( alignment, aspect='auto', origin='lower', interpolation='none') fig.colorbar(im, ax=ax) xlabel =", "if use_cuda: model = model.cuda() optimizer = optim.Adam(model.get_trainable_parameters(), lr=hparams.initial_learning_rate, betas=(", "laboratory say they have discovered a new particle.\", \"There's a", "spectrogram = prepare_spec_image(dv3.audio._denormalize(linear_output)) writer.add_image(\"Target linear spectrogram\", spectrogram, global_step) def logit(x,", "done.cuda(), target_lengths.cuda() speaker_ids = speaker_ids.cuda() if ismultispeaker else None #", "# Combine losses if train_seq2seq and train_postnet: loss = mel_loss", "max_input_len = input_lengths.max() W = np.zeros((B, max_target_len, max_input_len), dtype=np.float32) for", "w * mel_binary_div # done: if train_seq2seq: done_loss = binary_criterion(done_hat,", "self.frame_lengths = list(map(int, self.frame_lengths)) return paths def collect_features(self, path): return", "= np.flip(spectrogram, axis=1) # flip against freq axis return np.uint8(cm.magma(spectrogram.T)", "by speaker_id # using multi-speaker dataset as a single speaker", "/ (fs * 0.5) * linear_dim) linear_l1_loss, linear_binary_div = spec_loss(", "e = s + batch_group_size 
random.shuffle(indices[s:e]) # Permutate batches if", "indent=4))) _frontend = getattr(frontend, hparams.frontend) os.makedirs(checkpoint_dir, exist_ok=True) # Input dataset", "as a single speaker dataset indices = np.array(speaker_ids) == self.speaker_id", "return np.uint8(cm.magma(spectrogram.T) * 255) def eval_model(global_step, writer, model, checkpoint_dir, ismultispeaker):", "= input_lengths.long().numpy() decoder_lengths = target_lengths.long().numpy() // r // downsample_step #", "frame_positions=frame_positions, input_lengths=input_lengths) elif train_seq2seq: assert speaker_ids is None mel_outputs, attn,", "+ torch.log(1 + torch.exp(y_hat_logits)) if w > 0: binary_div =", "self).__init__(data_root, 0, speaker_id) class PartialyRandomizedSimilarTimeLengthSampler(Sampler): \"\"\"Partially randmoized sampler 1. Sort", "step {}\".format(global_step)) # idx = np.random.randint(0, len(input_lengths)) idx = min(1,", "lr_schedule_f = getattr(dv3.lrschedule, hparams.lr_schedule) current_lr = lr_schedule_f( init_lr, global_step, **hparams.lr_schedule_kwargs)", "lengths, batch_size=16, batch_group_size=None, permutate=True): self.lengths, self.sorted_indices = torch.sort(torch.LongTensor(lengths)) self.batch_size =", "+ done_loss elif train_postnet: loss = linear_loss # attention if", "# hard coded speaker_ids = [0, 1, 10] if ismultispeaker", "guided_attention(N, max_N, T, max_T, g): W = np.zeros((max_N, max_T), dtype=np.float32)", "+ linear_loss + done_loss elif train_seq2seq: loss = mel_loss +", "batch], dtype=np.float32) y_batch = torch.FloatTensor(c) # text positions text_positions =", "\"train.txt\") with open(meta, \"rb\") as f: lines = f.readlines() l", "= hparams.presets[hparams.preset] import json hparams.parse_json(json.dumps(preset)) print(\"Override hyper parameters with preset", ":] target_mask = target_mask[:, r:, :] else: decoder_target_mask, target_mask =", "import Variable from torch import nn from torch import optim", "pass def 
save_states(global_step, writer, mel_outputs, linear_outputs, attn, mel, y, input_lengths,", "never gone out of style.\", \"President Trump met with other", "epoch, }, checkpoint_path) print(\"Saved checkpoint:\", checkpoint_path) def build_model(): model =", "linear_dim = model.linear_dim r = hparams.outputs_per_step downsample_step = hparams.downsample_step current_lr", "writer for tensorboard if log_event_path is None: log_event_path = \"log/run-test\"", "Train! try: train(model, data_loader, optimizer, writer, init_lr=hparams.initial_learning_rate, checkpoint_dir=checkpoint_dir, checkpoint_interval=hparams.checkpoint_interval, nepochs=hparams.nepochs,", "mel = synthesis.tts( model, text, p=0, speaker_id=speaker_id, fast=False) signal /=", "= 0. for step, (x, input_lengths, mel, y, positions, done,", "padding_idx=hparams.padding_idx, dropout=hparams.dropout, kernel_size=hparams.kernel_size, encoder_channels=hparams.encoder_channels, decoder_channels=hparams.decoder_channels, converter_channels=hparams.converter_channels, use_memory_mask=hparams.use_memory_mask, trainable_positional_encodings=hparams.trainable_positional_encodings, force_monotonic_attention=hparams.force_monotonic_attention, use_decoder_state_for_postnet_input=hparams.use_decoder_state_for_postnet_input,", "if sequence_length.is_cuda: seq_range_expand = seq_range_expand.cuda() seq_length_expand = sequence_length.unsqueeze(1) \\ .expand_as(seq_range_expand)", "torch.sort(torch.LongTensor(lengths)) self.batch_size = batch_size if batch_group_size is None: batch_group_size =", "self.frame_lengths)) return paths def collect_features(self, path): return np.load(path) class MelSpecDataSource(_NPYDataSource):", "key = \"seq2seq.encoder.embed_tokens.weight\" model.seq2seq.encoder.embed_tokens.weight.data = state[key] # https://discuss.pytorch.org/t/how-to-load-part-of-pre-trained-model/1113/3 def restore_parts(path,", "physical explanation.\", ] import dv3.synthesis synthesis._frontend = _frontend 
eval_output_dir =", "= soft_mask.cuda() if use_cuda else soft_mask attn_loss = (attn *", "# (B, T, 1) if mask is None: mask =", "if hparams.save_optimizer_state else None torch.save({ \"state_dict\": m.state_dict(), \"optimizer\": optimizer_state, \"global_step\":", "= w * masked_mean(z, mask) + (1 - w) *", "import frontend, builder import dv3.audio import dv3.lrschedule import torch from", "spectrogram if mel_outputs is not None: mel_output = mel_outputs[idx].cpu().data.numpy() mel_output", "_frontend = None # to be set later def _pad(seq,", "mel.size(-1)) linear_outputs = None elif train_postnet: assert speaker_ids is None", "# Dataset and Dataloader setup dataset = PyTorchDataset(X, Mel, Y)", "= decoder_target_mask[:, r:, :] target_mask = target_mask[:, r:, :] else:", "= running_loss / (len(data_loader)) writer.add_scalar(\"loss (per epoch)\", averaged_loss, global_epoch) print(\"Loss:", "decoder_target_mask[:, r:, :] target_mask = target_mask[:, r:, :] else: decoder_target_mask,", "# The deepvoice3 model from dv3.deepvoice3_pytorch import frontend, builder import", "= guided_attentions(input_lengths, decoder_lengths, attn.size(-2), g=hparams.guided_attention_sigma) soft_mask = Variable(torch.from_numpy(soft_mask)) soft_mask =", "if log_event_path is None: log_event_path = \"log/run-test\" + str(datetime.now()).replace(\" \",", "\", \"_\") print(\"Los event path: {}\".format(log_event_path)) writer = SummaryWriter(log_dir=log_event_path) #", "downsample_step - max_target_len % downsample_step assert max_target_len % downsample_step ==", "downsample_step=hparams.downsample_step, padding_idx=hparams.padding_idx, dropout=hparams.dropout, kernel_size=hparams.kernel_size, encoder_channels=hparams.encoder_channels, decoder_channels=hparams.decoder_channels, converter_channels=hparams.converter_channels, use_memory_mask=hparams.use_memory_mask, trainable_positional_encodings=hparams.trainable_positional_encodings, 
force_monotonic_attention=hparams.force_monotonic_attention,", "flags done = np.array([_pad(np.zeros(len(x[1]) // r // downsample_step - 1),", "is not None and priority_w > 0: if w >", "+= b_pad * downsample_step a = np.array([_pad(x[0], max_input_len) for x", "= args else: text = args[0] seq = _frontend.text_to_sequence(text, p=hparams.replace_pronunciation_prob)", "preset \\\"{}\\\": {}\".format( hparams.preset, json.dumps(preset, indent=4))) _frontend = getattr(frontend, hparams.frontend)", "(0, 0)], mode=\"constant\", constant_values=0) return x def plot_alignment(alignment, path, info=None):", "warn(str(e)) pass dv3.audio.save_wav(signal, path) # Target mel spectrogram if mel_outputs", "target, lengths=None, mask=None, max_len=None): if lengths is None and mask", "alignment) # Save averaged alignment alignment_dir = join(checkpoint_dir, \"alignment_ave\") os.makedirs(alignment_dir,", "-y * y_hat_logits + torch.log(1 + torch.exp(y_hat_logits)) if w >", "None elif train_postnet: assert speaker_ids is None linear_outputs = model.postnet(mel)", "= self.sorted_indices.clone() batch_group_size = self.batch_group_size s, e = 0, 0", "have accepted this as a miracle without any physical explanation.\",", "int self.frame_lengths = list(map(int, self.frame_lengths)) return paths def collect_features(self, path):", "= data_root self.speaker_ids = None self.multi_speaker = False # If", "is not None: load_checkpoint(checkpoint_seq2seq_path, model.seq2seq, optimizer, reset_optimizer) if checkpoint_path is", "dataset as a single speaker dataset if self.speaker_id is not", "None self.multi_speaker = False # If not None, filter by", "super(LinearSpecDataSource, self).__init__(data_root, 0, speaker_id) class PartialyRandomizedSimilarTimeLengthSampler(Sampler): \"\"\"Partially randmoized sampler 1.", "= text_positions.cuda() frame_positions = frame_positions.cuda() if train_postnet: y = y.cuda()", "linear_binary_div = spec_loss( linear_outputs[:, :-r, :], y[:, r:, :], 
target_mask,", "= Variable(done) target_lengths = Variable(target_lengths) speaker_ids = Variable(speaker_ids) if ismultispeaker", "def _pad_2d(x, max_len, b_pad=0): x = np.pad(x, [(b_pad, max_len -", "def guided_attention(N, max_N, T, max_T, g): W = np.zeros((max_N, max_T),", "= len(l) == 5 self.frame_lengths = list( map(lambda l: int(l.decode(\"utf-8\").split(\"|\")[2]),", "!= \"\": preset = hparams.presets[hparams.preset] import json hparams.parse_json(json.dumps(preset)) print(\"Override hyper", "model def load_checkpoint(path, model, optimizer, reset_optimizer): global global_step global global_epoch", "lr=hparams.initial_learning_rate, betas=( hparams.adam_beta1, hparams.adam_beta2), eps=hparams.adam_eps, weight_decay=hparams.weight_decay) if checkpoint_restore_parts is not", "Mel writer.add_image(\"(Eval) Predicted mel spectrogram text{}_{}\".format(idx, speaker_str), prepare_spec_image(mel), global_step) #", "model. --restore-parts=<path> Restore part of the model. --log-event-path=<name> Log event", "= False # If not None, filter by speaker_id self.speaker_id", "builder import dv3.audio import dv3.lrschedule import torch from torch.utils import", "* mel_l1_loss + w * mel_binary_div # done: if train_seq2seq:", "downsample_step), max_len=mel.size(1)).unsqueeze(-1) if downsample_step > 1: # spectrogram-domain mask target_mask", "torch.FloatTensor(b) c = np.array([_pad_2d(x[2], max_target_len, b_pad=b_pad) for x in batch],", "list(map(lambda l: l.decode(\"utf-8\").split(\"|\")[3], lines)) if self.multi_speaker: speaker_ids = list(map(lambda l:", "writer.add_scalar(\"learning rate\", current_lr, global_step) global_step += 1 running_loss += loss.data[0]", "clip_thresh) optimizer.step() # Logs writer.add_scalar(\"loss\", float(loss.data[0]), global_step) if train_seq2seq: writer.add_scalar(\"done_loss\",", "not None: indices = np.array(speaker_ids) == self.speaker_id texts = list(np.array(texts)[indices])", "text_positions, frame_positions = positions # 
Downsample mel spectrogram if downsample_step", "in batch]) done = torch.FloatTensor(done).unsqueeze(-1) if multi_speaker: speaker_ids = torch.LongTensor([x[3]", "torch.backends.cudnn as cudnn from torch.utils import data as data_utils from", "batch]) else: speaker_ids = None return x_batch, input_lengths, mel_batch, y_batch,", "= min(batch_size * 32, len(self.lengths)) if batch_group_size % batch_size !=", "Audio path = join(eval_output_dir, \"step{:09d}_text{}_{}_predicted.wav\".format( global_step, idx, speaker_str)) dv3.audio.save_wav(signal, path)", "from checkpoint path. --train-seq2seq-only Train only seq2seq model. --train-postnet-only Train", "= torch.FloatTensor(c) # text positions text_positions = np.array([_pad(np.arange(1, len(x[0]) +", "self.data_root = data_root self.speaker_ids = None self.multi_speaker = False #", "checkpoint[\"global_epoch\"] return model def _load_embedding(path, model): state = torch.load(path)[\"state_dict\"] key", "deepvoice3 model from dv3.deepvoice3_pytorch import frontend, builder import dv3.audio import", "__init__(self, data_root, speaker_id=None): super(MelSpecDataSource, self).__init__(data_root, 1, speaker_id) class LinearSpecDataSource(_NPYDataSource): def", "frame_positions=frame_positions, input_lengths=input_lengths) # reshape mel_outputs = mel_outputs.view(len(mel), -1, mel.size(-1)) linear_outputs", "reshape mel_outputs = mel_outputs.view(len(mel), -1, mel.size(-1)) linear_outputs = None elif", "mel spectrogram if mel_outputs is not None: mel_output = mel_outputs[idx].cpu().data.numpy()", "= nn.L1Loss() w = hparams.masked_loss_weight # L1 loss if w", "# Prepare sampler frame_lengths = Mel.file_data_source.frame_lengths sampler = PartialyRandomizedSimilarTimeLengthSampler( frame_lengths,", "for x in batch], dtype=np.int) x_batch = torch.LongTensor(a) input_lengths =", "batch_group_size=None, permutate=True): self.lengths, self.sorted_indices = torch.sort(torch.LongTensor(lengths)) self.batch_size = batch_size if", 
"mask if we use masked loss if hparams.masked_loss_weight > 0:", "\"\"\"Create batch\"\"\" r = hparams.outputs_per_step downsample_step = hparams.downsample_step multi_speaker =", "if k in model_dict} model_dict.update(valid_state_dict) model.load_state_dict(model_dict) if __name__ == \"__main__\":", "target_lengths, speaker_ids) \\ in tqdm(enumerate(data_loader)): model.train() ismultispeaker = speaker_ids is", "Load embedding from checkpoint. --speaker-id=<N> Use specific speaker of data", "False # If not None, filter by speaker_id self.speaker_id =", "list(map(lambda f: join(self.data_root, f), paths)) if multi_speaker and self.speaker_id is", "--reset-optimizer Reset optimizer. --load-embedding=<path> Load embedding from checkpoint. --speaker-id=<N> Use", "target_mask = sequence_mask( target_lengths, max_len=y.size(1)).unsqueeze(-1) else: target_mask = decoder_target_mask #", "losses if train_seq2seq and train_postnet: loss = mel_loss + linear_loss", "max_input_len), dtype=np.float32) for b in range(B): W[b] = guided_attention(input_lengths[b], max_input_len,", "both if not specified if not train_seq2seq and not train_postnet:", "multi-speaker datasets. 
-h, --help Show this help message and exit", "\"alignment_ave\") os.makedirs(alignment_dir, exist_ok=True) path = join(alignment_dir, \"step{:09d}_alignment.png\".format(global_step)) alignment = attn.mean(0)[idx].cpu().data.numpy()", "else: assert mask is None l1_loss = l1(y_hat, y) #", "# Lengths input_lengths = [len(x[0]) for x in batch] max_input_len", "hparams_debug_string fs = hparams.sample_rate global_step = 0 global_epoch = 0", "spectrogram, global_step) def logit(x, eps=1e-8): return torch.log(x + eps) -", "data_loader = data_utils.DataLoader( dataset, batch_size=hparams.batch_size, num_workers=hparams.num_workers, sampler=sampler, collate_fn=collate_fn, pin_memory=hparams.pin_memory) print(\"dataloader_prepared\")", "whole model\") train_seq2seq, train_postnet = True, True if train_seq2seq: print(\"Training", "\"must be specified wrong args\" # Override hyper parameters hparams.parse(args[\"--hparams\"])", "y, mask, priority_bin=None, priority_w=0): masked_l1 = MaskedL1Loss() l1 = nn.L1Loss()", "seq2seq model\") elif train_postnet: print(\"Training postnet model\") else: assert False,", "print(\"Loss: {}\".format(running_loss / (len(data_loader)))) global_epoch += 1 def save_checkpoint(model, optimizer,", "not None: load_checkpoint(checkpoint_seq2seq_path, model.seq2seq, optimizer, reset_optimizer) if checkpoint_path is not", "else: text = args[0] seq = _frontend.text_to_sequence(text, p=hparams.replace_pronunciation_prob) if self.multi_speaker:", "+= downsample_step - max_target_len % downsample_step assert max_target_len % downsample_step", "optimizer. --load-embedding=<path> Load embedding from checkpoint. 
--speaker-id=<N> Use specific speaker", "speaker_str)) dv3.audio.save_wav(signal, path) try: writer.add_audio(\"(Eval) Predicted audio signal {}_{}\".format(idx, speaker_str),", "linear_binary_div.data[0]), global_step) if train_seq2seq and hparams.use_guided_attention: writer.add_scalar(\"attn_loss\", float(attn_loss.data[0]), global_step) if", "loss = mel_loss + linear_loss + done_loss elif train_seq2seq: loss", "if global_step > 0 and global_step % hparams.eval_interval == 0:", "from warnings import warn from dv3.hparams import hparams, hparams_debug_string fs", "return paths def collect_features(self, path): return np.load(path) class MelSpecDataSource(_NPYDataSource): def", "== 5 self.frame_lengths = list( map(lambda l: int(l.decode(\"utf-8\").split(\"|\")[2]), lines)) paths", "max_decoder_target_len, constant_values=1) for x in batch]) done = torch.FloatTensor(done).unsqueeze(-1) if", "is None: log_event_path = \"log/run-test\" + str(datetime.now()).replace(\" \", \"_\") print(\"Los", "os from tensorboardX import SummaryWriter from matplotlib import cm from", "np.array(speaker_ids) == self.speaker_id texts = list(np.array(texts)[indices]) self.multi_speaker = False return", "exist_ok=True) # hard coded speaker_ids = [0, 1, 10] if", "np.min(spectrogram)) spectrogram = np.flip(spectrogram, axis=1) # flip against freq axis", "Variable(x), Variable(mel), Variable(y) text_positions = Variable(text_positions) frame_positions = Variable(frame_positions) done", "use_cuda else soft_mask attn_loss = (attn * soft_mask).mean() loss +=", "global_step) writer.add_scalar(\"linear_l1_loss\", float(linear_l1_loss.data[0]), global_step) writer.add_scalar(\"linear_binary_div_loss\", float( linear_binary_div.data[0]), global_step) if train_seq2seq", "% batch_size self.batch_group_size = batch_group_size assert batch_group_size % batch_size ==", "0 for i in range(len(indices) // batch_group_size): s = i", "and train_postnet: loss = mel_loss + linear_loss + done_loss elif", "else 
None # Create mask if we use masked loss", "plot_alignment(alignment, path, info=None): fig, ax = plt.subplots() im = ax.imshow(", "speaker_ids = torch.LongTensor([x[3] for x in batch]) else: speaker_ids =", "save_states(global_step, writer, mel_outputs, linear_outputs, attn, mel, y, input_lengths, checkpoint_dir=None): print(\"Save", "checkpoint from: {}\".format(path)) checkpoint = torch.load(path) model.load_state_dict(checkpoint[\"state_dict\"]) if not reset_optimizer:", "x in batch], dtype=np.int) x_batch = torch.LongTensor(a) input_lengths = torch.LongTensor(input_lengths)", "if speaker_id is not None else \"single\" for idx, text", "from datetime import datetime # The deepvoice3 model from dv3.deepvoice3_pytorch", "return torch.log(x + eps) - torch.log(1 - x + eps)", "averaged alignment alignment_dir = join(checkpoint_dir, \"alignment_ave\") os.makedirs(alignment_dir, exist_ok=True) path =", "without any physical explanation.\", ] import dv3.synthesis synthesis._frontend = _frontend", "None, None # Losses w = hparams.binary_divergence_weight # mel: if", "class _NPYDataSource(FileDataSource): def __init__(self, data_root, col, speaker_id=None): self.data_root = data_root", "= sequence_mask(lengths, max_len).unsqueeze(-1) # (B, T, D) mask_ = mask.expand_as(input)", "text positions text_positions = np.array([_pad(np.arange(1, len(x[0]) + 1), max_input_len) for", "\"\"\" def __init__(self, lengths, batch_size=16, batch_group_size=None, permutate=True): self.lengths, self.sorted_indices =", "if self.permutate: perm = np.arange(len(indices[:e]) // self.batch_size) random.shuffle(perm) indices[:e] =", "model_dict} model_dict.update(valid_state_dict) model.load_state_dict(model_dict) if __name__ == \"__main__\": args = docopt(__doc__)", "1)) save_alignment(path, alignment) # Save averaged alignment alignment_dir = join(checkpoint_dir,", "the model. --log-event-path=<name> Log event path. --reset-optimizer Reset optimizer. 
--load-embedding=<path>", "== 0: eval_model(global_step, writer, model, checkpoint_dir, ismultispeaker) # Update loss.backward()", "checkpoint path if given. --checkpoint-seq2seq=<path> Restore seq2seq model from checkpoint", "checkpoint_dir, ismultispeaker) # Update loss.backward() if clip_thresh > 0: grad_norm", "/ N - t / T)**2 / (2 * g", "= f.readlines() l = lines[0].decode(\"utf-8\").split(\"|\") assert len(l) == 4 or", "save_states( global_step, writer, mel_outputs, linear_outputs, attn, mel, y, input_lengths, checkpoint_dir)", "= Variable(frame_positions) done = Variable(done) target_lengths = Variable(target_lengths) speaker_ids =", "mel_output = prepare_spec_image(dv3.audio._denormalize(mel_output)) writer.add_image(\"Target mel spectrogram\", mel_output, global_step) # Target", "max_input_len) for x in batch], dtype=np.int) x_batch = torch.LongTensor(a) input_lengths", "= spec_loss( mel_outputs[:, :-r, :], mel[:, r:, :], decoder_target_mask) mel_loss", "Alignment # Multi-hop attention if attn is not None and", "speaker_ids = None return x_batch, input_lengths, mel_batch, y_batch, \\ (text_positions,", "= plt.subplots() im = ax.imshow( alignment, aspect='auto', origin='lower', interpolation='none') fig.colorbar(im,", "= join(eval_output_dir, \"step{:09d}_text{}_{}_predicted.wav\".format( global_step, idx, speaker_str)) dv3.audio.save_wav(signal, path) try: writer.add_audio(\"(Eval)", "case for multi-speaker datasets. 
-h, --help Show this help message", "exit \"\"\" from docopt import docopt import sys from os.path", "* mel_binary_div # done: if train_seq2seq: done_loss = binary_criterion(done_hat, done)", "{}\".format(global_step)) # idx = np.random.randint(0, len(input_lengths)) idx = min(1, len(input_lengths)", "train_seq2seq: writer.add_scalar(\"done_loss\", float(done_loss.data[0]), global_step) writer.add_scalar(\"mel loss\", float(mel_loss.data[0]), global_step) writer.add_scalar(\"mel_l1_loss\", float(mel_l1_loss.data[0]),", "optimizer_state, \"global_step\": step, \"global_epoch\": epoch, }, checkpoint_path) print(\"Saved checkpoint:\", checkpoint_path)", "not None: linear_output = y[idx].cpu().data.numpy() spectrogram = prepare_spec_image(dv3.audio._denormalize(linear_output)) writer.add_image(\"Target linear", "max_target_len, g=0.2): B = len(input_lengths) max_input_len = input_lengths.max() W =", "global_step) # Audio path = join(eval_output_dir, \"step{:09d}_text{}_{}_predicted.wav\".format( global_step, idx, speaker_str))", "if train_seq2seq: done_loss = binary_criterion(done_hat, done) # linear: if train_postnet:", "and hparams.use_guided_attention: writer.add_scalar(\"attn_loss\", float(attn_loss.data[0]), global_step) if clip_thresh > 0: writer.add_scalar(\"gradient", "--train-seq2seq-only Train only seq2seq model. 
--train-postnet-only Train only postnet model.", "signal /= np.max(np.abs(signal)) # Alignment path = join(eval_output_dir, \"step{:09d}_text{}_{}_alignment.png\".format( global_step,", "elif train_postnet: loss = linear_loss # attention if train_seq2seq and", "optimizer state from {}\".format(path)) optimizer.load_state_dict(checkpoint[\"optimizer\"]) global_step = checkpoint[\"global_step\"] global_epoch =", "= args[\"--train-postnet-only\"] # train both if not specified if not", "hparams.builder)( n_speakers=hparams.n_speakers, speaker_embed_dim=hparams.speaker_embed_dim, n_vocab=_frontend.n_vocab, embed_dim=hparams.text_embed_dim, mel_dim=hparams.num_mels, linear_dim=hparams.fft_size // 2 +", "torch.utils.data.sampler import Sampler import numpy as np from numba import", "if not train_seq2seq and not train_postnet: print(\"Training whole model\") train_seq2seq,", "mel_l1_loss, mel_binary_div = spec_loss( mel_outputs[:, :-r, :], mel[:, r:, :],", "from dv3.hparams import hparams, hparams_debug_string fs = hparams.sample_rate global_step =", "hparams.masked_loss_weight > 0: # decoder output domain mask decoder_target_mask =", "- len(x) - b_pad), (0, 0)], mode=\"constant\", constant_values=0) return x", "data as data_utils from torch.utils.data.sampler import Sampler import numpy as", "paths = list(map(lambda l: l.decode(\"utf-8\").split(\"|\")[self.col], lines)) paths = list(map(lambda f:", ":priority_bin]) else: priority_loss = l1(y_hat[:, :, :priority_bin], y[:, :, :priority_bin])", "single speaker dataset indices = np.array(speaker_ids) == self.speaker_id paths =", "mask is None: raise RuntimeError(\"Should provide either lengths or mask\")", "# if b_pad > 0: # s, e = s", "alignment) tag = \"averaged_alignment\" writer.add_image(tag, np.uint8(cm.viridis(np.flip(alignment, 1).T) * 255), global_step)", "= hparams.downsample_step multi_speaker = len(batch[0]) == 4 # Lengths input_lengths", "__init__(self, X, Mel, Y): self.X = X self.Mel = Mel", "to measure the 
acute emotional intelligence that has never gone", "(text_positions, frame_positions), done, target_lengths, speaker_ids def time_string(): return datetime.now().strftime('%Y-%m-%d %H:%M')", "spectrogram = (spectrogram - np.min(spectrogram)) / (np.max(spectrogram) - np.min(spectrogram)) spectrogram", "None: restore_parts(checkpoint_restore_parts, model) # Load checkpoints if checkpoint_postnet_path is not", "+ 1), max_input_len) for x in batch], dtype=np.int) text_positions =", "seq2seq model from checkpoint path. --checkpoint-postnet=<path> Restore postnet model from", "x_batch, input_lengths, mel_batch, y_batch, \\ (text_positions, frame_positions), done, target_lengths, speaker_ids", "self.multi_speaker: text, speaker_id = args else: text = args[0] seq", "positions, done, target_lengths, speaker_ids) \\ in tqdm(enumerate(data_loader)): model.train() ismultispeaker =", "2 + 1, r=hparams.outputs_per_step, downsample_step=hparams.downsample_step, padding_idx=hparams.padding_idx, dropout=hparams.dropout, kernel_size=hparams.kernel_size, encoder_channels=hparams.encoder_channels, decoder_channels=hparams.decoder_channels,", "(x, input_lengths, mel, y, positions, done, target_lengths, speaker_ids) \\ in", "jit from nnmnkwii.datasets import FileSourceDataset, FileDataSource from os.path import join,", "B = len(input_lengths) max_input_len = input_lengths.max() W = np.zeros((B, max_target_len,", "help message and exit \"\"\" from docopt import docopt import", "aspect='auto', origin='lower', interpolation='none') fig.colorbar(im, ax=ax) xlabel = 'Decoder timestep' if", "train_seq2seq or train_postnet global global_step, global_epoch while global_epoch < nepochs:", "+ str(datetime.now()).replace(\" \", \"_\") print(\"Los event path: {}\".format(log_event_path)) writer =", "optimizer_state = checkpoint[\"optimizer\"] if optimizer_state is not None: print(\"Load optimizer", "None: max_len = sequence_length.data.max() batch_size = sequence_length.size(0) seq_range = 
torch.arange(0,", "l1(y_hat, y) # Priority L1 loss if priority_bin is not", "in model_dict} model_dict.update(valid_state_dict) model.load_state_dict(model_dict) if __name__ == \"__main__\": args =", "hparams.frontend) os.makedirs(checkpoint_dir, exist_ok=True) # Input dataset definitions X = FileSourceDataset(TextDataSource(data_root,", "for idx, text in enumerate(texts): signal, alignment, _, mel =", "> 0: if w > 0: priority_loss = w *", "max_decoder_target_len + 1 # if b_pad > 0: # s,", "ismultispeaker = speaker_ids is not None # Learning rate schedule", "elif train_seq2seq: assert speaker_ids is None mel_outputs, attn, done_hat, _", "class LinearSpecDataSource(_NPYDataSource): def __init__(self, data_root, speaker_id=None): super(LinearSpecDataSource, self).__init__(data_root, 0, speaker_id)", "+ 1)) os.makedirs(alignment_dir, exist_ok=True) path = join(alignment_dir, \"step{:09d}_layer_{}_alignment.png\".format( global_step, i", "input_lengths = input_lengths.long().numpy() decoder_lengths = target_lengths.long().numpy() // r // downsample_step", "text, speaker_id = self.X[idx] return text, self.Mel[idx], self.Y[idx], speaker_id else:", "assert speaker_ids is None mel_outputs, attn, done_hat, _ = model.seq2seq(", "averaged_loss, global_epoch) print(\"Loss: {}\".format(running_loss / (len(data_loader)))) global_epoch += 1 def", "sampler 1. Sort by lengths 2. 
Pick a small patch", "{}, step={}\".format( hparams.builder, time_string(), global_step)) def prepare_spec_image(spectrogram): # [0, 1]", "try: writer.add_audio(\"(Eval) Predicted audio signal {}_{}\".format(idx, speaker_str), signal, global_step, sample_rate=fs)", "l.decode(\"utf-8\").split(\"|\")[3], lines)) if self.multi_speaker: speaker_ids = list(map(lambda l: int(l.decode(\"utf-8\").split(\"|\")[-1]), lines))", "binary_criterion = nn.BCELoss() assert train_seq2seq or train_postnet global global_step, global_epoch", "* l1_loss + priority_w * priority_loss # Binary divergence loss", "v in state.items() if k in model_dict} model_dict.update(valid_state_dict) model.load_state_dict(model_dict) if", "model = build_model() if use_cuda: model = model.cuda() optimizer =", "done: if train_seq2seq: done_loss = binary_criterion(done_hat, done) # linear: if", "at the CERN laboratory say they have discovered a new", "None l1_loss = w * masked_l1(y_hat, y, mask=mask) + (1", "if mel_outputs is not None: mel_output = mel[idx].cpu().data.numpy() mel_output =", "dataset, batch_size=hparams.batch_size, num_workers=hparams.num_workers, sampler=sampler, collate_fn=collate_fn, pin_memory=hparams.pin_memory) print(\"dataloader_prepared\") # Model model", "in batch], dtype=np.int) x_batch = torch.LongTensor(a) input_lengths = torch.LongTensor(input_lengths) target_lengths", "not None l1_loss = w * masked_l1(y_hat, y, mask=mask) +", "batch] max_input_len = max(input_lengths) target_lengths = [len(x[1]) for x in", "# aha, need to cast numpy.int64 to int self.frame_lengths =", ":].view(-1) # Handle last elements s += batch_group_size if s", "/ (np.max(spectrogram) - np.min(spectrogram)) spectrogram = np.flip(spectrogram, axis=1) # flip", "eps=1e-8): return torch.log(x + eps) - torch.log(1 - x +", "in optimizer.param_groups: param_group['lr'] = current_lr optimizer.zero_grad() # Used for Position", "if self.multi_speaker: text, speaker_id = self.X[idx] return text, self.Mel[idx], 
self.Y[idx],", "message and exit \"\"\" from docopt import docopt import sys", "// batch_group_size): s = i * batch_group_size e = s", "global_step, **hparams.lr_schedule_kwargs) for param_group in optimizer.param_groups: param_group['lr'] = current_lr optimizer.zero_grad()", "Hyper parameters [default: ]. --checkpoint=<path> Restore model from checkpoint path", "target_mask = decoder_target_mask # shift mask decoder_target_mask = decoder_target_mask[:, r:,", "global_step) writer.add_scalar(\"mel_binary_div_loss\", float(mel_binary_div.data[0]), global_step) if train_postnet: writer.add_scalar(\"linear_loss\", float(linear_loss.data[0]), global_step) writer.add_scalar(\"linear_l1_loss\",", "False _frontend = None # to be set later def", "is None and mask is None: raise RuntimeError(\"Should provide either", "network or variational auto-encoder.\", \"Please call Stella.\", \"Some have accepted", "else: target_mask = decoder_target_mask # shift mask decoder_target_mask = decoder_target_mask[:,", "where to save model checkpoints [default: checkpoints]. --hparams=<parmas> Hyper parameters", "# Predicted spectrogram if linear_outputs is not None: linear_output =", "not None: linear_output = linear_outputs[idx].cpu().data.numpy() spectrogram = prepare_spec_image(dv3.audio._denormalize(linear_output)) writer.add_image(\"Predicted linear", "(0, max_len - len(seq)), mode='constant', constant_values=constant_values) def _pad_2d(x, max_len, b_pad=0):", "frame_lengths, batch_size=hparams.batch_size) # Dataset and Dataloader setup dataset = PyTorchDataset(X,", "for x in batch], dtype=np.float32) y_batch = torch.FloatTensor(c) # text", "20 conference.\", \"Generative adversarial network or variational auto-encoder.\", \"Please call", "event path. --reset-optimizer Reset optimizer. 
--load-embedding=<path> Load embedding from checkpoint.", "self.speaker_id is not None: indices = np.array(speaker_ids) == self.speaker_id texts", "print(\"Training postnet model\") else: assert False, \"must be specified wrong", "len(seq)), mode='constant', constant_values=constant_values) def _pad_2d(x, max_len, b_pad=0): x = np.pad(x,", "speaker_str)) save_alignment(path, alignment) tag = \"eval_averaged_alignment_{}_{}\".format(idx, speaker_str) writer.add_image(tag, np.uint8(cm.viridis(np.flip(alignment, 1).T)", "= \"_postnet\" m = model.postnet checkpoint_path = join( checkpoint_dir, \"checkpoint_step{:09d}{}.pth\".format(global_step,", "t] = 1 - np.exp(-(n / N - t /", "mask): # (B, T, D) mask_ = mask.expand_as(y) return (y", "\"eval_averaged_alignment_{}_{}\".format(idx, speaker_str) writer.add_image(tag, np.uint8(cm.viridis(np.flip(alignment, 1).T) * 255), global_step) # Mel", "loss if hparams.binary_divergence_weight <= 0: binary_div = Variable(y.data.new(1).zero_()) else: y_hat_logits", "\"_postnet\" m = model.postnet checkpoint_path = join( checkpoint_dir, \"checkpoint_step{:09d}{}.pth\".format(global_step, suffix))", "{}\".format(load_embedding)) _load_embedding(load_embedding, model) # Setup summary writer for tensorboard if", "= [len(x[0]) for x in batch] max_input_len = max(input_lengths) target_lengths", "coded speaker_ids = [0, 1, 10] if ismultispeaker else [None]", "(np.max(spectrogram) - np.min(spectrogram)) spectrogram = np.flip(spectrogram, axis=1) # flip against", "model.get_trainable_parameters(), clip_thresh) optimizer.step() # Logs writer.add_scalar(\"loss\", float(loss.data[0]), global_step) if train_seq2seq:", "build_model() if use_cuda: model = model.cuda() optimizer = optim.Adam(model.get_trainable_parameters(), lr=hparams.initial_learning_rate,", "--speaker-id=<N> Use specific speaker of data in case for multi-speaker", "ismultispeaker): # harded coded texts = [ \"Scientists at the", "as data_utils from torch.utils.data.sampler import 
Sampler import numpy as np", "part of the model. --log-event-path=<name> Log event path. --reset-optimizer Reset", "speaker_embed_dim=hparams.speaker_embed_dim, n_vocab=_frontend.n_vocab, embed_dim=hparams.text_embed_dim, mel_dim=hparams.num_mels, linear_dim=hparams.fft_size // 2 + 1, r=hparams.outputs_per_step,", "model from: {}\".format(path)) state = torch.load(path)[\"state_dict\"] model_dict = model.state_dict() valid_state_dict", "indices = np.array(speaker_ids) == self.speaker_id texts = list(np.array(texts)[indices]) self.multi_speaker =", "1) input_length = input_lengths[idx] # Alignment # Multi-hop attention if", "checkpoint_restore_parts is not None: restore_parts(checkpoint_restore_parts, model) # Load checkpoints if", "l1 = nn.L1Loss() w = hparams.masked_loss_weight # L1 loss if", "for x in batch]) done = torch.FloatTensor(done).unsqueeze(-1) if multi_speaker: speaker_ids", "= z.mean() return l1_loss, binary_div @jit(nopython=True) def guided_attention(N, max_N, T,", "= list(map(lambda l: int(l.decode(\"utf-8\").split(\"|\")[-1]), lines)) # Filter by speaker_id #", "w = hparams.masked_loss_weight # L1 loss if w > 0:", "None: lr_schedule_f = getattr(dv3.lrschedule, hparams.lr_schedule) current_lr = lr_schedule_f( init_lr, global_step,", "= target_mask[:, r:, :] else: decoder_target_mask, target_mask = None, None", "if use_cuda else soft_mask attn_loss = (attn * soft_mask).mean() loss", "masked loss if hparams.masked_loss_weight > 0: # decoder output domain", "dtype=np.int) x_batch = torch.LongTensor(a) input_lengths = torch.LongTensor(input_lengths) target_lengths = torch.LongTensor(target_lengths)", "audio signal signal = dv3.audio.inv_spectrogram(linear_output.T) signal /= np.max(np.abs(signal)) path =", "PartialyRandomizedSimilarTimeLengthSampler( frame_lengths, batch_size=hparams.batch_size) # Dataset and Dataloader setup dataset =", "batch\"\"\" r = hparams.outputs_per_step downsample_step = hparams.downsample_step multi_speaker = len(batch[0])", 
"if use_cuda: if train_seq2seq: x = x.cuda() text_positions = text_positions.cuda()", "mel_loss = (1 - w) * mel_l1_loss + w *", "(1 - w) * linear_l1_loss + w * linear_binary_div #", "part of the model from: {}\".format(path)) state = torch.load(path)[\"state_dict\"] model_dict", "torch.LongTensor(text_positions) max_decoder_target_len = max_target_len // r // downsample_step # frame", "r // downsample_step # Feed data x, mel, y =", "i in range(len(indices) // batch_group_size): s = i * batch_group_size", "train_postnet: loss = mel_loss + linear_loss + done_loss elif train_seq2seq:", "speaker_id=speaker_id, fast=False) signal /= np.max(np.abs(signal)) # Alignment path = join(eval_output_dir,", "cast numpy.int64 to int self.frame_lengths = list(map(int, self.frame_lengths)) return paths", "seq2seq model. --train-postnet-only Train only postnet model. --restore-parts=<path> Restore part", "torch.utils import data as data_utils from torch.utils.data.sampler import Sampler import", "y_hat_logits = logit(y_hat) z = -y * y_hat_logits + torch.log(1", "hparams.name == \"deepvoice3\" # Presets if hparams.preset is not None", "max_target_len = max(target_lengths) if max_target_len % r != 0: max_target_len", "linear_outputs = model.postnet(mel) mel_outputs, attn, done_hat = None, None, None", "torch.log(1 + torch.exp(y_hat_logits)) if w > 0: binary_div = w", "random.shuffle(perm) indices[:e] = indices[:e].view(-1, self.batch_size)[perm, :].view(-1) # Handle last elements", "torch import nn from torch import optim import torch.backends.cudnn as", "[] self.speaker_id = speaker_id def collect_files(self): meta = join(self.data_root, \"train.txt\")", "def _pad(seq, max_len, constant_values=0): return np.pad(seq, (0, max_len - len(seq)),", "= np.array([_pad_2d(x[1], max_target_len, b_pad=b_pad) for x in batch], dtype=np.float32) mel_batch", "255), global_step) # save files as well for now alignment_dir", "== \"__main__\": args = docopt(__doc__) print(\"Command line args:\\n\", 
args) checkpoint_dir", "w * masked_l1(y_hat, y, mask=mask) + (1 - w) *", "import tqdm, trange from datetime import datetime # The deepvoice3", "+ (1 - w) * z.mean() else: binary_div = z.mean()", "m.state_dict(), \"optimizer\": optimizer_state, \"global_step\": step, \"global_epoch\": epoch, }, checkpoint_path) print(\"Saved", "None: print(\"Loading embedding from {}\".format(load_embedding)) _load_embedding(load_embedding, model) # Setup summary", "input, target, lengths=None, mask=None, max_len=None): if lengths is None and", "None # Losses w = hparams.binary_divergence_weight # mel: if train_seq2seq:", "Predicted audio signal {}_{}\".format(idx, speaker_str), signal, global_step, sample_rate=fs) except Exception", "* y_hat_logits + torch.log(1 + torch.exp(y_hat_logits)) if w > 0:", "None linear_outputs = model.postnet(mel) mel_outputs, attn, done_hat = None, None,", "assert speaker_ids is None linear_outputs = model.postnet(mel) mel_outputs, attn, done_hat", "is not None and attn.dim() == 4: for i, alignment", "print(hparams_debug_string()) assert hparams.name == \"deepvoice3\" # Presets if hparams.preset is", "self.speaker_ids = None self.multi_speaker = False # If not None,", "linear_outputs[idx].cpu().data.numpy() spectrogram = prepare_spec_image(dv3.audio._denormalize(linear_output)) writer.add_image(\"Predicted linear spectrogram\", spectrogram, global_step) #", "writer.add_scalar(\"linear_binary_div_loss\", float( linear_binary_div.data[0]), global_step) if train_seq2seq and hparams.use_guided_attention: writer.add_scalar(\"attn_loss\", float(attn_loss.data[0]),", "hparams.preset is not None and hparams.preset != \"\": preset =", "global_step, global_epoch while global_epoch < nepochs: running_loss = 0. 
for", "= torch.LongTensor(target_lengths) b = np.array([_pad_2d(x[1], max_target_len, b_pad=b_pad) for x in", "= torch.LongTensor(text_positions) max_decoder_target_len = max_target_len // r // downsample_step #", "= col self.frame_lengths = [] self.speaker_id = speaker_id def collect_files(self):", "input_lengths.max() W = np.zeros((B, max_target_len, max_input_len), dtype=np.float32) for b in", "_pad(seq, max_len, constant_values=0): return np.pad(seq, (0, max_len - len(seq)), mode='constant',", "= current_lr optimizer.zero_grad() # Used for Position encoding text_positions, frame_positions", "speaker_str), prepare_spec_image(mel), global_step) # Audio path = join(eval_output_dir, \"step{:09d}_text{}_{}_predicted.wav\".format( global_step,", "None and priority_w > 0: if w > 0: priority_loss", "= np.pad(x, [(b_pad, max_len - len(x) - b_pad), (0, 0)],", "guided_attentions(input_lengths, target_lengths, max_target_len, g=0.2): B = len(input_lengths) max_input_len = input_lengths.max()", "os.makedirs(alignment_dir, exist_ok=True) path = join(alignment_dir, \"step{:09d}_layer_{}_alignment.png\".format( global_step, i + 1))", "\"alignment_layer{}\".format(i + 1) writer.add_image(tag, np.uint8(cm.viridis(np.flip(alignment, 1).T) * 255), global_step) #", "= \"_seq2seq\" m = model.seq2seq elif train_postnet: suffix = \"_postnet\"", "not reset_optimizer: optimizer_state = checkpoint[\"optimizer\"] if optimizer_state is not None:", "super(MaskedL1Loss, self).__init__() self.criterion = nn.L1Loss(size_average=False) def forward(self, input, target, lengths=None,", "W def guided_attentions(input_lengths, target_lengths, max_target_len, g=0.2): B = len(input_lengths) max_input_len", "Variable(done) target_lengths = Variable(target_lengths) speaker_ids = Variable(speaker_ids) if ismultispeaker else", "= list(np.array(paths)[indices]) self.frame_lengths = list(np.array(self.frame_lengths)[indices]) # aha, need to cast", "= [0, 1, 10] if ismultispeaker else [None] for 
speaker_id", "Train only seq2seq model. --train-postnet-only Train only postnet model. --restore-parts=<path>", "lines = f.readlines() l = lines[0].decode(\"utf-8\").split(\"|\") assert len(l) == 4", "batch_group_size % batch_size == 0 self.permutate = permutate def __iter__(self):", "Mel, Y) data_loader = data_utils.DataLoader( dataset, batch_size=hparams.batch_size, num_workers=hparams.num_workers, sampler=sampler, collate_fn=collate_fn,", "if checkpoint_restore_parts is not None: restore_parts(checkpoint_restore_parts, model) # Load checkpoints", "cudnn.benchmark = False _frontend = None # to be set", "a new particle.\", \"There's a way to measure the acute", "= hparams.outputs_per_step downsample_step = hparams.downsample_step multi_speaker = len(batch[0]) == 4", "data x, mel, y = Variable(x), Variable(mel), Variable(y) text_positions =", "idx): if self.multi_speaker: text, speaker_id = self.X[idx] return text, self.Mel[idx],", "0::downsample_step, :].contiguous() # Lengths input_lengths = input_lengths.long().numpy() decoder_lengths = target_lengths.long().numpy()", "self.batch_size) random.shuffle(perm) indices[:e] = indices[:e].view(-1, self.batch_size)[perm, :].view(-1) # Handle last", "Presets if hparams.preset is not None and hparams.preset != \"\":", "model) # Load checkpoints if checkpoint_postnet_path is not None: load_checkpoint(checkpoint_postnet_path,", "Which model to be trained train_seq2seq = args[\"--train-seq2seq-only\"] train_postnet =", "/= np.max(np.abs(signal)) # Alignment path = join(eval_output_dir, \"step{:09d}_text{}_{}_alignment.png\".format( global_step, idx,", "_NPYDataSource(FileDataSource): def __init__(self, data_root, col, speaker_id=None): self.data_root = data_root self.col", "we use masked loss if hparams.masked_loss_weight > 0: # decoder", "if hparams.preset is not None and hparams.preset != \"\": preset", "* z.mean() else: binary_div = z.mean() return l1_loss, binary_div @jit(nopython=True)", 
"Mel.file_data_source.frame_lengths sampler = PartialyRandomizedSimilarTimeLengthSampler( frame_lengths, batch_size=hparams.batch_size) # Dataset and Dataloader", "= Mel self.Y = Y # alias self.multi_speaker = X.file_data_source.multi_speaker", "col, speaker_id=None): self.data_root = data_root self.col = col self.frame_lengths =", "model.postnet(mel) mel_outputs, attn, done_hat = None, None, None # Losses", "<= 0: binary_div = Variable(y.data.new(1).zero_()) else: y_hat_logits = logit(y_hat) z", "0: grad_norm = torch.nn.utils.clip_grad_norm( model.get_trainable_parameters(), clip_thresh) optimizer.step() # Logs writer.add_scalar(\"loss\",", "# (B, T, D) mask_ = mask.expand_as(input) loss = self.criterion(input", "if lengths is None and mask is None: raise RuntimeError(\"Should", "if clip_thresh > 0: writer.add_scalar(\"gradient norm\", grad_norm, global_step) writer.add_scalar(\"learning rate\",", ":, :priority_bin], y[:, :, :priority_bin]) l1_loss = (1 - priority_w)", "= args[\"--train-seq2seq-only\"] train_postnet = args[\"--train-postnet-only\"] # train both if not", "# done flags done = np.array([_pad(np.zeros(len(x[1]) // r // downsample_step", "def load_checkpoint(path, model, optimizer, reset_optimizer): global global_step global global_epoch print(\"Load", "for i in range(len(indices) // batch_group_size): s = i *", "if multi_speaker and self.speaker_id is not None: speaker_ids = list(map(lambda", "downsample_step - 1), max_decoder_target_len, constant_values=1) for x in batch]) done", "Priority L1 loss if priority_bin is not None and priority_w", "masked_l1( y_hat[:, :, :priority_bin], y[:, :, :priority_bin], mask=mask) \\ +", "from torch.utils import data as data_utils from torch.utils.data.sampler import Sampler", "# using multi-speaker dataset as a single speaker dataset if", "f.readlines() l = lines[0].decode(\"utf-8\").split(\"|\") assert len(l) == 4 or len(l)", "alignment, _, mel = synthesis.tts( model, text, p=0, speaker_id=speaker_id, 
fast=False)", "= None elif train_postnet: assert speaker_ids is None linear_outputs =", "met with other leaders at the Group of 20 conference.\",", "reset_optimizer: optimizer_state = checkpoint[\"optimizer\"] if optimizer_state is not None: print(\"Load", "b in range(B): W[b] = guided_attention(input_lengths[b], max_input_len, target_lengths[b], max_target_len, g).T", "if data_root is None: data_root = join(dirname(__file__), \"data\", \"ljspeech\") log_event_path", "for b in range(B): W[b] = guided_attention(input_lengths[b], max_input_len, target_lengths[b], max_target_len,", "it 3. Permutate mini-batchs \"\"\" def __init__(self, lengths, batch_size=16, batch_group_size=None,", "= sequence_mask( target_lengths / (r * downsample_step), max_len=mel.size(1)).unsqueeze(-1) if downsample_step", "self.X[idx], self.Mel[idx], self.Y[idx] def __len__(self): return len(self.X) def sequence_mask(sequence_length, max_len=None):", "def __init__(self, data_root, col, speaker_id=None): self.data_root = data_root self.col =", "list(map(int, self.frame_lengths)) return paths def collect_features(self, path): return np.load(path) class", "input_lengths[idx] # Alignment # Multi-hop attention if attn is not", "print(\"Training whole model\") train_seq2seq, train_postnet = True, True if train_seq2seq:", "linear spectrogram\", spectrogram, global_step) # Predicted audio signal signal =", "alignment) tag = \"eval_averaged_alignment_{}_{}\".format(idx, speaker_str) writer.add_image(tag, np.uint8(cm.viridis(np.flip(alignment, 1).T) * 255),", "mask\") # (B, T, 1) if mask is None: mask", "spectrogram = prepare_spec_image(dv3.audio._denormalize(linear_output)) writer.add_image(\"Predicted linear spectrogram\", spectrogram, global_step) # Predicted", "if hparams.lr_schedule is not None: lr_schedule_f = getattr(dv3.lrschedule, hparams.lr_schedule) current_lr", "linear_outputs, attn, done_hat = model( x, mel, speaker_ids=speaker_ids, text_positions=text_positions, 
frame_positions=frame_positions,", "suffix = \"\" m = model elif train_seq2seq: suffix =", "writer.add_scalar(\"loss (per epoch)\", averaged_loss, global_epoch) print(\"Loss: {}\".format(running_loss / (len(data_loader)))) global_epoch", "global_step) writer.add_scalar(\"learning rate\", current_lr, global_step) global_step += 1 running_loss +=", "= None # to be set later def _pad(seq, max_len,", "\"averaged_alignment\" writer.add_image(tag, np.uint8(cm.viridis(np.flip(alignment, 1).T) * 255), global_step) # Predicted mel", "step, checkpoint_dir, epoch, train_seq2seq, train_postnet): if train_seq2seq and train_postnet: suffix", "== 0: save_states( global_step, writer, mel_outputs, linear_outputs, attn, mel, y,", "is not None: xlabel += '\\n\\n' + info plt.xlabel(xlabel) plt.ylabel('Encoder", "Sampler import numpy as np from numba import jit from", "tag = \"alignment_layer{}\".format(i + 1) writer.add_image(tag, np.uint8(cm.viridis(np.flip(alignment, 1).T) * 255),", "speaker dataset indices = np.array(speaker_ids) == self.speaker_id paths = list(np.array(paths)[indices])", "import sys from os.path import dirname, join from tqdm import", "W def train(model, data_loader, optimizer, writer, init_lr=0.002, checkpoint_dir=None, checkpoint_interval=None, nepochs=None,", "mask is None l1_loss = l1(y_hat, y) # Priority L1", "]. --checkpoint=<path> Restore model from checkpoint path if given. --checkpoint-seq2seq=<path>", "None: batch_group_size = min(batch_size * 32, len(self.lengths)) if batch_group_size %", "return len(self.X) def sequence_mask(sequence_length, max_len=None): if max_len is None: max_len", "assert len(l) == 4 or len(l) == 5 multi_speaker =" ]
[ "find_faces(capture) for bbox in faces: rect = pygame.Rect(bbox.left(), bbox.top(), bbox.width(),", "import pygame.camera # Init display screen = pygame.display.set_mode((0,0), pygame.FULLSCREEN) pygame.display.set_caption(\"Magic", "pygame.event.get(): if event.type in (pygame.QUIT, pygame.KEYDOWN): return capture = camera.get_image()", "Init clock clock = pygame.time.Clock() # Init camera pygame.camera.init() cameras", "for bbox in faces: rect = pygame.Rect(bbox.left(), bbox.top(), bbox.width(), bbox.height())", "(time.time() - start)), (0,0)) pygame.display.flip() with open(os.path.join(imgdir, \"index.tsv\")) as f:", "= pygame.camera.list_cameras() if not cameras: pygame.quit() print \"No cameras found,", "= recognition.MultiBinaryTree() imgdir = sys.argv[1] if len(sys.argv) > 1 else", "pygame.FULLSCREEN) pygame.display.set_caption(\"Magic Mirror\") #pygame.mouse.set_visible(False) # Init font pygame.font.init() font_colour =", "font_colour, font_size = 40): return font(font_size).render(str(text), True, colour) # Init", "faces) img = index.nearest(face) screen.blit(pygame.image.load(img), (0,0)) pygame.display.flip() pygame.time.wait(10*1000) # 30s", "= camera.get_image() faces, capture_data = find_faces(capture) for bbox in faces:", "if not cameras: pygame.quit() print \"No cameras found, exiting!\" sys.exit(1)", "font(font_size = 40): if font_size not in fonts: fonts[font_size] =", "= numpy.array(pygame.surfarray.pixels3d(pygame_capture)) capture = numpy.swapaxes(capture, 0, 1) return recognition.align.getAllFaceBoundingBoxes(capture), capture", "255), fullscreen) pygame.display.flip() face = recognition.average(recognition.getRepBBox(capture, face) for face in", "import sys, os def find_faces(pygame_capture): capture = numpy.array(pygame.surfarray.pixels3d(pygame_capture)) capture =", "def write(text, colour = font_colour, font_size = 40): return font(font_size).render(str(text),", "255, 255), fullscreen) pygame.display.flip() face = 
recognition.average(recognition.getRepBBox(capture, face) for face", "screen.blit(pygame.image.load(img), (0,0)) pygame.display.flip() pygame.time.wait(10*1000) # 30s def main(): countdown =", "screen.blit(write(countdown), (0,0)) countdown -= 1 pygame.display.flip() pygame.quit() if __name__ ==", "capture_data = find_faces(capture) for bbox in faces: rect = pygame.Rect(bbox.left(),", "not cameras: pygame.quit() print \"No cameras found, exiting!\" sys.exit(1) camera", "= font_colour, font_size = 40): return font(font_size).render(str(text), True, colour) #", "photo_samples.append(pygame.image.load(os.path.join(imgdir, \"thumbnails\", img))) # Init clock clock = pygame.time.Clock() #", "10 lastFaceCount = len(faces) elif countdown == 0: recognize(capture_data, faces)", "event in pygame.event.get(): if event.type in (pygame.QUIT, pygame.KEYDOWN): return capture", "index... %fs\" % (time.time() - start)), (0,0)) pygame.display.flip() with open(os.path.join(imgdir,", "40): if font_size not in fonts: fonts[font_size] = pygame.font.Font(\"Futura.ttc\", font_size)", "face = recognition.average(recognition.getRepBBox(capture, face) for face in faces) img =", "face in faces) img = index.nearest(face) screen.blit(pygame.image.load(img), (0,0)) pygame.display.flip() pygame.time.wait(10*1000)", "start)), (0,0)) pygame.display.flip() with open(os.path.join(imgdir, \"index.tsv\")) as f: for line", "numpy.array(pygame.surfarray.pixels3d(pygame_capture)) capture = numpy.swapaxes(capture, 0, 1) return recognition.align.getAllFaceBoundingBoxes(capture), capture index", "Mirror\") #pygame.mouse.set_visible(False) # Init font pygame.font.init() font_colour = 16, 117,", "f: for line in f: line = line.strip().split(\"\\t\") img =", "False) screen.blit(pygame.transform.smoothscale(capture, screen.get_size()), (0,0)) if len(faces) == 0 or len(faces)", "pygame.Rect(0, 0, screen.get_width(), screen.get_height()) pygame.draw.rect(screen, (255, 255, 255), fullscreen) pygame.display.flip()", 
"% (time.time() - start)), (0,50)) pygame.display.flip() for img in os.listdir(os.path.join(imgdir,", "%fs\" % (time.time() - start)), (0,0)) pygame.display.flip() with open(os.path.join(imgdir, \"index.tsv\"))", "!= lastFaceCount: countdown = 10 lastFaceCount = len(faces) elif countdown", "font_size not in fonts: fonts[font_size] = pygame.font.Font(\"Futura.ttc\", font_size) return fonts[font_size]", "{40: pygame.font.Font(\"Futura.ttc\", 40)} def font(font_size = 40): if font_size not", "colour = font_colour, font_size = 40): return font(font_size).render(str(text), True, colour)", "fullscreen = pygame.Rect(0, 0, screen.get_width(), screen.get_height()) pygame.draw.rect(screen, (255, 255, 255),", "fonts: fonts[font_size] = pygame.font.Font(\"Futura.ttc\", font_size) return fonts[font_size] def write(text, colour", "faces: rect = pygame.Rect(bbox.left(), bbox.top(), bbox.width(), bbox.height()) pygame.draw.rect(capture, (255, 0,", "imgdir = sys.argv[1] if len(sys.argv) > 1 else \"images\" photo_samples", "# Init font pygame.font.init() font_colour = 16, 117, 186 fonts", "font pygame.font.init() font_colour = 16, 117, 186 fonts = {40:", "rect = pygame.Rect(bbox.left(), bbox.top(), bbox.width(), bbox.height()) pygame.draw.rect(capture, (255, 0, 0),", "screen.blit(pygame.transform.smoothscale(capture, screen.get_size()), (0,0)) if len(faces) == 0 or len(faces) !=", "screen.blit(write(\"Loading images... 
%fs\" % (time.time() - start)), (0,50)) pygame.display.flip() for", "pygame.display.flip() for img in os.listdir(os.path.join(imgdir, \"thumbnails\")): photo_samples.append(pygame.image.load(os.path.join(imgdir, \"thumbnails\", img))) #", "0, 1) return recognition.align.getAllFaceBoundingBoxes(capture), capture index = recognition.MultiBinaryTree() imgdir =", "img in os.listdir(os.path.join(imgdir, \"thumbnails\")): photo_samples.append(pygame.image.load(os.path.join(imgdir, \"thumbnails\", img))) # Init clock", "time.time() import pygame, numpy import pygame.camera # Init display screen", "= index.nearest(face) screen.blit(pygame.image.load(img), (0,0)) pygame.display.flip() pygame.time.wait(10*1000) # 30s def main():", "> 1 else \"images\" photo_samples = [] screen.blit(write(\"Loading index... %fs\"", "as f: for line in f: line = line.strip().split(\"\\t\") img", "camera.start() # Mainloop def recognize(capture, faces): fullscreen = pygame.Rect(0, 0,", "font(font_size).render(str(text), True, colour) # Init AI import recognition import sys,", "while True: clock.tick(60) for event in pygame.event.get(): if event.type in", "main(): countdown = 10 lastFaceCount = 0 while True: clock.tick(60)", "= [] screen.blit(write(\"Loading index... 
%fs\" % (time.time() - start)), (0,0))", "Init camera pygame.camera.init() cameras = pygame.camera.list_cameras() if not cameras: pygame.quit()", "= time.time() import pygame, numpy import pygame.camera # Init display", "= len(faces) elif countdown == 0: recognize(capture_data, faces) countdown =", "= 0 while True: clock.tick(60) for event in pygame.event.get(): if", "Init display screen = pygame.display.set_mode((0,0), pygame.FULLSCREEN) pygame.display.set_caption(\"Magic Mirror\") #pygame.mouse.set_visible(False) #", "16, 117, 186 fonts = {40: pygame.font.Font(\"Futura.ttc\", 40)} def font(font_size", "- start)), (0,0)) pygame.display.flip() with open(os.path.join(imgdir, \"index.tsv\")) as f: for", "= find_faces(capture) for bbox in faces: rect = pygame.Rect(bbox.left(), bbox.top(),", "0: recognize(capture_data, faces) countdown = 10 else: screen.blit(write(countdown), (0,0)) countdown", "pygame.camera # Init display screen = pygame.display.set_mode((0,0), pygame.FULLSCREEN) pygame.display.set_caption(\"Magic Mirror\")", "def recognize(capture, faces): fullscreen = pygame.Rect(0, 0, screen.get_width(), screen.get_height()) pygame.draw.rect(screen,", "index.insert(description, img) screen.blit(write(\"Loading images... %fs\" % (time.time() - start)), (0,50))", "pygame.font.Font(\"Futura.ttc\", 40)} def font(font_size = 40): if font_size not in", "1 else \"images\" photo_samples = [] screen.blit(write(\"Loading index... 
%fs\" %", "clock = pygame.time.Clock() # Init camera pygame.camera.init() cameras = pygame.camera.list_cameras()", "faces, capture_data = find_faces(capture) for bbox in faces: rect =", "screen = pygame.display.set_mode((0,0), pygame.FULLSCREEN) pygame.display.set_caption(\"Magic Mirror\") #pygame.mouse.set_visible(False) # Init font", "pygame.Rect(bbox.left(), bbox.top(), bbox.width(), bbox.height()) pygame.draw.rect(capture, (255, 0, 0), rect, 2)", "font_size) return fonts[font_size] def write(text, colour = font_colour, font_size =", "return fonts[font_size] def write(text, colour = font_colour, font_size = 40):", "in (pygame.QUIT, pygame.KEYDOWN): return capture = camera.get_image() faces, capture_data =", "0), rect, 2) capture = pygame.transform.flip(capture, True, False) screen.blit(pygame.transform.smoothscale(capture, screen.get_size()),", "for line in f: line = line.strip().split(\"\\t\") img = os.path.join(imgdir,", "img = os.path.join(imgdir, line[0]) description = numpy.array([float(n) for n in", "clock clock = pygame.time.Clock() # Init camera pygame.camera.init() cameras =", "= 10 lastFaceCount = 0 while True: clock.tick(60) for event", "2) capture = pygame.transform.flip(capture, True, False) screen.blit(pygame.transform.smoothscale(capture, screen.get_size()), (0,0)) if", "found, exiting!\" sys.exit(1) camera = pygame.camera.Camera(cameras[0]) camera.start() # Mainloop def", "10 lastFaceCount = 0 while True: clock.tick(60) for event in", "pygame.display.flip() with open(os.path.join(imgdir, \"index.tsv\")) as f: for line in f:", "faces) countdown = 10 else: screen.blit(write(countdown), (0,0)) countdown -= 1", "(pygame.QUIT, pygame.KEYDOWN): return capture = camera.get_image() faces, capture_data = find_faces(capture)", "pygame.display.set_mode((0,0), pygame.FULLSCREEN) pygame.display.set_caption(\"Magic Mirror\") #pygame.mouse.set_visible(False) # Init font pygame.font.init() font_colour", "fonts = {40: pygame.font.Font(\"Futura.ttc\", 40)} def 
font(font_size = 40): if", "pygame.KEYDOWN): return capture = camera.get_image() faces, capture_data = find_faces(capture) for", "(0,0)) pygame.display.flip() pygame.time.wait(10*1000) # 30s def main(): countdown = 10", "/usr/bin/python2 import time start = time.time() import pygame, numpy import", "bbox.top(), bbox.width(), bbox.height()) pygame.draw.rect(capture, (255, 0, 0), rect, 2) capture", "\"index.tsv\")) as f: for line in f: line = line.strip().split(\"\\t\")", "lastFaceCount: countdown = 10 lastFaceCount = len(faces) elif countdown ==", "Mainloop def recognize(capture, faces): fullscreen = pygame.Rect(0, 0, screen.get_width(), screen.get_height())", "True, colour) # Init AI import recognition import sys, os", "event.type in (pygame.QUIT, pygame.KEYDOWN): return capture = camera.get_image() faces, capture_data", "(0,0)) pygame.display.flip() with open(os.path.join(imgdir, \"index.tsv\")) as f: for line in", "= pygame.Rect(bbox.left(), bbox.top(), bbox.width(), bbox.height()) pygame.draw.rect(capture, (255, 0, 0), rect,", "%fs\" % (time.time() - start)), (0,50)) pygame.display.flip() for img in", "lastFaceCount = len(faces) elif countdown == 0: recognize(capture_data, faces) countdown", "40)} def font(font_size = 40): if font_size not in fonts:", "# Init AI import recognition import sys, os def find_faces(pygame_capture):", "img = index.nearest(face) screen.blit(pygame.image.load(img), (0,0)) pygame.display.flip() pygame.time.wait(10*1000) # 30s def", "1) return recognition.align.getAllFaceBoundingBoxes(capture), capture index = recognition.MultiBinaryTree() imgdir = sys.argv[1]", "# Mainloop def recognize(capture, faces): fullscreen = pygame.Rect(0, 0, screen.get_width(),", "pygame.transform.flip(capture, True, False) screen.blit(pygame.transform.smoothscale(capture, screen.get_size()), (0,0)) if len(faces) == 0", "start = time.time() import pygame, numpy import pygame.camera # Init", "= {40: pygame.font.Font(\"Futura.ttc\", 40)} def font(font_size = 40): 
if font_size", "os.listdir(os.path.join(imgdir, \"thumbnails\")): photo_samples.append(pygame.image.load(os.path.join(imgdir, \"thumbnails\", img))) # Init clock clock =", "= 40): if font_size not in fonts: fonts[font_size] = pygame.font.Font(\"Futura.ttc\",", "lastFaceCount = 0 while True: clock.tick(60) for event in pygame.event.get():", "pygame.camera.init() cameras = pygame.camera.list_cameras() if not cameras: pygame.quit() print \"No", "countdown == 0: recognize(capture_data, faces) countdown = 10 else: screen.blit(write(countdown),", "screen.get_height()) pygame.draw.rect(screen, (255, 255, 255), fullscreen) pygame.display.flip() face = recognition.average(recognition.getRepBBox(capture,", "(time.time() - start)), (0,50)) pygame.display.flip() for img in os.listdir(os.path.join(imgdir, \"thumbnails\")):", "or len(faces) != lastFaceCount: countdown = 10 lastFaceCount = len(faces)", "else: screen.blit(write(countdown), (0,0)) countdown -= 1 pygame.display.flip() pygame.quit() if __name__", "face) for face in faces) img = index.nearest(face) screen.blit(pygame.image.load(img), (0,0))", "camera = pygame.camera.Camera(cameras[0]) camera.start() # Mainloop def recognize(capture, faces): fullscreen", "117, 186 fonts = {40: pygame.font.Font(\"Futura.ttc\", 40)} def font(font_size =", "os.path.join(imgdir, line[0]) description = numpy.array([float(n) for n in line[1:]]) index.insert(description,", "pygame.quit() print \"No cameras found, exiting!\" sys.exit(1) camera = pygame.camera.Camera(cameras[0])", "n in line[1:]]) index.insert(description, img) screen.blit(write(\"Loading images... 
%fs\" % (time.time()", "pygame.display.set_caption(\"Magic Mirror\") #pygame.mouse.set_visible(False) # Init font pygame.font.init() font_colour = 16,", "= pygame.camera.Camera(cameras[0]) camera.start() # Mainloop def recognize(capture, faces): fullscreen =", "img))) # Init clock clock = pygame.time.Clock() # Init camera", "capture = numpy.array(pygame.surfarray.pixels3d(pygame_capture)) capture = numpy.swapaxes(capture, 0, 1) return recognition.align.getAllFaceBoundingBoxes(capture),", "clock.tick(60) for event in pygame.event.get(): if event.type in (pygame.QUIT, pygame.KEYDOWN):", "pygame.camera.list_cameras() if not cameras: pygame.quit() print \"No cameras found, exiting!\"", "recognition import sys, os def find_faces(pygame_capture): capture = numpy.array(pygame.surfarray.pixels3d(pygame_capture)) capture", "30s def main(): countdown = 10 lastFaceCount = 0 while", "== 0: recognize(capture_data, faces) countdown = 10 else: screen.blit(write(countdown), (0,0))", "\"No cameras found, exiting!\" sys.exit(1) camera = pygame.camera.Camera(cameras[0]) camera.start() #", "def find_faces(pygame_capture): capture = numpy.array(pygame.surfarray.pixels3d(pygame_capture)) capture = numpy.swapaxes(capture, 0, 1)", "= 10 lastFaceCount = len(faces) elif countdown == 0: recognize(capture_data,", "if len(faces) == 0 or len(faces) != lastFaceCount: countdown =", "0 while True: clock.tick(60) for event in pygame.event.get(): if event.type", "0, screen.get_width(), screen.get_height()) pygame.draw.rect(screen, (255, 255, 255), fullscreen) pygame.display.flip() face", "if event.type in (pygame.QUIT, pygame.KEYDOWN): return capture = camera.get_image() faces,", "Init font pygame.font.init() font_colour = 16, 117, 186 fonts =", "pygame.font.init() font_colour = 16, 117, 186 fonts = {40: pygame.font.Font(\"Futura.ttc\",", "in faces) img = index.nearest(face) screen.blit(pygame.image.load(img), (0,0)) pygame.display.flip() pygame.time.wait(10*1000) #", "- start)), (0,50)) 
pygame.display.flip() for img in os.listdir(os.path.join(imgdir, \"thumbnails\")): photo_samples.append(pygame.image.load(os.path.join(imgdir,", "(255, 255, 255), fullscreen) pygame.display.flip() face = recognition.average(recognition.getRepBBox(capture, face) for", "description = numpy.array([float(n) for n in line[1:]]) index.insert(description, img) screen.blit(write(\"Loading", "for event in pygame.event.get(): if event.type in (pygame.QUIT, pygame.KEYDOWN): return", "font_size = 40): return font(font_size).render(str(text), True, colour) # Init AI", "if len(sys.argv) > 1 else \"images\" photo_samples = [] screen.blit(write(\"Loading", "img) screen.blit(write(\"Loading images... %fs\" % (time.time() - start)), (0,50)) pygame.display.flip()", "print \"No cameras found, exiting!\" sys.exit(1) camera = pygame.camera.Camera(cameras[0]) camera.start()", "= 16, 117, 186 fonts = {40: pygame.font.Font(\"Futura.ttc\", 40)} def", "len(sys.argv) > 1 else \"images\" photo_samples = [] screen.blit(write(\"Loading index...", "in pygame.event.get(): if event.type in (pygame.QUIT, pygame.KEYDOWN): return capture =", "find_faces(pygame_capture): capture = numpy.array(pygame.surfarray.pixels3d(pygame_capture)) capture = numpy.swapaxes(capture, 0, 1) return", "index.nearest(face) screen.blit(pygame.image.load(img), (0,0)) pygame.display.flip() pygame.time.wait(10*1000) # 30s def main(): countdown", "#! /usr/bin/python2 import time start = time.time() import pygame, numpy", "exiting!\" sys.exit(1) camera = pygame.camera.Camera(cameras[0]) camera.start() # Mainloop def recognize(capture,", "sys, os def find_faces(pygame_capture): capture = numpy.array(pygame.surfarray.pixels3d(pygame_capture)) capture = numpy.swapaxes(capture,", "(255, 0, 0), rect, 2) capture = pygame.transform.flip(capture, True, False)", "else \"images\" photo_samples = [] screen.blit(write(\"Loading index... 
%fs\" % (time.time()", "in os.listdir(os.path.join(imgdir, \"thumbnails\")): photo_samples.append(pygame.image.load(os.path.join(imgdir, \"thumbnails\", img))) # Init clock clock", "return font(font_size).render(str(text), True, colour) # Init AI import recognition import", "pygame, numpy import pygame.camera # Init display screen = pygame.display.set_mode((0,0),", "pygame.camera.Camera(cameras[0]) camera.start() # Mainloop def recognize(capture, faces): fullscreen = pygame.Rect(0,", "images... %fs\" % (time.time() - start)), (0,50)) pygame.display.flip() for img", "rect, 2) capture = pygame.transform.flip(capture, True, False) screen.blit(pygame.transform.smoothscale(capture, screen.get_size()), (0,0))", "len(faces) elif countdown == 0: recognize(capture_data, faces) countdown = 10", "def main(): countdown = 10 lastFaceCount = 0 while True:", "len(faces) != lastFaceCount: countdown = 10 lastFaceCount = len(faces) elif", "cameras found, exiting!\" sys.exit(1) camera = pygame.camera.Camera(cameras[0]) camera.start() # Mainloop", "AI import recognition import sys, os def find_faces(pygame_capture): capture =", "(0,50)) pygame.display.flip() for img in os.listdir(os.path.join(imgdir, \"thumbnails\")): photo_samples.append(pygame.image.load(os.path.join(imgdir, \"thumbnails\", img)))", "not in fonts: fonts[font_size] = pygame.font.Font(\"Futura.ttc\", font_size) return fonts[font_size] def", "pygame.time.wait(10*1000) # 30s def main(): countdown = 10 lastFaceCount =", "numpy.array([float(n) for n in line[1:]]) index.insert(description, img) screen.blit(write(\"Loading images... 
%fs\"", "% (time.time() - start)), (0,0)) pygame.display.flip() with open(os.path.join(imgdir, \"index.tsv\")) as", "display screen = pygame.display.set_mode((0,0), pygame.FULLSCREEN) pygame.display.set_caption(\"Magic Mirror\") #pygame.mouse.set_visible(False) # Init", "faces): fullscreen = pygame.Rect(0, 0, screen.get_width(), screen.get_height()) pygame.draw.rect(screen, (255, 255,", "pygame.draw.rect(screen, (255, 255, 255), fullscreen) pygame.display.flip() face = recognition.average(recognition.getRepBBox(capture, face)", "elif countdown == 0: recognize(capture_data, faces) countdown = 10 else:", "fullscreen) pygame.display.flip() face = recognition.average(recognition.getRepBBox(capture, face) for face in faces)", "in faces: rect = pygame.Rect(bbox.left(), bbox.top(), bbox.width(), bbox.height()) pygame.draw.rect(capture, (255,", "= pygame.time.Clock() # Init camera pygame.camera.init() cameras = pygame.camera.list_cameras() if", "line[0]) description = numpy.array([float(n) for n in line[1:]]) index.insert(description, img)", "recognition.average(recognition.getRepBBox(capture, face) for face in faces) img = index.nearest(face) screen.blit(pygame.image.load(img),", "font_colour = 16, 117, 186 fonts = {40: pygame.font.Font(\"Futura.ttc\", 40)}", "os def find_faces(pygame_capture): capture = numpy.array(pygame.surfarray.pixels3d(pygame_capture)) capture = numpy.swapaxes(capture, 0,", "write(text, colour = font_colour, font_size = 40): return font(font_size).render(str(text), True,", "capture index = recognition.MultiBinaryTree() imgdir = sys.argv[1] if len(sys.argv) >", "pygame.draw.rect(capture, (255, 0, 0), rect, 2) capture = pygame.transform.flip(capture, True,", "if font_size not in fonts: fonts[font_size] = pygame.font.Font(\"Futura.ttc\", font_size) return", "\"thumbnails\")): photo_samples.append(pygame.image.load(os.path.join(imgdir, \"thumbnails\", img))) # Init clock clock = pygame.time.Clock()", "True: clock.tick(60) for event in 
pygame.event.get(): if event.type in (pygame.QUIT,", "= numpy.swapaxes(capture, 0, 1) return recognition.align.getAllFaceBoundingBoxes(capture), capture index = recognition.MultiBinaryTree()", "colour) # Init AI import recognition import sys, os def", "= 40): return font(font_size).render(str(text), True, colour) # Init AI import", "bbox.width(), bbox.height()) pygame.draw.rect(capture, (255, 0, 0), rect, 2) capture =", "#pygame.mouse.set_visible(False) # Init font pygame.font.init() font_colour = 16, 117, 186", "= pygame.display.set_mode((0,0), pygame.FULLSCREEN) pygame.display.set_caption(\"Magic Mirror\") #pygame.mouse.set_visible(False) # Init font pygame.font.init()", "capture = numpy.swapaxes(capture, 0, 1) return recognition.align.getAllFaceBoundingBoxes(capture), capture index =", "bbox.height()) pygame.draw.rect(capture, (255, 0, 0), rect, 2) capture = pygame.transform.flip(capture,", "0 or len(faces) != lastFaceCount: countdown = 10 lastFaceCount =", "[] screen.blit(write(\"Loading index... %fs\" % (time.time() - start)), (0,0)) pygame.display.flip()", "pygame.font.Font(\"Futura.ttc\", font_size) return fonts[font_size] def write(text, colour = font_colour, font_size", "in fonts: fonts[font_size] = pygame.font.Font(\"Futura.ttc\", font_size) return fonts[font_size] def write(text,", "= line.strip().split(\"\\t\") img = os.path.join(imgdir, line[0]) description = numpy.array([float(n) for", "import time start = time.time() import pygame, numpy import pygame.camera", "\"thumbnails\", img))) # Init clock clock = pygame.time.Clock() # Init", "line[1:]]) index.insert(description, img) screen.blit(write(\"Loading images... 
%fs\" % (time.time() - start)),", "screen.get_size()), (0,0)) if len(faces) == 0 or len(faces) != lastFaceCount:", "cameras = pygame.camera.list_cameras() if not cameras: pygame.quit() print \"No cameras", "return recognition.align.getAllFaceBoundingBoxes(capture), capture index = recognition.MultiBinaryTree() imgdir = sys.argv[1] if", "fonts[font_size] def write(text, colour = font_colour, font_size = 40): return", "(0,0)) countdown -= 1 pygame.display.flip() pygame.quit() if __name__ == \"__main__\":", "Init AI import recognition import sys, os def find_faces(pygame_capture): capture", "bbox in faces: rect = pygame.Rect(bbox.left(), bbox.top(), bbox.width(), bbox.height()) pygame.draw.rect(capture,", "recognition.align.getAllFaceBoundingBoxes(capture), capture index = recognition.MultiBinaryTree() imgdir = sys.argv[1] if len(sys.argv)", "# Init display screen = pygame.display.set_mode((0,0), pygame.FULLSCREEN) pygame.display.set_caption(\"Magic Mirror\") #pygame.mouse.set_visible(False)", "pygame.time.Clock() # Init camera pygame.camera.init() cameras = pygame.camera.list_cameras() if not", "countdown = 10 lastFaceCount = len(faces) elif countdown == 0:", "pygame.display.flip() face = recognition.average(recognition.getRepBBox(capture, face) for face in faces) img", "cameras: pygame.quit() print \"No cameras found, exiting!\" sys.exit(1) camera =", "countdown = 10 lastFaceCount = 0 while True: clock.tick(60) for", "sys.exit(1) camera = pygame.camera.Camera(cameras[0]) camera.start() # Mainloop def recognize(capture, faces):", "screen.blit(write(\"Loading index... 
%fs\" % (time.time() - start)), (0,0)) pygame.display.flip() with", "capture = pygame.transform.flip(capture, True, False) screen.blit(pygame.transform.smoothscale(capture, screen.get_size()), (0,0)) if len(faces)", "line in f: line = line.strip().split(\"\\t\") img = os.path.join(imgdir, line[0])", "numpy import pygame.camera # Init display screen = pygame.display.set_mode((0,0), pygame.FULLSCREEN)", "186 fonts = {40: pygame.font.Font(\"Futura.ttc\", 40)} def font(font_size = 40):", "# Init camera pygame.camera.init() cameras = pygame.camera.list_cameras() if not cameras:", "= 10 else: screen.blit(write(countdown), (0,0)) countdown -= 1 pygame.display.flip() pygame.quit()", "= recognition.average(recognition.getRepBBox(capture, face) for face in faces) img = index.nearest(face)", "countdown = 10 else: screen.blit(write(countdown), (0,0)) countdown -= 1 pygame.display.flip()", "numpy.swapaxes(capture, 0, 1) return recognition.align.getAllFaceBoundingBoxes(capture), capture index = recognition.MultiBinaryTree() imgdir", "return capture = camera.get_image() faces, capture_data = find_faces(capture) for bbox", "fonts[font_size] = pygame.font.Font(\"Futura.ttc\", font_size) return fonts[font_size] def write(text, colour =", "# 30s def main(): countdown = 10 lastFaceCount = 0", "= pygame.transform.flip(capture, True, False) screen.blit(pygame.transform.smoothscale(capture, screen.get_size()), (0,0)) if len(faces) ==", "with open(os.path.join(imgdir, \"index.tsv\")) as f: for line in f: line", "camera pygame.camera.init() cameras = pygame.camera.list_cameras() if not cameras: pygame.quit() print", "0, 0), rect, 2) capture = pygame.transform.flip(capture, True, False) screen.blit(pygame.transform.smoothscale(capture,", "== 0 or len(faces) != lastFaceCount: countdown = 10 lastFaceCount", "True, False) screen.blit(pygame.transform.smoothscale(capture, screen.get_size()), (0,0)) if len(faces) == 0 or", "len(faces) == 0 or len(faces) != lastFaceCount: countdown = 10", 
"start)), (0,50)) pygame.display.flip() for img in os.listdir(os.path.join(imgdir, \"thumbnails\")): photo_samples.append(pygame.image.load(os.path.join(imgdir, \"thumbnails\",", "40): return font(font_size).render(str(text), True, colour) # Init AI import recognition", "for face in faces) img = index.nearest(face) screen.blit(pygame.image.load(img), (0,0)) pygame.display.flip()", "10 else: screen.blit(write(countdown), (0,0)) countdown -= 1 pygame.display.flip() pygame.quit() if", "screen.get_width(), screen.get_height()) pygame.draw.rect(screen, (255, 255, 255), fullscreen) pygame.display.flip() face =", "countdown -= 1 pygame.display.flip() pygame.quit() if __name__ == \"__main__\": main()", "recognize(capture_data, faces) countdown = 10 else: screen.blit(write(countdown), (0,0)) countdown -=", "(0,0)) if len(faces) == 0 or len(faces) != lastFaceCount: countdown", "# Init clock clock = pygame.time.Clock() # Init camera pygame.camera.init()", "= numpy.array([float(n) for n in line[1:]]) index.insert(description, img) screen.blit(write(\"Loading images...", "= pygame.font.Font(\"Futura.ttc\", font_size) return fonts[font_size] def write(text, colour = font_colour,", "open(os.path.join(imgdir, \"index.tsv\")) as f: for line in f: line =", "= sys.argv[1] if len(sys.argv) > 1 else \"images\" photo_samples =", "in line[1:]]) index.insert(description, img) screen.blit(write(\"Loading images... %fs\" % (time.time() -", "capture = camera.get_image() faces, capture_data = find_faces(capture) for bbox in", "line.strip().split(\"\\t\") img = os.path.join(imgdir, line[0]) description = numpy.array([float(n) for n", "def font(font_size = 40): if font_size not in fonts: fonts[font_size]", "recognition.MultiBinaryTree() imgdir = sys.argv[1] if len(sys.argv) > 1 else \"images\"", "photo_samples = [] screen.blit(write(\"Loading index... 
%fs\" % (time.time() - start)),", "index = recognition.MultiBinaryTree() imgdir = sys.argv[1] if len(sys.argv) > 1", "\"images\" photo_samples = [] screen.blit(write(\"Loading index... %fs\" % (time.time() -", "time start = time.time() import pygame, numpy import pygame.camera #", "for img in os.listdir(os.path.join(imgdir, \"thumbnails\")): photo_samples.append(pygame.image.load(os.path.join(imgdir, \"thumbnails\", img))) # Init", "camera.get_image() faces, capture_data = find_faces(capture) for bbox in faces: rect", "f: line = line.strip().split(\"\\t\") img = os.path.join(imgdir, line[0]) description =", "for n in line[1:]]) index.insert(description, img) screen.blit(write(\"Loading images... %fs\" %", "recognize(capture, faces): fullscreen = pygame.Rect(0, 0, screen.get_width(), screen.get_height()) pygame.draw.rect(screen, (255,", "pygame.display.flip() pygame.time.wait(10*1000) # 30s def main(): countdown = 10 lastFaceCount", "sys.argv[1] if len(sys.argv) > 1 else \"images\" photo_samples = []", "import pygame, numpy import pygame.camera # Init display screen =", "in f: line = line.strip().split(\"\\t\") img = os.path.join(imgdir, line[0]) description", "= pygame.Rect(0, 0, screen.get_width(), screen.get_height()) pygame.draw.rect(screen, (255, 255, 255), fullscreen)", "= os.path.join(imgdir, line[0]) description = numpy.array([float(n) for n in line[1:]])", "line = line.strip().split(\"\\t\") img = os.path.join(imgdir, line[0]) description = numpy.array([float(n)", "import recognition import sys, os def find_faces(pygame_capture): capture = numpy.array(pygame.surfarray.pixels3d(pygame_capture))" ]
[ "in Django. \"\"\" from resolwe.__about__ import ( # noqa: F401", "Resolwe ======= Open source enterprise dataflow engine in Django. \"\"\"", "( # noqa: F401 __author__, __copyright__, __email__, __license__, __summary__, __title__,", "source enterprise dataflow engine in Django. \"\"\" from resolwe.__about__ import", "engine in Django. \"\"\" from resolwe.__about__ import ( # noqa:", "import ( # noqa: F401 __author__, __copyright__, __email__, __license__, __summary__,", "Open source enterprise dataflow engine in Django. \"\"\" from resolwe.__about__", "resolwe.__about__ import ( # noqa: F401 __author__, __copyright__, __email__, __license__,", "\"\"\" from resolwe.__about__ import ( # noqa: F401 __author__, __copyright__,", "F401 __author__, __copyright__, __email__, __license__, __summary__, __title__, __url__, __version__, )", "Django. \"\"\" from resolwe.__about__ import ( # noqa: F401 __author__,", "\"\"\".. Ignore pydocstyle D400. ======= Resolwe ======= Open source enterprise", "pydocstyle D400. ======= Resolwe ======= Open source enterprise dataflow engine", "from resolwe.__about__ import ( # noqa: F401 __author__, __copyright__, __email__,", "======= Open source enterprise dataflow engine in Django. \"\"\" from", "<reponame>plojyon/resolwe<gh_stars>10-100 \"\"\".. Ignore pydocstyle D400. ======= Resolwe ======= Open source", "Ignore pydocstyle D400. ======= Resolwe ======= Open source enterprise dataflow", "noqa: F401 __author__, __copyright__, __email__, __license__, __summary__, __title__, __url__, __version__,", "# noqa: F401 __author__, __copyright__, __email__, __license__, __summary__, __title__, __url__,", "dataflow engine in Django. \"\"\" from resolwe.__about__ import ( #", "======= Resolwe ======= Open source enterprise dataflow engine in Django.", "D400. ======= Resolwe ======= Open source enterprise dataflow engine in", "enterprise dataflow engine in Django. \"\"\" from resolwe.__about__ import (" ]
[ "fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_activation_matrix.png', bbox_inches='tight', transparent=True) # %% [markdown] # ## Visualizing distribution", "%% for column in df.iloc[:, 3:-1].columns: hmap = som.get_weights()[:, :,", "'grupo' class_assignments = som.label_map(np.array(df_train), np.array(df[column])) hmap = np.zeros((som_x, som_y)) for", "value_name='ractopamine') plt.figure(figsize=(10, 10)) sns.set(style=\"whitegrid\", palette=sns.color_palette(\"muted\", n_colors=6, desat=1.0)) ax = sns.countplot(data=df_melt,", "= SOM(x=som_x, y=None, input_len=df_train.shape[1], learning_rate=0.5, neighborhood_radius=1.0, neighborhood_function='gaussian', cyclic_x=True, cyclic_y=True, data=df_train)", ":, df.iloc[:, 3:-1].columns.get_loc(column)].T fig = plt.figure(figsize=(16, 9)) ax = sns.heatmap(hmap,", "ratio between the # first two principal components of the", "by filename:\", df.duplicated(subset=['file_name']).value_counts(), sep='\\n') df.drop_duplicates(subset=['file_name'], inplace=True) print(\"Duplicates by (datetime, ala,", "by 'grupo' print(df.groupby('grupo')['rac'].count()) column = 'grupo' class_assignments = som.label_map(np.array(df_train), np.array(df[column]))", "input_len=df_train.shape[1], learning_rate=0.5, neighborhood_radius=1.0, neighborhood_function='gaussian', cyclic_x=True, cyclic_y=True, data=df_train) # Training SOM", "df_train = df.iloc[:, 3:-1].copy() df_train = scaler.fit_transform(df_train) # %% #", "np.array(df[column])) hmap = np.zeros((som_x, som_y)) for i, j in sorted(class_assignments.keys()):", "continue hmap = hmap.T fig = plt.figure(figsize=(16, 9)) ax =", "pickle import seaborn as sns import sklearn.preprocessing from python_som import", "ignore df_tmp = df_tmp.reset_index() df_tmp['hour'] = df_tmp['datetime'].dt.hour plt.figure(figsize=(10, 10)) sns.set(style=\"whitegrid\",", "df.drop_duplicates(subset=['file_name'], inplace=True) print(\"Duplicates by (datetime, ala, grupo):\", 
df.duplicated(subset=['datetime', 'ala', 'grupo']).value_counts(),", "ax = sns.countplot(data=df_melt, x='ractopamine', hue='ractopamine') for p in ax.patches: ax.annotate(f'\\n{p.get_height()}',", "first two principal components of the train dataset som_x: int", "som_x, som_y = som.get_shape() print('SOM shape:', (som_x, som_y)) # %%", "frequent label # %% df['days'] = df.index.date df['days'] = (df['days']", "= pickle.load(f) except FileNotFoundError: som = SOM(x=som_x, y=None, input_len=df_train.shape[1], learning_rate=0.5,", "-1 hmap = hmap.T fig = plt.figure(figsize=(16, 9)) ax =", "try: hmap[i][j] = class_assignments[(i, j)].most_common()[0][0] except Exception: hmap[i][j] = -1", "# Training SOM som.weight_initialization(mode='linear', data=df_train) som.train(data=df_train, mode='random', verbose=True) with open(f'./{FILE_PREFIX}.obj',", "df_tmp['hour'] = df_tmp['datetime'].dt.hour plt.figure(figsize=(10, 10)) sns.set(style=\"whitegrid\", palette=sns.color_palette(\"muted\", n_colors=6, desat=1.0)) sns.barplot(y=df_tmp['hour'],", "'ala', 'grupo']).value_counts(), sep='\\n') df.drop_duplicates(subset=['datetime', 'ala', 'grupo'], inplace=True) # Rebuilding dataframe", "som_x: int = 64 try: with open(f'./{FILE_PREFIX}.obj', 'rb') as f:", "plt.subplots(1, 2, figsize=(16, 9)) sns.heatmap(umatrix.T, cmap='bone_r', ax=ax1, robust=True) sns.heatmap(som.activation_matrix(data=df_train).T, cmap='mako',", "%% # Dropping tail of dataset for class balancing #", "for and dropping duplicates # %% # Resetting index for", "column in df.iloc[:, 3:-1].columns: hmap = som.get_weights()[:, :, df.iloc[:, 3:-1].columns.get_loc(column)].T", "# first two principal components of the train dataset som_x:", "sns.heatmap(hmap, cmap=sns.diverging_palette(150, 250, s=100, l=20, sep=1, n=26, center='light'), center=12) ax.invert_yaxis()", "9)) ax = sns.heatmap(hmap, cmap=sns.diverging_palette(150, 250, s=100, l=20, sep=1, n=26,", "the # first two principal components of the train 
dataset", "df['days'] = df.index.date df['days'] = (df['days'] - df['days'][0]) df['days'] =", "j in sorted(class_assignments.keys()): try: hmap[i][j] = class_assignments[(i, j)].most_common()[0][0] + 1", "inplace=True) # %% # Filtering dataset by 'group' df =", "will be assigned based on the ratio between the #", "hue=df_tmp['rac'], orient='h') plt.draw() # %% df_melt = pd.melt(df, value_vars=['rac'], value_name='ractopamine')", "import librosa.display as lrdisp import numpy as np import pandas", "MinMaxScaler scaler = sklearn.preprocessing.MinMaxScaler(feature_range=(0, 1)) df_train = df.iloc[:, 3:-1].copy() df_train", "= plt.figure(figsize=(16, 9)) ax = sns.heatmap(hmap, cmap=sns.color_palette(palette=[\"#000000\", \"blue\", \"orange\"], n_colors=3),", "= df.iloc[:, 3:-1].copy() df_train = scaler.fit_transform(df_train) # %% # Defining", "cyclic_x=True, cyclic_y=True, data=df_train) # Training SOM som.weight_initialization(mode='linear', data=df_train) som.train(data=df_train, mode='random',", "(day, hour, ...) 
# Each node is colorized according to", "(som_x, som_y)) # %% # Visualizing distance matrix and activation", "column = 'days' class_assignments = som.label_map(np.array(df_train), np.array(df[column])) hmap = np.zeros((som_x,", "df_train = scaler.fit_transform(df_train) # %% # Defining first element of", "distribution of features # %% for column in df.iloc[:, 3:-1].columns:", "tail of dataset for class balancing # tail_size = abs(", "j)].most_common()[0][0] + 1 except Exception: continue hmap = hmap.T fig", "ax.patches: ax.annotate(f'\\n{p.get_height()}', (p.get_x() + 0.2, p.get_height()), ha='center', va='top', color='white', size=18)", "type: ignore df.sort_index(inplace=True) # %% [markdown] # ## Checking for", "Filtering dataset by 'group' df = df[df['grupo'] == 1] #", "matrix and activation matrix umatrix = som.distance_matrix() fig, (ax1, ax2)", "df['days'][0]) df['days'] = df['days'].apply(lambda x: x.days) df['hour'] = df.index.hour #", "Exception: continue hmap = hmap.T fig = plt.figure(figsize=(16, 9)) ax", "import SOM FILE_PREFIX = 'som64_u_grupo1' # %% [markdown] # #", "scaler = sklearn.preprocessing.MinMaxScaler(feature_range=(0, 1)) df_train = df.iloc[:, 3:-1].copy() df_train =", "of audios by metadata (day, hour, ...) 
# Each node", "# %% # Dropping tail of dataset for class balancing", "of dataset for class balancing # tail_size = abs( #", "FileNotFoundError: som = SOM(x=som_x, y=None, input_len=df_train.shape[1], learning_rate=0.5, neighborhood_radius=1.0, neighborhood_function='gaussian', cyclic_x=True,", "f) # %% som_x, som_y = som.get_shape() print('SOM shape:', (som_x,", "mode='random', verbose=True) with open(f'./{FILE_PREFIX}.obj', 'wb') as f: pickle.dump(som, f) #", "cmap=sns.color_palette(palette=[\"#000000\", \"blue\", \"orange\"], n_colors=3), cbar_kws={'ticks': [0, 1, 2]}) ax.invert_yaxis() fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_rac.png',", "som_y)) for i, j in sorted(class_assignments.keys()): try: hmap[i][j] = class_assignments[(i,", "in sorted(class_assignments.keys()): try: hmap[i][j] = class_assignments[(i, j)].most_common()[0][0] except Exception: hmap[i][j]", "= pd.melt(df, value_vars=['rac'], value_name='ractopamine') plt.figure(figsize=(10, 10)) sns.set(style=\"whitegrid\", palette=sns.color_palette(\"muted\", n_colors=6, desat=1.0))", "hmap.T fig = plt.figure(figsize=(16, 9)) ax = sns.heatmap(hmap, cmap=sns.diverging_palette(150, 250,", "'days' print(df.groupby('days')['rac'].count()) column = 'days' class_assignments = som.label_map(np.array(df_train), np.array(df[column])) hmap", "'rac' distribution class_assignments = som.label_map(np.array(df_train), np.array(df['rac'])) hmap = np.zeros((som_x, som_y))", "sns.barplot(y=df_tmp['hour'], x=df_tmp['count'], hue=df_tmp['rac'], orient='h') plt.draw() # %% df_melt = pd.melt(df,", "metadata (day, hour, ...) 
# Each node is colorized according", "distance matrix and activation matrix umatrix = som.distance_matrix() fig, (ax1,", "Dropping tail of dataset for class balancing # tail_size =", "Rebuilding dataframe index df.set_index('datetime', inplace=True) # %% # Filtering dataset", "df_tmp['count'] = df_tmp['file_name'] del df_tmp['file_name'] df_tmp['rac'] = False df_tmp.loc['2020-09-22':, 'rac']", "ala, grupo):\", df.duplicated(subset=['datetime', 'ala', 'grupo']).value_counts(), sep='\\n') df.drop_duplicates(subset=['datetime', 'ala', 'grupo'], inplace=True)", "class_assignments = som.label_map(np.array(df_train), np.array(df[column])) hmap = np.zeros((som_x, som_y)) for i,", "most frequent label # %% df['days'] = df.index.date df['days'] =", "# %% for column in df.iloc[:, 3:-1].columns: hmap = som.get_weights()[:,", "train dataset som_x: int = 64 try: with open(f'./{FILE_PREFIX}.obj', 'rb')", "plt.show() # %% # Visualizing by 'hour' print(df.groupby('hour')['rac'].count()) column =", "of SOM shape # Second element will be assigned based", "scaler.fit_transform(df_train) # %% # Defining first element of SOM shape", "dropping duplicates # %% # Resetting index for duplicate analysis", "[markdown] # ## Visualizing distribution of audios by metadata (day,", "separately fig = plt.figure(figsize=(16, 9)) ax = sns.heatmap(umatrix.T, cmap='bone_r', robust=True)", "ax.invert_yaxis() fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_activation_matrix.png', bbox_inches='tight', transparent=True) # %% [markdown] # ## Visualizing", "...) 
# Each node is colorized according to its most", "is colorized according to its most frequent label # %%", "df_tmp = df_tmp.reset_index() df_tmp['hour'] = df_tmp['datetime'].dt.hour plt.figure(figsize=(10, 10)) sns.set(style=\"whitegrid\", palette=sns.color_palette(\"muted\",", "fig = plt.figure(figsize=(16, 9)) ax = sns.heatmap(som.activation_matrix(data=df_train).T, cmap='mako', robust=True) ax.invert_yaxis()", "%% # Visualizing by 'days' print(df.groupby('days')['rac'].count()) column = 'days' class_assignments", "from python_som import SOM FILE_PREFIX = 'som64_u_grupo1' # %% [markdown]", "plt.figure(figsize=(16, 9)) ax = sns.heatmap(hmap, cmap=sns.color_palette(palette=[\"#000000\", \"blue\", \"orange\"], n_colors=3), cbar_kws={'ticks':", "neighborhood_radius=1.0, neighborhood_function='gaussian', cyclic_x=True, cyclic_y=True, data=df_train) # Training SOM som.weight_initialization(mode='linear', data=df_train)", "and activation matrix umatrix = som.distance_matrix() fig, (ax1, ax2) =", "for i, j in sorted(class_assignments.keys()): try: hmap[i][j] = class_assignments[(i, j)].most_common()[0][0]", "# %% [markdown] # ## Visualizing distribution of sample dates", "robust=True, cmap='BrBG') ax.invert_yaxis() fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_{column}.png', bbox_inches='tight', transparent=True) plt.close(fig=fig) # %% [markdown]", "# %% # Defining first element of SOM shape #", "df_tmp.loc['2020-09-22':, 'rac'] = True # type: ignore plt.figure(figsize=(10, 10)) sns.set(style=\"whitegrid\",", "%% # Defining first element of SOM shape # Second", "class_assignments = som.label_map(np.array(df_train), np.array(df['rac'])) hmap = np.zeros((som_x, som_y)) for i,", "transparent=True) fig = plt.figure(figsize=(16, 9)) ax = sns.heatmap(som.activation_matrix(data=df_train).T, cmap='mako', robust=True)", "som.label_map(np.array(df_train), np.array(df[column])) hmap = np.zeros((som_x, som_y)) for i, j in", "ha='center', va='top', color='white', size=18) 
plt.draw() # %% # using sklearn's", "np import pandas as pd import pickle import seaborn as", "index_col=0, verbose=True) df.index = pd.to_datetime(df.index) df['rac'] = False df.loc['2020-09-22':, 'rac']", "dataset # %% df = pd.read_csv('features_means.csv', index_col=0, verbose=True) df.index =", "robust=True) ax1.invert_yaxis() ax2.invert_yaxis() fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_umatrix_activation.png', bbox_inches='tight', transparent=True) plt.draw() # %% #", "fig = plt.figure(figsize=(16, 9)) ax = sns.heatmap(hmap, cmap='viridis') ax.invert_yaxis() fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_{column}.png',", "# %% df['days'] = df.index.date df['days'] = (df['days'] - df['days'][0])", "palette=sns.color_palette(\"muted\", n_colors=6, desat=1.0)) sns.barplot(y=df_tmp.index, x=df_tmp['count'], hue=df_tmp['rac']) plt.draw() df_tmp = pd.DataFrame(df['file_name'].resample('1H').count())", "hue=df_tmp['rac']) plt.draw() df_tmp = pd.DataFrame(df['file_name'].resample('1H').count()) df_tmp['count'] = df_tmp['file_name'] del df_tmp['file_name']", "[markdown] # ## Checking for and dropping duplicates # %%", "= som.get_shape() print('SOM shape:', (som_x, som_y)) # %% # Visualizing", "sns.heatmap(som.activation_matrix(data=df_train).T, cmap='mako', ax=ax2, robust=True) ax1.invert_yaxis() ax2.invert_yaxis() fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_umatrix_activation.png', bbox_inches='tight', transparent=True) plt.draw()", "i, j in sorted(class_assignments.keys()): try: hmap[i][j] = class_assignments[(i, j)].most_common()[0][0] except", "= pd.read_csv('features_means.csv', index_col=0, verbose=True) df.index = pd.to_datetime(df.index) df['rac'] = False", "- df['days'][0]) df['days'] = df['days'].apply(lambda x: x.days) df['hour'] = df.index.hour", "= som.label_map(np.array(df_train), np.array(df['rac'])) hmap = np.zeros((som_x, som_y)) for i, j", "by 'hour' print(df.groupby('hour')['rac'].count()) column = 'hour' class_assignments = 
som.label_map(np.array(df_train), np.array(df[column]))", "pd.DataFrame(df['file_name'].resample('1D').count()) df_tmp['count'] = df_tmp['file_name'] del df_tmp['file_name'] df_tmp['rac'] = False df_tmp.loc['2020-09-22':,", "plt.figure(figsize=(10, 10)) sns.set(style=\"whitegrid\", palette=sns.color_palette(\"muted\", n_colors=6, desat=1.0)) sns.barplot(y=df_tmp.index, x=df_tmp['count'], hue=df_tmp['rac']) plt.draw()", "va='top', color='white', size=18) plt.draw() # %% # using sklearn's MinMaxScaler", "ax = sns.heatmap(hmap, robust=True, cmap='BrBG') ax.invert_yaxis() fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_{column}.png', bbox_inches='tight', transparent=True) plt.close(fig=fig)", "first element of SOM shape # Second element will be", "# type: ignore df_tmp = df_tmp.reset_index() df_tmp['hour'] = df_tmp['datetime'].dt.hour plt.figure(figsize=(10,", "= sns.heatmap(hmap, robust=True, cmap='BrBG') ax.invert_yaxis() fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_{column}.png', bbox_inches='tight', transparent=True) plt.close(fig=fig) #", "df['rac'] = False df.loc['2020-09-22':, 'rac'] = True # type: ignore", "print(df.groupby('days')['rac'].count()) column = 'days' class_assignments = som.label_map(np.array(df_train), np.array(df[column])) hmap =", "pd import pickle import seaborn as sns import sklearn.preprocessing from", "# # Loading dataset # %% df = pd.read_csv('features_means.csv', index_col=0,", "plt.close(fig=fig) # %% [markdown] # ## Visualizing distribution of audios", "seaborn as sns import sklearn.preprocessing from python_som import SOM FILE_PREFIX", "# using sklearn's MinMaxScaler scaler = sklearn.preprocessing.MinMaxScaler(feature_range=(0, 1)) df_train =", "%% df_tmp = pd.DataFrame(df['file_name'].resample('1D').count()) df_tmp['count'] = df_tmp['file_name'] del df_tmp['file_name'] df_tmp['rac']", "palette=sns.color_palette(\"muted\", n_colors=6, desat=1.0)) sns.barplot(y=df_tmp['hour'], x=df_tmp['count'], hue=df_tmp['rac'], orient='h') 
plt.draw() # %%", "som.weight_initialization(mode='linear', data=df_train) som.train(data=df_train, mode='random', verbose=True) with open(f'./{FILE_PREFIX}.obj', 'wb') as f:", "%% # Visualizing 'rac' distribution class_assignments = som.label_map(np.array(df_train), np.array(df['rac'])) hmap", "SOM shape # Second element will be assigned based on", "sorted(class_assignments.keys()): try: hmap[i][j] = class_assignments[(i, j)].most_common()[0][0] + 1 except Exception:", "orient='h') plt.draw() # %% df_melt = pd.melt(df, value_vars=['rac'], value_name='ractopamine') plt.figure(figsize=(10,", "# Defining first element of SOM shape # Second element", "df['days'] = (df['days'] - df['days'][0]) df['days'] = df['days'].apply(lambda x: x.days)", "3:-1].columns.get_loc(column)].T fig = plt.figure(figsize=(16, 9)) ax = sns.heatmap(hmap, robust=True, cmap='BrBG')", "matplotlib.pyplot as plt # import librosa as lr # import", "+ 1 except Exception: continue hmap = hmap.T fig =", "class_assignments[(i, j)].most_common()[0][0] except Exception: hmap[i][j] = -1 hmap = hmap.T", "10)) sns.set(style=\"whitegrid\", palette=sns.color_palette(\"muted\", n_colors=6, desat=1.0)) ax = sns.countplot(data=df_melt, x='ractopamine', hue='ractopamine')", "python-som with audio dataset # %% [markdown] # # Imports", "duplicate analysis df.reset_index(inplace=True) print(\"Duplicates by filename:\", df.duplicated(subset=['file_name']).value_counts(), sep='\\n') df.drop_duplicates(subset=['file_name'], inplace=True)", "som.get_weights()[:, :, df.iloc[:, 3:-1].columns.get_loc(column)].T fig = plt.figure(figsize=(16, 9)) ax =", "fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_{column}.png', bbox_inches='tight', transparent=True) plt.show() # %% # Visualizing by 'days'", "ax2) = plt.subplots(1, 2, figsize=(16, 9)) sns.heatmap(umatrix.T, cmap='bone_r', ax=ax1, robust=True)", "1]) - len(df[df['rac'].astype(int) == 0])) # df.drop(df.tail(tail_size).index, inplace=True) # %%", "ax = sns.heatmap(hmap, 
cmap=sns.color_palette(palette=[\"#000000\", \"blue\", \"orange\"], n_colors=3), cbar_kws={'ticks': [0, 1,", "Imports # %% import matplotlib.pyplot as plt # import librosa", "cmap=sns.color_palette(palette=[\"#000000\", \"blue\", \"orange\"], n_colors=3), cbar_kws={'ticks': [0, 1, 2]}) ax.invert_yaxis() fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_{column}.png',", "# %% # Visualizing by 'hour' print(df.groupby('hour')['rac'].count()) column = 'hour'", "ax.invert_yaxis() fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_{column}.png', bbox_inches='tight', transparent=True) plt.close(fig=fig) # %% [markdown] # ##", "bbox_inches='tight', transparent=True) plt.close(fig=fig) # %% [markdown] # ## Visualizing distribution", "plt.figure(figsize=(10, 10)) sns.set(style=\"whitegrid\", palette=sns.color_palette(\"muted\", n_colors=6, desat=1.0)) sns.barplot(y=df_tmp['hour'], x=df_tmp['count'], hue=df_tmp['rac'], orient='h')", "as sns import sklearn.preprocessing from python_som import SOM FILE_PREFIX =", "cmap='bone_r', ax=ax1, robust=True) sns.heatmap(som.activation_matrix(data=df_train).T, cmap='mako', ax=ax2, robust=True) ax1.invert_yaxis() ax2.invert_yaxis() fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_umatrix_activation.png',", "element of SOM shape # Second element will be assigned", "# Rebuilding dataframe index df.set_index('datetime', inplace=True) # %% # Filtering", "n_colors=3), cbar_kws={'ticks': [0, 1, 2]}) ax.invert_yaxis() fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_{column}.png', bbox_inches='tight', transparent=True) plt.show()", "= True # type: ignore df.sort_index(inplace=True) # %% [markdown] #", "= class_assignments[(i, j)].most_common()[0][0] except Exception: hmap[i][j] = 0 hmap =", "x.days) df['hour'] = df.index.hour # %% # Visualizing 'rac' distribution", "= df.index.hour # %% # Visualizing 'rac' distribution class_assignments =", "1] # %% # Dropping tail of dataset for class", "Visualizing distribution of sample dates # %% df_tmp 
= pd.DataFrame(df['file_name'].resample('1D').count())", "# import librosa.display as lrdisp import numpy as np import", "\"blue\", \"orange\"], n_colors=3), cbar_kws={'ticks': [0, 1, 2]}) ax.invert_yaxis() fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_rac.png', bbox_inches='tight',", "cbar_kws={'ticks': [0, 1, 2]}) ax.invert_yaxis() fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_{column}.png', bbox_inches='tight', transparent=True) plt.show() #", "# %% df = pd.read_csv('features_means.csv', index_col=0, verbose=True) df.index = pd.to_datetime(df.index)", "bbox_inches='tight', transparent=True) plt.show() # %% # Visualizing by 'grupo' print(df.groupby('grupo')['rac'].count())", "True # type: ignore df.sort_index(inplace=True) # %% [markdown] # ##", "cyclic_y=True, data=df_train) # Training SOM som.weight_initialization(mode='linear', data=df_train) som.train(data=df_train, mode='random', verbose=True)", "= sklearn.preprocessing.MinMaxScaler(feature_range=(0, 1)) df_train = df.iloc[:, 3:-1].copy() df_train = scaler.fit_transform(df_train)", "tail_size = abs( # len(df[df['rac'].astype(int) == 1]) - len(df[df['rac'].astype(int) ==", "filename:\", df.duplicated(subset=['file_name']).value_counts(), sep='\\n') df.drop_duplicates(subset=['file_name'], inplace=True) print(\"Duplicates by (datetime, ala, grupo):\",", "colorized according to its most frequent label # %% df['days']", "som_y = som.get_shape() print('SOM shape:', (som_x, som_y)) # %% #", "cmap='viridis') ax.invert_yaxis() fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_{column}.png', bbox_inches='tight', transparent=True) plt.show() # %% # Visualizing", "dataset som_x: int = 64 try: with open(f'./{FILE_PREFIX}.obj', 'rb') as", "'rb') as f: som = pickle.load(f) except FileNotFoundError: som =", "plt.figure(figsize=(16, 9)) ax = sns.heatmap(umatrix.T, cmap='bone_r', robust=True) ax.invert_yaxis() fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_umatrix.png', bbox_inches='tight',", "# 
len(df[df['rac'].astype(int) == 1]) - len(df[df['rac'].astype(int) == 0])) # df.drop(df.tail(tail_size).index,", "'hour' class_assignments = som.label_map(np.array(df_train), np.array(df[column])) hmap = np.zeros((som_x, som_y)) for", "# %% [markdown] # ## Visualizing distribution of features #", "df['days'] = df['days'].apply(lambda x: x.days) df['hour'] = df.index.hour # %%", "Second element will be assigned based on the ratio between", "import matplotlib.pyplot as plt # import librosa as lr #", "principal components of the train dataset som_x: int = 64", "# %% # Visualizing by 'days' print(df.groupby('days')['rac'].count()) column = 'days'", "desat=1.0)) ax = sns.countplot(data=df_melt, x='ractopamine', hue='ractopamine') for p in ax.patches:", "= df[df['grupo'] == 1] # %% # Dropping tail of", "inplace=True) # %% [markdown] # ## Visualizing distribution of sample", "sep=1, n=26, center='light'), center=12) ax.invert_yaxis() fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_{column}.png', bbox_inches='tight', transparent=True) plt.show() #", "\"orange\"], n_colors=3), cbar_kws={'ticks': [0, 1, 2]}) ax.invert_yaxis() fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_{column}.png', bbox_inches='tight', transparent=True)", "= hmap.T fig = plt.figure(figsize=(16, 9)) ax = sns.heatmap(hmap, cmap=sns.color_palette(palette=[\"#000000\",", "hmap[i][j] = class_assignments[(i, j)].most_common()[0][0] + 1 except Exception: continue hmap", "inplace=True) print(\"Duplicates by (datetime, ala, grupo):\", df.duplicated(subset=['datetime', 'ala', 'grupo']).value_counts(), sep='\\n')", "%% [markdown] # ## Visualizing distribution of sample dates #", "transparent=True) plt.show() # %% # Visualizing by 'hour' print(df.groupby('hour')['rac'].count()) column", "python_som import SOM FILE_PREFIX = 'som64_u_grupo1' # %% [markdown] #", "x: x.days) df['hour'] = df.index.hour # %% # Visualizing 'rac'", "plt # import librosa as lr # import librosa.display as", "hmap[i][j] = 
class_assignments[(i, j)].most_common()[0][0] except Exception: hmap[i][j] = -1 hmap", "%% # Visualizing distance matrix anc activation matrix separately fig", "+ 0.2, p.get_height()), ha='center', va='top', color='white', size=18) plt.draw() # %%", "except Exception: hmap[i][j] = 0 hmap = hmap.T fig =", "# df.drop(df.tail(tail_size).index, inplace=True) # %% [markdown] # ## Visualizing distribution", "\"orange\"], n_colors=3), cbar_kws={'ticks': [0, 1, 2]}) ax.invert_yaxis() fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_rac.png', bbox_inches='tight', transparent=True)", "# tail_size = abs( # len(df[df['rac'].astype(int) == 1]) - len(df[df['rac'].astype(int)", "ignore df.sort_index(inplace=True) # %% [markdown] # ## Checking for and", "transparent=True) # %% [markdown] # ## Visualizing distribution of features", "cmap=sns.diverging_palette(150, 250, s=100, l=20, sep=1, n=26, center='light'), center=12) ax.invert_yaxis() fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_{column}.png',", "in sorted(class_assignments.keys()): try: hmap[i][j] = class_assignments[(i, j)].most_common()[0][0] + 1 except", "= df_tmp.reset_index() df_tmp['hour'] = df_tmp['datetime'].dt.hour plt.figure(figsize=(10, 10)) sns.set(style=\"whitegrid\", palette=sns.color_palette(\"muted\", n_colors=6,", "df.index = pd.to_datetime(df.index) df['rac'] = False df.loc['2020-09-22':, 'rac'] = True", "%% # Resetting index for duplicate analysis df.reset_index(inplace=True) print(\"Duplicates by", "import pandas as pd import pickle import seaborn as sns", "matrix separately fig = plt.figure(figsize=(16, 9)) ax = sns.heatmap(umatrix.T, cmap='bone_r',", "= False df.loc['2020-09-22':, 'rac'] = True # type: ignore df.sort_index(inplace=True)", "sample dates # %% df_tmp = pd.DataFrame(df['file_name'].resample('1D').count()) df_tmp['count'] = df_tmp['file_name']", "robust=True) sns.heatmap(som.activation_matrix(data=df_train).T, cmap='mako', ax=ax2, robust=True) ax1.invert_yaxis() ax2.invert_yaxis() 
fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_umatrix_activation.png', bbox_inches='tight', transparent=True)", "0 hmap = hmap.T fig = plt.figure(figsize=(16, 9)) ax =", "label # %% df['days'] = df.index.date df['days'] = (df['days'] -", "sns.heatmap(hmap, cmap=sns.color_palette(palette=[\"#000000\", \"blue\", \"orange\"], n_colors=3), cbar_kws={'ticks': [0, 1, 2]}) ax.invert_yaxis()", "df_tmp['file_name'] del df_tmp['file_name'] df_tmp['rac'] = False df_tmp.loc['2020-09-22':, 'rac'] = True", "on the ratio between the # first two principal components", "sns.heatmap(hmap, cmap='viridis') ax.invert_yaxis() fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_{column}.png', bbox_inches='tight', transparent=True) plt.show() # %% #", "cmap='bone_r', robust=True) ax.invert_yaxis() fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_umatrix.png', bbox_inches='tight', transparent=True) fig = plt.figure(figsize=(16, 9))", "cmap='mako', ax=ax2, robust=True) ax1.invert_yaxis() ax2.invert_yaxis() fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_umatrix_activation.png', bbox_inches='tight', transparent=True) plt.draw() #", "plt.figure(figsize=(16, 9)) ax = sns.heatmap(hmap, cmap=sns.diverging_palette(150, 250, s=100, l=20, sep=1,", "'grupo'], inplace=True) # Rebuilding dataframe index df.set_index('datetime', inplace=True) # %%", "desat=1.0)) sns.barplot(y=df_tmp['hour'], x=df_tmp['count'], hue=df_tmp['rac'], orient='h') plt.draw() # %% df_melt =", "# ## Visualizing distribution of features # %% for column", "250, s=100, l=20, sep=1, n=26, center='light'), center=12) ax.invert_yaxis() fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_{column}.png', bbox_inches='tight',", "'rac'] = True # type: ignore df.sort_index(inplace=True) # %% [markdown]", "del df_tmp['file_name'] df_tmp['rac'] = False df_tmp.loc['2020-09-22':, 'rac'] = True #", "[0, 1, 2]}) ax.invert_yaxis() fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_rac.png', bbox_inches='tight', transparent=True) 
plt.show() # %%", "df_tmp = pd.DataFrame(df['file_name'].resample('1H').count()) df_tmp['count'] = df_tmp['file_name'] del df_tmp['file_name'] df_tmp['rac'] =", "df['days'].apply(lambda x: x.days) df['hour'] = df.index.hour # %% # Visualizing", "%% # Visualizing distance matrix and activation matrix umatrix =", "9)) ax = sns.heatmap(hmap, robust=True, cmap='BrBG') ax.invert_yaxis() fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_{column}.png', bbox_inches='tight', transparent=True)", "inplace=True) # Rebuilding dataframe index df.set_index('datetime', inplace=True) # %% #", "0.2, p.get_height()), ha='center', va='top', color='white', size=18) plt.draw() # %% #", "of the train dataset som_x: int = 64 try: with", "fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_umatrix_activation.png', bbox_inches='tight', transparent=True) plt.draw() # %% # Visualizing distance matrix", "size=18) plt.draw() # %% # using sklearn's MinMaxScaler scaler =", "hmap.T fig = plt.figure(figsize=(16, 9)) ax = sns.heatmap(hmap, cmap=sns.color_palette(palette=[\"#000000\", \"blue\",", "j)].most_common()[0][0] except Exception: hmap[i][j] = -1 hmap = hmap.T fig", "lr # import librosa.display as lrdisp import numpy as np", "as lr # import librosa.display as lrdisp import numpy as", "data=df_train) som.train(data=df_train, mode='random', verbose=True) with open(f'./{FILE_PREFIX}.obj', 'wb') as f: pickle.dump(som,", "Defining first element of SOM shape # Second element will", "Visualizing 'rac' distribution class_assignments = som.label_map(np.array(df_train), np.array(df['rac'])) hmap = np.zeros((som_x,", "3:-1].columns: hmap = som.get_weights()[:, :, df.iloc[:, 3:-1].columns.get_loc(column)].T fig = plt.figure(figsize=(16,", "umatrix = som.distance_matrix() fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(16,", "## Checking for and dropping duplicates # %% # Resetting", "= True # type: ignore df_tmp = df_tmp.reset_index() df_tmp['hour'] =", "robust=True) ax.invert_yaxis() 
fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_umatrix.png', bbox_inches='tight', transparent=True) fig = plt.figure(figsize=(16, 9)) ax", "color='white', size=18) plt.draw() # %% # using sklearn's MinMaxScaler scaler", "= np.zeros((som_x, som_y)) for i, j in sorted(class_assignments.keys()): try: hmap[i][j]", "Exception: hmap[i][j] = -1 hmap = hmap.T fig = plt.figure(figsize=(16,", "3:-1].copy() df_train = scaler.fit_transform(df_train) # %% # Defining first element", "be assigned based on the ratio between the # first", "column = 'grupo' class_assignments = som.label_map(np.array(df_train), np.array(df[column])) hmap = np.zeros((som_x,", "transparent=True) plt.show() # %% # Visualizing by 'days' print(df.groupby('days')['rac'].count()) column", "the ratio between the # first two principal components of", "= df_tmp['datetime'].dt.hour plt.figure(figsize=(10, 10)) sns.set(style=\"whitegrid\", palette=sns.color_palette(\"muted\", n_colors=6, desat=1.0)) sns.barplot(y=df_tmp['hour'], x=df_tmp['count'],", "'rac'] = True # type: ignore df_tmp = df_tmp.reset_index() df_tmp['hour']", "[markdown] # ## Visualizing distribution of sample dates # %%", "verbose=True) df.index = pd.to_datetime(df.index) df['rac'] = False df.loc['2020-09-22':, 'rac'] =", "= df['days'].apply(lambda x: x.days) df['hour'] = df.index.hour # %% #", "= pd.DataFrame(df['file_name'].resample('1H').count()) df_tmp['count'] = df_tmp['file_name'] del df_tmp['file_name'] df_tmp['rac'] = False", "sep='\\n') df.drop_duplicates(subset=['datetime', 'ala', 'grupo'], inplace=True) # Rebuilding dataframe index df.set_index('datetime',", "= sns.countplot(data=df_melt, x='ractopamine', hue='ractopamine') for p in ax.patches: ax.annotate(f'\\n{p.get_height()}', (p.get_x()", "audios by metadata (day, hour, ...) 
# Each node is", "index df.set_index('datetime', inplace=True) # %% # Filtering dataset by 'group'", "dates # %% df_tmp = pd.DataFrame(df['file_name'].resample('1D').count()) df_tmp['count'] = df_tmp['file_name'] del", "plt.figure(figsize=(16, 9)) ax = sns.heatmap(som.activation_matrix(data=df_train).T, cmap='mako', robust=True) ax.invert_yaxis() fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_activation_matrix.png', bbox_inches='tight',", "sns.countplot(data=df_melt, x='ractopamine', hue='ractopamine') for p in ax.patches: ax.annotate(f'\\n{p.get_height()}', (p.get_x() +", "f: pickle.dump(som, f) # %% som_x, som_y = som.get_shape() print('SOM", "ax=ax1, robust=True) sns.heatmap(som.activation_matrix(data=df_train).T, cmap='mako', ax=ax2, robust=True) ax1.invert_yaxis() ax2.invert_yaxis() fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_umatrix_activation.png', bbox_inches='tight',", "som.train(data=df_train, mode='random', verbose=True) with open(f'./{FILE_PREFIX}.obj', 'wb') as f: pickle.dump(som, f)", "# # Imports # %% import matplotlib.pyplot as plt #", "som = SOM(x=som_x, y=None, input_len=df_train.shape[1], learning_rate=0.5, neighborhood_radius=1.0, neighborhood_function='gaussian', cyclic_x=True, cyclic_y=True,", "fig = plt.figure(figsize=(16, 9)) ax = sns.heatmap(hmap, cmap=sns.color_palette(palette=[\"#000000\", \"blue\", \"orange\"],", "ignore plt.figure(figsize=(10, 10)) sns.set(style=\"whitegrid\", palette=sns.color_palette(\"muted\", n_colors=6, desat=1.0)) sns.barplot(y=df_tmp.index, x=df_tmp['count'], hue=df_tmp['rac'])", "robust=True) ax.invert_yaxis() fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_activation_matrix.png', bbox_inches='tight', transparent=True) # %% [markdown] # ##", "import pickle import seaborn as sns import sklearn.preprocessing from python_som", "with open(f'./{FILE_PREFIX}.obj', 'rb') as f: som = pickle.load(f) except FileNotFoundError:", "hmap = hmap.T fig = plt.figure(figsize=(16, 9)) ax = sns.heatmap(hmap,", 
"Testing python-som with audio dataset # %% [markdown] # #", "distribution class_assignments = som.label_map(np.array(df_train), np.array(df['rac'])) hmap = np.zeros((som_x, som_y)) for", "Visualizing distance matrix and activation matrix umatrix = som.distance_matrix() fig,", "sns.set(style=\"whitegrid\", palette=sns.color_palette(\"muted\", n_colors=6, desat=1.0)) sns.barplot(y=df_tmp['hour'], x=df_tmp['count'], hue=df_tmp['rac'], orient='h') plt.draw() #", "analysis df.reset_index(inplace=True) print(\"Duplicates by filename:\", df.duplicated(subset=['file_name']).value_counts(), sep='\\n') df.drop_duplicates(subset=['file_name'], inplace=True) print(\"Duplicates", "[0, 1, 2]}) ax.invert_yaxis() fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_{column}.png', bbox_inches='tight', transparent=True) plt.show() # %%", "balancing # tail_size = abs( # len(df[df['rac'].astype(int) == 1]) -", "# ## Visualizing distribution of audios by metadata (day, hour,", "sns.heatmap(umatrix.T, cmap='bone_r', ax=ax1, robust=True) sns.heatmap(som.activation_matrix(data=df_train).T, cmap='mako', ax=ax2, robust=True) ax1.invert_yaxis() ax2.invert_yaxis()", "of sample dates # %% df_tmp = pd.DataFrame(df['file_name'].resample('1D').count()) df_tmp['count'] =", "x='ractopamine', hue='ractopamine') for p in ax.patches: ax.annotate(f'\\n{p.get_height()}', (p.get_x() + 0.2,", "%% [markdown] # ## Visualizing distribution of audios by metadata", "%% # Visualizing by 'hour' print(df.groupby('hour')['rac'].count()) column = 'hour' class_assignments", "# Visualizing by 'hour' print(df.groupby('hour')['rac'].count()) column = 'hour' class_assignments =", "Visualizing by 'days' print(df.groupby('days')['rac'].count()) column = 'days' class_assignments = som.label_map(np.array(df_train),", "librosa.display as lrdisp import numpy as np import pandas as", "Resetting index for duplicate analysis df.reset_index(inplace=True) print(\"Duplicates by filename:\", 
df.duplicated(subset=['file_name']).value_counts(),", "plt.figure(figsize=(16, 9)) ax = sns.heatmap(hmap, robust=True, cmap='BrBG') ax.invert_yaxis() fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_{column}.png', bbox_inches='tight',", "# Testing python-som with audio dataset # %% [markdown] #", "pandas as pd import pickle import seaborn as sns import", "transparent=True) plt.show() # %% # Visualizing by 'grupo' print(df.groupby('grupo')['rac'].count()) column", "except Exception: hmap[i][j] = -1 hmap = hmap.T fig =", "- len(df[df['rac'].astype(int) == 0])) # df.drop(df.tail(tail_size).index, inplace=True) # %% [markdown]", "= sns.heatmap(hmap, cmap=sns.diverging_palette(150, 250, s=100, l=20, sep=1, n=26, center='light'), center=12)", "anc activation matrix separately fig = plt.figure(figsize=(16, 9)) ax =", "# %% # using sklearn's MinMaxScaler scaler = sklearn.preprocessing.MinMaxScaler(feature_range=(0, 1))", "(p.get_x() + 0.2, p.get_height()), ha='center', va='top', color='white', size=18) plt.draw() #", "verbose=True) with open(f'./{FILE_PREFIX}.obj', 'wb') as f: pickle.dump(som, f) # %%", "as f: som = pickle.load(f) except FileNotFoundError: som = SOM(x=som_x,", "= sns.heatmap(som.activation_matrix(data=df_train).T, cmap='mako', robust=True) ax.invert_yaxis() fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_activation_matrix.png', bbox_inches='tight', transparent=True) # %%", "= hmap.T fig = plt.figure(figsize=(16, 9)) ax = sns.heatmap(hmap, cmap='viridis')", "# Visualizing distance matrix and activation matrix umatrix = som.distance_matrix()", "\"blue\", \"orange\"], n_colors=3), cbar_kws={'ticks': [0, 1, 2]}) ax.invert_yaxis() fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_{column}.png', bbox_inches='tight',", "Visualizing distance matrix anc activation matrix separately fig = plt.figure(figsize=(16,", "its most frequent label # %% df['days'] = df.index.date df['days']", "= df_tmp['file_name'] del df_tmp['file_name'] df_tmp['rac'] = False 
df_tmp.loc['2020-09-22':, 'rac'] =", "p.get_height()), ha='center', va='top', color='white', size=18) plt.draw() # %% # using", "df.iloc[:, 3:-1].columns: hmap = som.get_weights()[:, :, df.iloc[:, 3:-1].columns.get_loc(column)].T fig =", "= 'hour' class_assignments = som.label_map(np.array(df_train), np.array(df[column])) hmap = np.zeros((som_x, som_y))", "df_tmp['rac'] = False df_tmp.loc['2020-09-22':, 'rac'] = True # type: ignore", "fig = plt.figure(figsize=(16, 9)) ax = sns.heatmap(umatrix.T, cmap='bone_r', robust=True) ax.invert_yaxis()", "# %% # Resetting index for duplicate analysis df.reset_index(inplace=True) print(\"Duplicates", "import librosa as lr # import librosa.display as lrdisp import", "Checking for and dropping duplicates # %% # Resetting index", "= sns.heatmap(hmap, cmap=sns.color_palette(palette=[\"#000000\", \"blue\", \"orange\"], n_colors=3), cbar_kws={'ticks': [0, 1, 2]})", "print(\"Duplicates by (datetime, ala, grupo):\", df.duplicated(subset=['datetime', 'ala', 'grupo']).value_counts(), sep='\\n') df.drop_duplicates(subset=['datetime',", "# %% # Visualizing distance matrix anc activation matrix separately", "%% df = pd.read_csv('features_means.csv', index_col=0, verbose=True) df.index = pd.to_datetime(df.index) df['rac']", "between the # first two principal components of the train", "df = df[df['grupo'] == 1] # %% # Dropping tail", "import numpy as np import pandas as pd import pickle", "df.drop(df.tail(tail_size).index, inplace=True) # %% [markdown] # ## Visualizing distribution of", "s=100, l=20, sep=1, n=26, center='light'), center=12) ax.invert_yaxis() fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_{column}.png', bbox_inches='tight', transparent=True)", "= sns.heatmap(umatrix.T, cmap='bone_r', robust=True) ax.invert_yaxis() fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_umatrix.png', bbox_inches='tight', transparent=True) fig =", "9)) ax = sns.heatmap(umatrix.T, cmap='bone_r', robust=True) ax.invert_yaxis() 
fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_umatrix.png', bbox_inches='tight', transparent=True)", "64 try: with open(f'./{FILE_PREFIX}.obj', 'rb') as f: som = pickle.load(f)", "df_tmp = pd.DataFrame(df['file_name'].resample('1D').count()) df_tmp['count'] = df_tmp['file_name'] del df_tmp['file_name'] df_tmp['rac'] =", "for duplicate analysis df.reset_index(inplace=True) print(\"Duplicates by filename:\", df.duplicated(subset=['file_name']).value_counts(), sep='\\n') df.drop_duplicates(subset=['file_name'],", "== 1] # %% # Dropping tail of dataset for", "# type: ignore df.sort_index(inplace=True) # %% [markdown] # ## Checking", "'ala', 'grupo'], inplace=True) # Rebuilding dataframe index df.set_index('datetime', inplace=True) #", "# %% # Visualizing distance matrix and activation matrix umatrix", "pd.to_datetime(df.index) df['rac'] = False df.loc['2020-09-22':, 'rac'] = True # type:", "cmap='mako', robust=True) ax.invert_yaxis() fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_activation_matrix.png', bbox_inches='tight', transparent=True) # %% [markdown] #", "element will be assigned based on the ratio between the", "= plt.figure(figsize=(16, 9)) ax = sns.heatmap(som.activation_matrix(data=df_train).T, cmap='mako', robust=True) ax.invert_yaxis() fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_activation_matrix.png',", "= hmap.T fig = plt.figure(figsize=(16, 9)) ax = sns.heatmap(hmap, cmap=sns.diverging_palette(150,", "of features # %% for column in df.iloc[:, 3:-1].columns: hmap", "value_vars=['rac'], value_name='ractopamine') plt.figure(figsize=(10, 10)) sns.set(style=\"whitegrid\", palette=sns.color_palette(\"muted\", n_colors=6, desat=1.0)) ax =", "# %% [markdown] # # Loading dataset # %% df", "hmap = som.get_weights()[:, :, df.iloc[:, 3:-1].columns.get_loc(column)].T fig = plt.figure(figsize=(16, 9))", "= plt.figure(figsize=(16, 9)) ax = sns.heatmap(hmap, cmap=sns.diverging_palette(150, 250, s=100, l=20,", 
"fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_{column}.png', bbox_inches='tight', transparent=True) plt.show() # %% # Visualizing by 'hour'", "print('SOM shape:', (som_x, som_y)) # %% # Visualizing distance matrix", "ax.invert_yaxis() fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_umatrix.png', bbox_inches='tight', transparent=True) fig = plt.figure(figsize=(16, 9)) ax =", "l=20, sep=1, n=26, center='light'), center=12) ax.invert_yaxis() fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_{column}.png', bbox_inches='tight', transparent=True) plt.show()", "= True # type: ignore plt.figure(figsize=(10, 10)) sns.set(style=\"whitegrid\", palette=sns.color_palette(\"muted\", n_colors=6,", "Each node is colorized according to its most frequent label", "distance matrix anc activation matrix separately fig = plt.figure(figsize=(16, 9))", "= False df_tmp.loc['2020-09-22':, 'rac'] = True # type: ignore df_tmp", "= plt.figure(figsize=(16, 9)) ax = sns.heatmap(hmap, cmap='viridis') ax.invert_yaxis() fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_{column}.png', bbox_inches='tight',", "df.set_index('datetime', inplace=True) # %% # Filtering dataset by 'group' df", "ax.annotate(f'\\n{p.get_height()}', (p.get_x() + 0.2, p.get_height()), ha='center', va='top', color='white', size=18) plt.draw()", "dataset by 'group' df = df[df['grupo'] == 1] # %%", "sklearn.preprocessing.MinMaxScaler(feature_range=(0, 1)) df_train = df.iloc[:, 3:-1].copy() df_train = scaler.fit_transform(df_train) #", "desat=1.0)) sns.barplot(y=df_tmp.index, x=df_tmp['count'], hue=df_tmp['rac']) plt.draw() df_tmp = pd.DataFrame(df['file_name'].resample('1H').count()) df_tmp['count'] =", "ax.invert_yaxis() fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_rac.png', bbox_inches='tight', transparent=True) plt.show() # %% # Visualizing by", "%% [markdown] # # Imports # %% import matplotlib.pyplot as", "df.iloc[:, 3:-1].copy() df_train = scaler.fit_transform(df_train) # %% # Defining first", "Loading 
dataset # %% df = pd.read_csv('features_means.csv', index_col=0, verbose=True) df.index", "%% [markdown] # ## Visualizing distribution of features # %%", "df.duplicated(subset=['file_name']).value_counts(), sep='\\n') df.drop_duplicates(subset=['file_name'], inplace=True) print(\"Duplicates by (datetime, ala, grupo):\", df.duplicated(subset=['datetime',", "sns.heatmap(umatrix.T, cmap='bone_r', robust=True) ax.invert_yaxis() fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_umatrix.png', bbox_inches='tight', transparent=True) fig = plt.figure(figsize=(16,", "Visualizing distribution of audios by metadata (day, hour, ...) #", "plt.show() # %% # Visualizing by 'grupo' print(df.groupby('grupo')['rac'].count()) column =", "j in sorted(class_assignments.keys()): try: hmap[i][j] = class_assignments[(i, j)].most_common()[0][0] except Exception:", "sns.set(style=\"whitegrid\", palette=sns.color_palette(\"muted\", n_colors=6, desat=1.0)) ax = sns.countplot(data=df_melt, x='ractopamine', hue='ractopamine') for", "'group' df = df[df['grupo'] == 1] # %% # Dropping", "as pd import pickle import seaborn as sns import sklearn.preprocessing", "shape # Second element will be assigned based on the", "df_melt = pd.melt(df, value_vars=['rac'], value_name='ractopamine') plt.figure(figsize=(10, 10)) sns.set(style=\"whitegrid\", palette=sns.color_palette(\"muted\", n_colors=6,", "fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_umatrix.png', bbox_inches='tight', transparent=True) fig = plt.figure(figsize=(16, 9)) ax = sns.heatmap(som.activation_matrix(data=df_train).T,", "plt.draw() # %% # Visualizing distance matrix anc activation matrix", "i, j in sorted(class_assignments.keys()): try: hmap[i][j] = class_assignments[(i, j)].most_common()[0][0] +", "df.sort_index(inplace=True) # %% [markdown] # ## Checking for and dropping", "data=df_train) # Training SOM som.weight_initialization(mode='linear', data=df_train) som.train(data=df_train, mode='random', verbose=True) with", 
"ax1.invert_yaxis() ax2.invert_yaxis() fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_umatrix_activation.png', bbox_inches='tight', transparent=True) plt.draw() # %% # Visualizing", "with audio dataset # %% [markdown] # # Imports #", "plt.figure(figsize=(16, 9)) ax = sns.heatmap(hmap, cmap='viridis') ax.invert_yaxis() fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_{column}.png', bbox_inches='tight', transparent=True)", "= 0 hmap = hmap.T fig = plt.figure(figsize=(16, 9)) ax", "matrix umatrix = som.distance_matrix() fig, (ax1, ax2) = plt.subplots(1, 2,", "transparent=True) plt.close(fig=fig) # %% [markdown] # ## Visualizing distribution of", "%% som_x, som_y = som.get_shape() print('SOM shape:', (som_x, som_y)) #", "SOM som.weight_initialization(mode='linear', data=df_train) som.train(data=df_train, mode='random', verbose=True) with open(f'./{FILE_PREFIX}.obj', 'wb') as", "(ax1, ax2) = plt.subplots(1, 2, figsize=(16, 9)) sns.heatmap(umatrix.T, cmap='bone_r', ax=ax1,", "SOM FILE_PREFIX = 'som64_u_grupo1' # %% [markdown] # # Loading", "for class balancing # tail_size = abs( # len(df[df['rac'].astype(int) ==", "[markdown] # ## Visualizing distribution of features # %% for", "class_assignments[(i, j)].most_common()[0][0] except Exception: hmap[i][j] = 0 hmap = hmap.T", "the train dataset som_x: int = 64 try: with open(f'./{FILE_PREFIX}.obj',", "ax.invert_yaxis() fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_{column}.png', bbox_inches='tight', transparent=True) plt.show() # %% # Visualizing by", "int = 64 try: with open(f'./{FILE_PREFIX}.obj', 'rb') as f: som", "= som.label_map(np.array(df_train), np.array(df[column])) hmap = np.zeros((som_x, som_y)) for i, j", "9)) sns.heatmap(umatrix.T, cmap='bone_r', ax=ax1, robust=True) sns.heatmap(som.activation_matrix(data=df_train).T, cmap='mako', ax=ax2, robust=True) ax1.invert_yaxis()", "= sns.heatmap(hmap, cmap='viridis') ax.invert_yaxis() fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_{column}.png', 
bbox_inches='tight', transparent=True) plt.show() # %%", "ax=ax2, robust=True) ax1.invert_yaxis() ax2.invert_yaxis() fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_umatrix_activation.png', bbox_inches='tight', transparent=True) plt.draw() # %%", "df_tmp.reset_index() df_tmp['hour'] = df_tmp['datetime'].dt.hour plt.figure(figsize=(10, 10)) sns.set(style=\"whitegrid\", palette=sns.color_palette(\"muted\", n_colors=6, desat=1.0))", "two principal components of the train dataset som_x: int =", "numpy as np import pandas as pd import pickle import", "by (datetime, ala, grupo):\", df.duplicated(subset=['datetime', 'ala', 'grupo']).value_counts(), sep='\\n') df.drop_duplicates(subset=['datetime', 'ala',", "n=26, center='light'), center=12) ax.invert_yaxis() fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_{column}.png', bbox_inches='tight', transparent=True) plt.show() # %%", "# %% [markdown] # # Testing python-som with audio dataset", "as np import pandas as pd import pickle import seaborn", "pd.DataFrame(df['file_name'].resample('1H').count()) df_tmp['count'] = df_tmp['file_name'] del df_tmp['file_name'] df_tmp['rac'] = False df_tmp.loc['2020-09-22':,", "in ax.patches: ax.annotate(f'\\n{p.get_height()}', (p.get_x() + 0.2, p.get_height()), ha='center', va='top', color='white',", "open(f'./{FILE_PREFIX}.obj', 'rb') as f: som = pickle.load(f) except FileNotFoundError: som", "# Visualizing by 'days' print(df.groupby('days')['rac'].count()) column = 'days' class_assignments =", "= plt.figure(figsize=(16, 9)) ax = sns.heatmap(umatrix.T, cmap='bone_r', robust=True) ax.invert_yaxis() fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_umatrix.png',", "using sklearn's MinMaxScaler scaler = sklearn.preprocessing.MinMaxScaler(feature_range=(0, 1)) df_train = df.iloc[:,", "df = pd.read_csv('features_means.csv', index_col=0, verbose=True) df.index = pd.to_datetime(df.index) df['rac'] =", "df_tmp['file_name'] df_tmp['rac'] = False df_tmp.loc['2020-09-22':, 'rac'] = True # 
type:", "2]}) ax.invert_yaxis() fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_rac.png', bbox_inches='tight', transparent=True) plt.show() # %% # Visualizing", "# Each node is colorized according to its most frequent", "== 0])) # df.drop(df.tail(tail_size).index, inplace=True) # %% [markdown] # ##", "as f: pickle.dump(som, f) # %% som_x, som_y = som.get_shape()", "hmap[i][j] = -1 hmap = hmap.T fig = plt.figure(figsize=(16, 9))", "index for duplicate analysis df.reset_index(inplace=True) print(\"Duplicates by filename:\", df.duplicated(subset=['file_name']).value_counts(), sep='\\n')", "import sklearn.preprocessing from python_som import SOM FILE_PREFIX = 'som64_u_grupo1' #", "abs( # len(df[df['rac'].astype(int) == 1]) - len(df[df['rac'].astype(int) == 0])) #", "class_assignments[(i, j)].most_common()[0][0] + 1 except Exception: continue hmap = hmap.T", "class balancing # tail_size = abs( # len(df[df['rac'].astype(int) == 1])", "x=df_tmp['count'], hue=df_tmp['rac']) plt.draw() df_tmp = pd.DataFrame(df['file_name'].resample('1H').count()) df_tmp['count'] = df_tmp['file_name'] del", "df.index.hour # %% # Visualizing 'rac' distribution class_assignments = som.label_map(np.array(df_train),", "# %% # Filtering dataset by 'group' df = df[df['grupo']", "bbox_inches='tight', transparent=True) # %% [markdown] # ## Visualizing distribution of", "'grupo' print(df.groupby('grupo')['rac'].count()) column = 'grupo' class_assignments = som.label_map(np.array(df_train), np.array(df[column])) hmap", "0])) # df.drop(df.tail(tail_size).index, inplace=True) # %% [markdown] # ## Visualizing", "bbox_inches='tight', transparent=True) plt.draw() # %% # Visualizing distance matrix anc", "with open(f'./{FILE_PREFIX}.obj', 'wb') as f: pickle.dump(som, f) # %% som_x,", "# Dropping tail of dataset for class balancing # tail_size", "= pd.to_datetime(df.index) df['rac'] = False df.loc['2020-09-22':, 'rac'] = True #", "[markdown] # # Testing python-som with audio dataset # %%", "by 'group' df 
= df[df['grupo'] == 1] # %% #", "hmap[i][j] = 0 hmap = hmap.T fig = plt.figure(figsize=(16, 9))", "%% # Filtering dataset by 'group' df = df[df['grupo'] ==", "som.get_shape() print('SOM shape:', (som_x, som_y)) # %% # Visualizing distance", "%% [markdown] # # Testing python-som with audio dataset #", "distribution of audios by metadata (day, hour, ...) # Each", "df_tmp['datetime'].dt.hour plt.figure(figsize=(10, 10)) sns.set(style=\"whitegrid\", palette=sns.color_palette(\"muted\", n_colors=6, desat=1.0)) sns.barplot(y=df_tmp['hour'], x=df_tmp['count'], hue=df_tmp['rac'],", "plt.figure(figsize=(10, 10)) sns.set(style=\"whitegrid\", palette=sns.color_palette(\"muted\", n_colors=6, desat=1.0)) ax = sns.countplot(data=df_melt, x='ractopamine',", "pickle.dump(som, f) # %% som_x, som_y = som.get_shape() print('SOM shape:',", "bbox_inches='tight', transparent=True) fig = plt.figure(figsize=(16, 9)) ax = sns.heatmap(som.activation_matrix(data=df_train).T, cmap='mako',", "n_colors=6, desat=1.0)) sns.barplot(y=df_tmp.index, x=df_tmp['count'], hue=df_tmp['rac']) plt.draw() df_tmp = pd.DataFrame(df['file_name'].resample('1H').count()) df_tmp['count']", "dataset # %% [markdown] # # Imports # %% import", "n_colors=6, desat=1.0)) ax = sns.countplot(data=df_melt, x='ractopamine', hue='ractopamine') for p in", "= som.get_weights()[:, :, df.iloc[:, 3:-1].columns.get_loc(column)].T fig = plt.figure(figsize=(16, 9)) ax", "som.label_map(np.array(df_train), np.array(df['rac'])) hmap = np.zeros((som_x, som_y)) for i, j in", "plt.draw() df_tmp = pd.DataFrame(df['file_name'].resample('1H').count()) df_tmp['count'] = df_tmp['file_name'] del df_tmp['file_name'] df_tmp['rac']", "n_colors=6, desat=1.0)) sns.barplot(y=df_tmp['hour'], x=df_tmp['count'], hue=df_tmp['rac'], orient='h') plt.draw() # %% df_melt", "try: hmap[i][j] = class_assignments[(i, j)].most_common()[0][0] + 1 except Exception: continue", "f: som = pickle.load(f) except FileNotFoundError: som = SOM(x=som_x, y=None,", "as plt # 
import librosa as lr # import librosa.display", "pd.melt(df, value_vars=['rac'], value_name='ractopamine') plt.figure(figsize=(10, 10)) sns.set(style=\"whitegrid\", palette=sns.color_palette(\"muted\", n_colors=6, desat=1.0)) ax", "# import librosa as lr # import librosa.display as lrdisp", "## Visualizing distribution of features # %% for column in", "distribution of sample dates # %% df_tmp = pd.DataFrame(df['file_name'].resample('1D').count()) df_tmp['count']", "audio dataset # %% [markdown] # # Imports # %%", "duplicates # %% # Resetting index for duplicate analysis df.reset_index(inplace=True)", "cbar_kws={'ticks': [0, 1, 2]}) ax.invert_yaxis() fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_rac.png', bbox_inches='tight', transparent=True) plt.show() #", "True # type: ignore plt.figure(figsize=(10, 10)) sns.set(style=\"whitegrid\", palette=sns.color_palette(\"muted\", n_colors=6, desat=1.0))", "10)) sns.set(style=\"whitegrid\", palette=sns.color_palette(\"muted\", n_colors=6, desat=1.0)) sns.barplot(y=df_tmp['hour'], x=df_tmp['count'], hue=df_tmp['rac'], orient='h') plt.draw()", "FILE_PREFIX = 'som64_u_grupo1' # %% [markdown] # # Loading dataset", "Exception: hmap[i][j] = 0 hmap = hmap.T fig = plt.figure(figsize=(16,", "df['hour'] = df.index.hour # %% # Visualizing 'rac' distribution class_assignments", "# Resetting index for duplicate analysis df.reset_index(inplace=True) print(\"Duplicates by filename:\",", "True # type: ignore df_tmp = df_tmp.reset_index() df_tmp['hour'] = df_tmp['datetime'].dt.hour", "ax = sns.heatmap(umatrix.T, cmap='bone_r', robust=True) ax.invert_yaxis() fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_umatrix.png', bbox_inches='tight', transparent=True) fig", "sns.set(style=\"whitegrid\", palette=sns.color_palette(\"muted\", n_colors=6, desat=1.0)) sns.barplot(y=df_tmp.index, x=df_tmp['count'], hue=df_tmp['rac']) plt.draw() df_tmp =", "df.index.date df['days'] = (df['days'] - df['days'][0]) df['days'] = df['days'].apply(lambda 
x:", "ax = sns.heatmap(som.activation_matrix(data=df_train).T, cmap='mako', robust=True) ax.invert_yaxis() fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_activation_matrix.png', bbox_inches='tight', transparent=True) #", "activation matrix separately fig = plt.figure(figsize=(16, 9)) ax = sns.heatmap(umatrix.T,", "= 'grupo' class_assignments = som.label_map(np.array(df_train), np.array(df[column])) hmap = np.zeros((som_x, som_y))", "figsize=(16, 9)) sns.heatmap(umatrix.T, cmap='bone_r', ax=ax1, robust=True) sns.heatmap(som.activation_matrix(data=df_train).T, cmap='mako', ax=ax2, robust=True)", "df.loc['2020-09-22':, 'rac'] = True # type: ignore df.sort_index(inplace=True) # %%", "False df_tmp.loc['2020-09-22':, 'rac'] = True # type: ignore plt.figure(figsize=(10, 10))", "plt.draw() # %% df_melt = pd.melt(df, value_vars=['rac'], value_name='ractopamine') plt.figure(figsize=(10, 10))", "10)) sns.set(style=\"whitegrid\", palette=sns.color_palette(\"muted\", n_colors=6, desat=1.0)) sns.barplot(y=df_tmp.index, x=df_tmp['count'], hue=df_tmp['rac']) plt.draw() df_tmp", "'rac'] = True # type: ignore plt.figure(figsize=(10, 10)) sns.set(style=\"whitegrid\", palette=sns.color_palette(\"muted\",", "assigned based on the ratio between the # first two", "Visualizing by 'grupo' print(df.groupby('grupo')['rac'].count()) column = 'grupo' class_assignments = som.label_map(np.array(df_train),", "som.distance_matrix() fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(16, 9)) sns.heatmap(umatrix.T,", "neighborhood_function='gaussian', cyclic_x=True, cyclic_y=True, data=df_train) # Training SOM som.weight_initialization(mode='linear', data=df_train) som.train(data=df_train,", "hmap.T fig = plt.figure(figsize=(16, 9)) ax = sns.heatmap(hmap, cmap='viridis') ax.invert_yaxis()", "= plt.figure(figsize=(16, 9)) ax = sns.heatmap(hmap, robust=True, cmap='BrBG') ax.invert_yaxis() fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_{column}.png',", "# %% # Visualizing by 'grupo' 
print(df.groupby('grupo')['rac'].count()) column = 'grupo'", "np.zeros((som_x, som_y)) for i, j in sorted(class_assignments.keys()): try: hmap[i][j] =", "== 1]) - len(df[df['rac'].astype(int) == 0])) # df.drop(df.tail(tail_size).index, inplace=True) #", "# Imports # %% import matplotlib.pyplot as plt # import", "'som64_u_grupo1' # %% [markdown] # # Loading dataset # %%", "= plt.subplots(1, 2, figsize=(16, 9)) sns.heatmap(umatrix.T, cmap='bone_r', ax=ax1, robust=True) sns.heatmap(som.activation_matrix(data=df_train).T,", "column = 'hour' class_assignments = som.label_map(np.array(df_train), np.array(df[column])) hmap = np.zeros((som_x,", "sns.barplot(y=df_tmp.index, x=df_tmp['count'], hue=df_tmp['rac']) plt.draw() df_tmp = pd.DataFrame(df['file_name'].resample('1H').count()) df_tmp['count'] = df_tmp['file_name']", "fig = plt.figure(figsize=(16, 9)) ax = sns.heatmap(hmap, cmap=sns.diverging_palette(150, 250, s=100,", "ax = sns.heatmap(hmap, cmap=sns.diverging_palette(150, 250, s=100, l=20, sep=1, n=26, center='light'),", "fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(16, 9)) sns.heatmap(umatrix.T, cmap='bone_r',", "som_y)) # %% # Visualizing distance matrix and activation matrix", "1, 2]}) ax.invert_yaxis() fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_{column}.png', bbox_inches='tight', transparent=True) plt.show() # %% #", "according to its most frequent label # %% df['days'] =", "df.iloc[:, 3:-1].columns.get_loc(column)].T fig = plt.figure(figsize=(16, 9)) ax = sns.heatmap(hmap, robust=True,", "= 64 try: with open(f'./{FILE_PREFIX}.obj', 'rb') as f: som =", "9)) ax = sns.heatmap(hmap, cmap=sns.color_palette(palette=[\"#000000\", \"blue\", \"orange\"], n_colors=3), cbar_kws={'ticks': [0,", "type: ignore plt.figure(figsize=(10, 10)) sns.set(style=\"whitegrid\", palette=sns.color_palette(\"muted\", n_colors=6, desat=1.0)) sns.barplot(y=df_tmp.index, x=df_tmp['count'],", "bbox_inches='tight', transparent=True) plt.show() # %% # Visualizing by 'hour' 
print(df.groupby('hour')['rac'].count())", "type: ignore df_tmp = df_tmp.reset_index() df_tmp['hour'] = df_tmp['datetime'].dt.hour plt.figure(figsize=(10, 10))", "len(df[df['rac'].astype(int) == 0])) # df.drop(df.tail(tail_size).index, inplace=True) # %% [markdown] #", "= df.index.date df['days'] = (df['days'] - df['days'][0]) df['days'] = df['days'].apply(lambda", "bbox_inches='tight', transparent=True) plt.show() # %% # Visualizing by 'days' print(df.groupby('days')['rac'].count())", "# type: ignore plt.figure(figsize=(10, 10)) sns.set(style=\"whitegrid\", palette=sns.color_palette(\"muted\", n_colors=6, desat=1.0)) sns.barplot(y=df_tmp.index,", "# ## Visualizing distribution of sample dates # %% df_tmp", "1 except Exception: continue hmap = hmap.T fig = plt.figure(figsize=(16,", "%% import matplotlib.pyplot as plt # import librosa as lr", "# %% df_tmp = pd.DataFrame(df['file_name'].resample('1D').count()) df_tmp['count'] = df_tmp['file_name'] del df_tmp['file_name']", "sorted(class_assignments.keys()): try: hmap[i][j] = class_assignments[(i, j)].most_common()[0][0] except Exception: hmap[i][j] =", "# %% [markdown] # ## Visualizing distribution of audios by", "# Filtering dataset by 'group' df = df[df['grupo'] == 1]", "# %% [markdown] # # Imports # %% import matplotlib.pyplot", "False df.loc['2020-09-22':, 'rac'] = True # type: ignore df.sort_index(inplace=True) #", "transparent=True) plt.draw() # %% # Visualizing distance matrix anc activation", "%% [markdown] # # Loading dataset # %% df =", "# %% som_x, som_y = som.get_shape() print('SOM shape:', (som_x, som_y))", "plt.draw() # %% # using sklearn's MinMaxScaler scaler = sklearn.preprocessing.MinMaxScaler(feature_range=(0,", "sns.heatmap(hmap, robust=True, cmap='BrBG') ax.invert_yaxis() fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_{column}.png', bbox_inches='tight', transparent=True) plt.close(fig=fig) # %%", "= (df['days'] - df['days'][0]) df['days'] = df['days'].apply(lambda x: x.days) df['hour']", "som 
= pickle.load(f) except FileNotFoundError: som = SOM(x=som_x, y=None, input_len=df_train.shape[1],", "components of the train dataset som_x: int = 64 try:", "# # Testing python-som with audio dataset # %% [markdown]", "dataframe index df.set_index('datetime', inplace=True) # %% # Filtering dataset by", "j)].most_common()[0][0] except Exception: hmap[i][j] = 0 hmap = hmap.T fig", "sns.heatmap(som.activation_matrix(data=df_train).T, cmap='mako', robust=True) ax.invert_yaxis() fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_activation_matrix.png', bbox_inches='tight', transparent=True) # %% [markdown]", "sklearn.preprocessing from python_som import SOM FILE_PREFIX = 'som64_u_grupo1' # %%", "9)) ax = sns.heatmap(hmap, cmap='viridis') ax.invert_yaxis() fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_{column}.png', bbox_inches='tight', transparent=True) plt.show()", "'wb') as f: pickle.dump(som, f) # %% som_x, som_y =", "hmap[i][j] = class_assignments[(i, j)].most_common()[0][0] except Exception: hmap[i][j] = 0 hmap", "= abs( # len(df[df['rac'].astype(int) == 1]) - len(df[df['rac'].astype(int) == 0]))", "except Exception: continue hmap = hmap.T fig = plt.figure(figsize=(16, 9))", "= pd.DataFrame(df['file_name'].resample('1D').count()) df_tmp['count'] = df_tmp['file_name'] del df_tmp['file_name'] df_tmp['rac'] = False", "fig = plt.figure(figsize=(16, 9)) ax = sns.heatmap(hmap, robust=True, cmap='BrBG') ax.invert_yaxis()", "Visualizing by 'hour' print(df.groupby('hour')['rac'].count()) column = 'hour' class_assignments = som.label_map(np.array(df_train),", "'hour' print(df.groupby('hour')['rac'].count()) column = 'hour' class_assignments = som.label_map(np.array(df_train), np.array(df[column])) hmap", "for column in df.iloc[:, 3:-1].columns: hmap = som.get_weights()[:, :, df.iloc[:,", "x=df_tmp['count'], hue=df_tmp['rac'], orient='h') plt.draw() # %% df_melt = pd.melt(df, value_vars=['rac'],", "shape:', (som_x, som_y)) # %% # Visualizing distance matrix and", "except 
FileNotFoundError: som = SOM(x=som_x, y=None, input_len=df_train.shape[1], learning_rate=0.5, neighborhood_radius=1.0, neighborhood_function='gaussian',", "import seaborn as sns import sklearn.preprocessing from python_som import SOM", "in df.iloc[:, 3:-1].columns: hmap = som.get_weights()[:, :, df.iloc[:, 3:-1].columns.get_loc(column)].T fig", "try: hmap[i][j] = class_assignments[(i, j)].most_common()[0][0] except Exception: hmap[i][j] = 0", "print(df.groupby('hour')['rac'].count()) column = 'hour' class_assignments = som.label_map(np.array(df_train), np.array(df[column])) hmap =", "pickle.load(f) except FileNotFoundError: som = SOM(x=som_x, y=None, input_len=df_train.shape[1], learning_rate=0.5, neighborhood_radius=1.0,", "fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_rac.png', bbox_inches='tight', transparent=True) plt.show() # %% # Visualizing by 'grupo'", "'grupo']).value_counts(), sep='\\n') df.drop_duplicates(subset=['datetime', 'ala', 'grupo'], inplace=True) # Rebuilding dataframe index", "[markdown] # # Loading dataset # %% df = pd.read_csv('features_means.csv',", "grupo):\", df.duplicated(subset=['datetime', 'ala', 'grupo']).value_counts(), sep='\\n') df.drop_duplicates(subset=['datetime', 'ala', 'grupo'], inplace=True) #", "Visualizing distribution of features # %% for column in df.iloc[:,", "features # %% for column in df.iloc[:, 3:-1].columns: hmap =", "1)) df_train = df.iloc[:, 3:-1].copy() df_train = scaler.fit_transform(df_train) # %%", "np.array(df['rac'])) hmap = np.zeros((som_x, som_y)) for i, j in sorted(class_assignments.keys()):", "y=None, input_len=df_train.shape[1], learning_rate=0.5, neighborhood_radius=1.0, neighborhood_function='gaussian', cyclic_x=True, cyclic_y=True, data=df_train) # Training", "# %% # Visualizing 'rac' distribution class_assignments = som.label_map(np.array(df_train), np.array(df['rac']))", "%% df_melt = pd.melt(df, value_vars=['rac'], value_name='ractopamine') plt.figure(figsize=(10, 10)) 
sns.set(style=\"whitegrid\", palette=sns.color_palette(\"muted\",", "and dropping duplicates # %% # Resetting index for duplicate", "# Visualizing by 'grupo' print(df.groupby('grupo')['rac'].count()) column = 'grupo' class_assignments =", "palette=sns.color_palette(\"muted\", n_colors=6, desat=1.0)) ax = sns.countplot(data=df_melt, x='ractopamine', hue='ractopamine') for p", "# Loading dataset # %% df = pd.read_csv('features_means.csv', index_col=0, verbose=True)", "based on the ratio between the # first two principal", "# Visualizing 'rac' distribution class_assignments = som.label_map(np.array(df_train), np.array(df['rac'])) hmap =", "matrix anc activation matrix separately fig = plt.figure(figsize=(16, 9)) ax", "ax2.invert_yaxis() fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_umatrix_activation.png', bbox_inches='tight', transparent=True) plt.draw() # %% # Visualizing distance", "(df['days'] - df['days'][0]) df['days'] = df['days'].apply(lambda x: x.days) df['hour'] =", "2]}) ax.invert_yaxis() fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_{column}.png', bbox_inches='tight', transparent=True) plt.show() # %% # Visualizing", "activation matrix umatrix = som.distance_matrix() fig, (ax1, ax2) = plt.subplots(1,", "p in ax.patches: ax.annotate(f'\\n{p.get_height()}', (p.get_x() + 0.2, p.get_height()), ha='center', va='top',", "by 'days' print(df.groupby('days')['rac'].count()) column = 'days' class_assignments = som.label_map(np.array(df_train), np.array(df[column]))", "%% [markdown] # ## Checking for and dropping duplicates #", "dataset for class balancing # tail_size = abs( # len(df[df['rac'].astype(int)", "(datetime, ala, grupo):\", df.duplicated(subset=['datetime', 'ala', 'grupo']).value_counts(), sep='\\n') df.drop_duplicates(subset=['datetime', 'ala', 'grupo'],", "print(\"Duplicates by filename:\", df.duplicated(subset=['file_name']).value_counts(), sep='\\n') df.drop_duplicates(subset=['file_name'], inplace=True) print(\"Duplicates by (datetime,", 
"hue='ractopamine') for p in ax.patches: ax.annotate(f'\\n{p.get_height()}', (p.get_x() + 0.2, p.get_height()),", "fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_{column}.png', bbox_inches='tight', transparent=True) plt.close(fig=fig) # %% [markdown] # ## Visualizing", "n_colors=3), cbar_kws={'ticks': [0, 1, 2]}) ax.invert_yaxis() fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_rac.png', bbox_inches='tight', transparent=True) plt.show()", "plt.show() # %% # Visualizing by 'days' print(df.groupby('days')['rac'].count()) column =", "node is colorized according to its most frequent label #", "# %% import matplotlib.pyplot as plt # import librosa as", "sep='\\n') df.drop_duplicates(subset=['file_name'], inplace=True) print(\"Duplicates by (datetime, ala, grupo):\", df.duplicated(subset=['datetime', 'ala',", "# Second element will be assigned based on the ratio", "sklearn's MinMaxScaler scaler = sklearn.preprocessing.MinMaxScaler(feature_range=(0, 1)) df_train = df.iloc[:, 3:-1].copy()", "to its most frequent label # %% df['days'] = df.index.date", "ax = sns.heatmap(hmap, cmap='viridis') ax.invert_yaxis() fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_{column}.png', bbox_inches='tight', transparent=True) plt.show() #", "librosa as lr # import librosa.display as lrdisp import numpy", "# %% df_melt = pd.melt(df, value_vars=['rac'], value_name='ractopamine') plt.figure(figsize=(10, 10)) sns.set(style=\"whitegrid\",", "by metadata (day, hour, ...) 
# Each node is colorized", "cmap='BrBG') ax.invert_yaxis() fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_{column}.png', bbox_inches='tight', transparent=True) plt.close(fig=fig) # %% [markdown] #", "len(df[df['rac'].astype(int) == 1]) - len(df[df['rac'].astype(int) == 0])) # df.drop(df.tail(tail_size).index, inplace=True)", "= 'som64_u_grupo1' # %% [markdown] # # Loading dataset #", "sns import sklearn.preprocessing from python_som import SOM FILE_PREFIX = 'som64_u_grupo1'", "hmap = np.zeros((som_x, som_y)) for i, j in sorted(class_assignments.keys()): try:", "df.duplicated(subset=['datetime', 'ala', 'grupo']).value_counts(), sep='\\n') df.drop_duplicates(subset=['datetime', 'ala', 'grupo'], inplace=True) # Rebuilding", "Training SOM som.weight_initialization(mode='linear', data=df_train) som.train(data=df_train, mode='random', verbose=True) with open(f'./{FILE_PREFIX}.obj', 'wb')", "df_tmp.loc['2020-09-22':, 'rac'] = True # type: ignore df_tmp = df_tmp.reset_index()", "learning_rate=0.5, neighborhood_radius=1.0, neighborhood_function='gaussian', cyclic_x=True, cyclic_y=True, data=df_train) # Training SOM som.weight_initialization(mode='linear',", "open(f'./{FILE_PREFIX}.obj', 'wb') as f: pickle.dump(som, f) # %% som_x, som_y", "# %% [markdown] # ## Checking for and dropping duplicates", "print(df.groupby('grupo')['rac'].count()) column = 'grupo' class_assignments = som.label_map(np.array(df_train), np.array(df[column])) hmap =", "df.drop_duplicates(subset=['datetime', 'ala', 'grupo'], inplace=True) # Rebuilding dataframe index df.set_index('datetime', inplace=True)", "# ## Checking for and dropping duplicates # %% #", "1, 2]}) ax.invert_yaxis() fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_rac.png', bbox_inches='tight', transparent=True) plt.show() # %% #", "%% # Visualizing by 'grupo' print(df.groupby('grupo')['rac'].count()) column = 'grupo' class_assignments", "[markdown] # # Imports # %% import matplotlib.pyplot as plt", "as lrdisp import numpy as np 
import pandas as pd", "= False df_tmp.loc['2020-09-22':, 'rac'] = True # type: ignore plt.figure(figsize=(10,", "= som.distance_matrix() fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(16, 9))", "'days' class_assignments = som.label_map(np.array(df_train), np.array(df[column])) hmap = np.zeros((som_x, som_y)) for", "## Visualizing distribution of sample dates # %% df_tmp =", "## Visualizing distribution of audios by metadata (day, hour, ...)", "for p in ax.patches: ax.annotate(f'\\n{p.get_height()}', (p.get_x() + 0.2, p.get_height()), ha='center',", "df[df['grupo'] == 1] # %% # Dropping tail of dataset", "9)) ax = sns.heatmap(som.activation_matrix(data=df_train).T, cmap='mako', robust=True) ax.invert_yaxis() fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_activation_matrix.png', bbox_inches='tight', transparent=True)", "hour, ...) # Each node is colorized according to its", "= class_assignments[(i, j)].most_common()[0][0] except Exception: hmap[i][j] = -1 hmap =", "= -1 hmap = hmap.T fig = plt.figure(figsize=(16, 9)) ax", "SOM(x=som_x, y=None, input_len=df_train.shape[1], learning_rate=0.5, neighborhood_radius=1.0, neighborhood_function='gaussian', cyclic_x=True, cyclic_y=True, data=df_train) #", "%% # using sklearn's MinMaxScaler scaler = sklearn.preprocessing.MinMaxScaler(feature_range=(0, 1)) df_train", "lrdisp import numpy as np import pandas as pd import", "%% df['days'] = df.index.date df['days'] = (df['days'] - df['days'][0]) df['days']", "= scaler.fit_transform(df_train) # %% # Defining first element of SOM", "2, figsize=(16, 9)) sns.heatmap(umatrix.T, cmap='bone_r', ax=ax1, robust=True) sns.heatmap(som.activation_matrix(data=df_train).T, cmap='mako', ax=ax2,", "= 'days' class_assignments = som.label_map(np.array(df_train), np.array(df[column])) hmap = np.zeros((som_x, som_y))", "try: with open(f'./{FILE_PREFIX}.obj', 'rb') as f: som = pickle.load(f) except", "pd.read_csv('features_means.csv', index_col=0, verbose=True) df.index = pd.to_datetime(df.index) 
df['rac'] = False df.loc['2020-09-22':,", "False df_tmp.loc['2020-09-22':, 'rac'] = True # type: ignore df_tmp =", "# Visualizing distance matrix anc activation matrix separately fig =", "df.reset_index(inplace=True) print(\"Duplicates by filename:\", df.duplicated(subset=['file_name']).value_counts(), sep='\\n') df.drop_duplicates(subset=['file_name'], inplace=True) print(\"Duplicates by", "= class_assignments[(i, j)].most_common()[0][0] + 1 except Exception: continue hmap =" ]
[ "the supplied competition code. \"\"\" # return Competition return Competition", "results. Returns ------- Competition A Competition object with the most", "competition): \"\"\" Retrieve data and enrich the supplied competition with", "a UpdateEngine object.\"\"\" def get_competition(self, code): \"\"\" Retrieve data for", "- Update the data model with the most resent fixtures", "\"\"\" Retrieve data for the supplied competition code. Returns -------", "recent fixtures and results. Returns ------- Competition A Competition object", "most resent fixtures and results.\"\"\" def __init__(self): \"\"\"Construct a UpdateEngine", "\"\"\" Retrieve data and enrich the supplied competition with the", "and results. Returns ------- Competition A Competition object with the", "A Competition object with the most recent fixtures and results", "def __init__(self): \"\"\"Construct a UpdateEngine object.\"\"\" def get_competition(self, code): \"\"\"", "data and enrich the supplied competition with the most recent", "class UpdateEngine: \"\"\"Prediction Engine - Update the data model with", "Retrieve data for the supplied competition code. Returns ------- Competition", "for the supplied competition code. Returns ------- Competition A Competition", "fixtures and results for the supplied competition code. 
\"\"\" #", "# return Competition return Competition def update_competition(self, competition): \"\"\" Retrieve", "Update the data model with the most resent fixtures and", "UpdateEngine object.\"\"\" def get_competition(self, code): \"\"\" Retrieve data for the", "data model with the most resent fixtures and results.\"\"\" from", "def update_competition(self, competition): \"\"\" Retrieve data and enrich the supplied", "most resent fixtures and results.\"\"\" from footy.domain import Competition class", "and enrich the supplied competition with the most recent fixtures", "and results.\"\"\" def __init__(self): \"\"\"Construct a UpdateEngine object.\"\"\" def get_competition(self,", "\"\"\" # return Competition return Competition def update_competition(self, competition): \"\"\"", "most recent fixtures and results. Returns ------- Competition A Competition", "from footy.domain import Competition class UpdateEngine: \"\"\"Prediction Engine - Update", "fixtures and results. Returns ------- Competition A Competition object with", "resent fixtures and results.\"\"\" from footy.domain import Competition class UpdateEngine:", "recent fixtures and results for the supplied competition code. \"\"\"", "resent fixtures and results.\"\"\" def __init__(self): \"\"\"Construct a UpdateEngine object.\"\"\"", "return Competition def update_competition(self, competition): \"\"\" Retrieve data and enrich", "the most resent fixtures and results.\"\"\" def __init__(self): \"\"\"Construct a", "with the most recent fixtures and results. Returns ------- Competition", "fixtures and results for the supplied competition code. \"\"\" return", "the most resent fixtures and results.\"\"\" from footy.domain import Competition", "competition code. \"\"\" # return Competition return Competition def update_competition(self,", "results for the supplied competition code. \"\"\" # return Competition", "competition code. 
Returns ------- Competition A Competition object with the", "the data model with the most resent fixtures and results.\"\"\"", "the most recent fixtures and results. Returns ------- Competition A", "model with the most resent fixtures and results.\"\"\" from footy.domain", "object.\"\"\" def get_competition(self, code): \"\"\" Retrieve data for the supplied", "object with the most recent fixtures and results for the", "for the supplied competition code. \"\"\" # return Competition return", "and results for the supplied competition code. \"\"\" # return", "model with the most resent fixtures and results.\"\"\" def __init__(self):", "__init__(self): \"\"\"Construct a UpdateEngine object.\"\"\" def get_competition(self, code): \"\"\" Retrieve", "with the most resent fixtures and results.\"\"\" def __init__(self): \"\"\"Construct", "and results.\"\"\" from footy.domain import Competition class UpdateEngine: \"\"\"Prediction Engine", "results.\"\"\" def __init__(self): \"\"\"Construct a UpdateEngine object.\"\"\" def get_competition(self, code):", "code. Returns ------- Competition A Competition object with the most", "Returns ------- Competition A Competition object with the most recent", "enrich the supplied competition with the most recent fixtures and", "get_competition(self, code): \"\"\" Retrieve data for the supplied competition code.", "competition with the most recent fixtures and results. Returns -------", "\"\"\"Prediction Engine - Update the data model with the most", "code): \"\"\" Retrieve data for the supplied competition code. Returns", "Engine - Update the data model with the most resent", "\"\"\"Construct a UpdateEngine object.\"\"\" def get_competition(self, code): \"\"\" Retrieve data", "def get_competition(self, code): \"\"\" Retrieve data for the supplied competition", "Retrieve data and enrich the supplied competition with the most", "supplied competition with the most recent fixtures and results. 
Returns", "update_competition(self, competition): \"\"\" Retrieve data and enrich the supplied competition", "footy.domain import Competition class UpdateEngine: \"\"\"Prediction Engine - Update the", "Competition object with the most recent fixtures and results for", "supplied competition code. \"\"\" # return Competition return Competition def", "code. \"\"\" # return Competition return Competition def update_competition(self, competition):", "supplied competition code. Returns ------- Competition A Competition object with", "data model with the most resent fixtures and results.\"\"\" def", "with the most recent fixtures and results for the supplied", "Competition A Competition object with the most recent fixtures and", "the most recent fixtures and results for the supplied competition", "the supplied competition with the most recent fixtures and results.", "with the most resent fixtures and results.\"\"\" from footy.domain import", "and results for the supplied competition code. \"\"\" return Competition", "data for the supplied competition code. Returns ------- Competition A", "UpdateEngine: \"\"\"Prediction Engine - Update the data model with the", "------- Competition A Competition object with the most recent fixtures", "fixtures and results.\"\"\" from footy.domain import Competition class UpdateEngine: \"\"\"Prediction", "import Competition class UpdateEngine: \"\"\"Prediction Engine - Update the data", "results.\"\"\" from footy.domain import Competition class UpdateEngine: \"\"\"Prediction Engine -", "fixtures and results.\"\"\" def __init__(self): \"\"\"Construct a UpdateEngine object.\"\"\" def", "the supplied competition code. 
Returns ------- Competition A Competition object", "Competition def update_competition(self, competition): \"\"\" Retrieve data and enrich the", "return Competition return Competition def update_competition(self, competition): \"\"\" Retrieve data", "Competition class UpdateEngine: \"\"\"Prediction Engine - Update the data model", "Competition return Competition def update_competition(self, competition): \"\"\" Retrieve data and", "most recent fixtures and results for the supplied competition code." ]
[ "deckgl_doc_url = DECKGL_URL_BASE + deckgl_docs_layer_name # Create new .html examples", "f in EXAMPLE_GLOB] if not candidate_files: raise Exception(\"No files found", "for rtfd.io, set this variable from the Admin panel raise", "), shell=True, ) python_code = open(pydeck_example_file_name, \"r\").read() doc_source = DOC_TEMPLATE.render(", "pages with code These populate the files you see once", "GALLERY_DIR, HTML_DIR, HOSTED_STATIC_PATH from utils import to_presentation_name, to_snake_case_string from templates", "set\") def create_rst(pydeck_example_file_name): asset_name = to_snake_case_string(file_name=pydeck_example_file_name) deckgl_docs_layer_name = asset_name.replace(\"_\", \"-\")", "Obviously very rough, should change this eventually to handle views", "def create_rst(pydeck_example_file_name): asset_name = to_snake_case_string(file_name=pydeck_example_file_name) deckgl_docs_layer_name = asset_name.replace(\"_\", \"-\") deckgl_doc_url", "if \"layer\" in deckgl_docs_layer_name: # Don't add a deck.gl docs", "deckgl_doc_url = None if \"layer\" in deckgl_docs_layer_name: # Don't add", "None if \"layer\" in deckgl_docs_layer_name: # Don't add a deck.gl", "%s\" % HTML_DIR, shell=True) pool.map(create_rst, candidate_files) if __name__ == \"__main__\":", "into .rst pages with code These populate the files you", "\"{python} {fname}; mv {html_src} {html_dest}\".format( python=sys.executable, fname=pydeck_example_file_name, html_src=html_fname, html_dest=HTML_DIR ),", "for f in EXAMPLE_GLOB] if not candidate_files: raise Exception(\"No files", "examples into .rst pages with code These populate the files", "shell=True, ) python_code = open(pydeck_example_file_name, \"r\").read() doc_source = DOC_TEMPLATE.render( page_title=to_presentation_name(asset_name),", "subprocess.call(\"mkdir -p %s\" % HTML_DIR, shell=True) pool.map(create_rst, candidate_files) if __name__", "# Create new .html examples html_fname = os.path.basename(pydeck_example_file_name).replace(\".py\", 
\".html\") #", "= DOC_TEMPLATE.render( page_title=to_presentation_name(asset_name), snake_name=asset_name, python_code=python_code, hosted_html_path=os.path.join(HOSTED_STATIC_PATH, html_fname), deckgl_doc_url=deckgl_doc_url, ) rst_path", "if not os.environ.get(\"MAPBOX_API_KEY\"): # If running for rtfd.io, set this", "= DECKGL_URL_BASE + deckgl_docs_layer_name # Create new .html examples html_fname", "not set\") def create_rst(pydeck_example_file_name): asset_name = to_snake_case_string(file_name=pydeck_example_file_name) deckgl_docs_layer_name = asset_name.replace(\"_\",", "populate the files you see once you click into a", "+ \".rst\") f = open(rst_path, \"w+\") print(\"* Converted %s to", "os.path.basename(pydeck_example_file_name).replace(\".py\", \".html\") # Run the pydeck example and move the", "\"layer\" in deckgl_docs_layer_name: # Don't add a deck.gl docs link", "\"r\").read() doc_source = DOC_TEMPLATE.render( page_title=to_presentation_name(asset_name), snake_name=asset_name, python_code=python_code, hosted_html_path=os.path.join(HOSTED_STATIC_PATH, html_fname), deckgl_doc_url=deckgl_doc_url,", "If running for rtfd.io, set this variable from the Admin", "Pool(processes=4) candidate_files = [f for f in EXAMPLE_GLOB] if not", "= Pool(processes=4) candidate_files = [f for f in EXAMPLE_GLOB] if", "found to convert\") subprocess.call(\"mkdir -p %s\" % HTML_DIR, shell=True) pool.map(create_rst,", "HOSTED_STATIC_PATH from utils import to_presentation_name, to_snake_case_string from templates import DOC_TEMPLATE", "multiprocessing import Pool import os import subprocess import sys from", "def main(): pool = Pool(processes=4) candidate_files = [f for f", "snake_name=asset_name, python_code=python_code, hosted_html_path=os.path.join(HOSTED_STATIC_PATH, html_fname), deckgl_doc_url=deckgl_doc_url, ) rst_path = os.path.join(GALLERY_DIR, asset_name", "to_presentation_name, to_snake_case_string from templates import DOC_TEMPLATE if not 
os.environ.get(\"MAPBOX_API_KEY\"): #", "import Pool import os import subprocess import sys from const", "very rough, should change this eventually to handle views etc", "Converted %s to %s\" % (pydeck_example_file_name, rst_path)) f.write(doc_source) f.close() def", "DECKGL_URL_BASE + deckgl_docs_layer_name # Create new .html examples html_fname =", "pydeck example and move the .html output subprocess.call( \"{python} {fname};", "\".rst\") f = open(rst_path, \"w+\") print(\"* Converted %s to %s\"", "-p %s\" % HTML_DIR, shell=True) pool.map(create_rst, candidate_files) if __name__ ==", "{html_src} {html_dest}\".format( python=sys.executable, fname=pydeck_example_file_name, html_src=html_fname, html_dest=HTML_DIR ), shell=True, ) python_code", "not candidate_files: raise Exception(\"No files found to convert\") subprocess.call(\"mkdir -p", "# Don't add a deck.gl docs link if we're not", "deck.gl docs link if we're not referencing a layer #", "os import subprocess import sys from const import DECKGL_URL_BASE, EXAMPLE_GLOB,", "variable from the Admin panel raise Exception(\"MAPBOX_API_KEY not set\") def", "= os.path.join(GALLERY_DIR, asset_name + \".rst\") f = open(rst_path, \"w+\") print(\"*", "to handle views etc deckgl_doc_url = DECKGL_URL_BASE + deckgl_docs_layer_name #", "once you click into a grid cell on the pydeck", "# Run the pydeck example and move the .html output", "and move the .html output subprocess.call( \"{python} {fname}; mv {html_src}", "candidate_files = [f for f in EXAMPLE_GLOB] if not candidate_files:", "deckgl_docs_layer_name = asset_name.replace(\"_\", \"-\") deckgl_doc_url = None if \"layer\" in", "pool = Pool(processes=4) candidate_files = [f for f in EXAMPLE_GLOB]", "pydeck gallery page \"\"\" from multiprocessing import Pool import os", "html_dest=HTML_DIR ), shell=True, ) python_code = open(pydeck_example_file_name, \"r\").read() doc_source =", "in EXAMPLE_GLOB] if not candidate_files: raise Exception(\"No files found to", "files you see 
once you click into a grid cell", "to %s\" % (pydeck_example_file_name, rst_path)) f.write(doc_source) f.close() def main(): pool", "f = open(rst_path, \"w+\") print(\"* Converted %s to %s\" %", "% (pydeck_example_file_name, rst_path)) f.write(doc_source) f.close() def main(): pool = Pool(processes=4)", "import DECKGL_URL_BASE, EXAMPLE_GLOB, GALLERY_DIR, HTML_DIR, HOSTED_STATIC_PATH from utils import to_presentation_name,", "from utils import to_presentation_name, to_snake_case_string from templates import DOC_TEMPLATE if", "in deckgl_docs_layer_name: # Don't add a deck.gl docs link if", "os.path.join(GALLERY_DIR, asset_name + \".rst\") f = open(rst_path, \"w+\") print(\"* Converted", "candidate_files: raise Exception(\"No files found to convert\") subprocess.call(\"mkdir -p %s\"", "the pydeck example and move the .html output subprocess.call( \"{python}", "link if we're not referencing a layer # Obviously very", "you click into a grid cell on the pydeck gallery", "HTML_DIR, HOSTED_STATIC_PATH from utils import to_presentation_name, to_snake_case_string from templates import", "if not candidate_files: raise Exception(\"No files found to convert\") subprocess.call(\"mkdir", "eventually to handle views etc deckgl_doc_url = DECKGL_URL_BASE + deckgl_docs_layer_name", "new .html examples html_fname = os.path.basename(pydeck_example_file_name).replace(\".py\", \".html\") # Run the", "convert\") subprocess.call(\"mkdir -p %s\" % HTML_DIR, shell=True) pool.map(create_rst, candidate_files) if", "the .html output subprocess.call( \"{python} {fname}; mv {html_src} {html_dest}\".format( python=sys.executable,", "from const import DECKGL_URL_BASE, EXAMPLE_GLOB, GALLERY_DIR, HTML_DIR, HOSTED_STATIC_PATH from utils", "\"-\") deckgl_doc_url = None if \"layer\" in deckgl_docs_layer_name: # Don't", "\".html\") # Run the pydeck example and move the .html", "Run the pydeck example and move the .html output subprocess.call(", "the pydeck gallery page \"\"\" from multiprocessing import 
Pool import", "deckgl_doc_url=deckgl_doc_url, ) rst_path = os.path.join(GALLERY_DIR, asset_name + \".rst\") f =", "%s to %s\" % (pydeck_example_file_name, rst_path)) f.write(doc_source) f.close() def main():", "EXAMPLE_GLOB] if not candidate_files: raise Exception(\"No files found to convert\")", "grid cell on the pydeck gallery page \"\"\" from multiprocessing", "{html_dest}\".format( python=sys.executable, fname=pydeck_example_file_name, html_src=html_fname, html_dest=HTML_DIR ), shell=True, ) python_code =", "html_fname), deckgl_doc_url=deckgl_doc_url, ) rst_path = os.path.join(GALLERY_DIR, asset_name + \".rst\") f", "a grid cell on the pydeck gallery page \"\"\" from", "rst_path)) f.write(doc_source) f.close() def main(): pool = Pool(processes=4) candidate_files =", "import to_presentation_name, to_snake_case_string from templates import DOC_TEMPLATE if not os.environ.get(\"MAPBOX_API_KEY\"):", "panel raise Exception(\"MAPBOX_API_KEY not set\") def create_rst(pydeck_example_file_name): asset_name = to_snake_case_string(file_name=pydeck_example_file_name)", ".rst pages with code These populate the files you see", "subprocess import sys from const import DECKGL_URL_BASE, EXAMPLE_GLOB, GALLERY_DIR, HTML_DIR,", ".html output subprocess.call( \"{python} {fname}; mv {html_src} {html_dest}\".format( python=sys.executable, fname=pydeck_example_file_name,", ") python_code = open(pydeck_example_file_name, \"r\").read() doc_source = DOC_TEMPLATE.render( page_title=to_presentation_name(asset_name), snake_name=asset_name,", "# Obviously very rough, should change this eventually to handle", "raise Exception(\"No files found to convert\") subprocess.call(\"mkdir -p %s\" %", "f.close() def main(): pool = Pool(processes=4) candidate_files = [f for", "should change this eventually to handle views etc deckgl_doc_url =", "cell on the pydeck gallery page \"\"\" from multiprocessing import", "Pool import os import subprocess import sys from const import", "click into a grid cell on 
the pydeck gallery page", "os.environ.get(\"MAPBOX_API_KEY\"): # If running for rtfd.io, set this variable from", "python_code = open(pydeck_example_file_name, \"r\").read() doc_source = DOC_TEMPLATE.render( page_title=to_presentation_name(asset_name), snake_name=asset_name, python_code=python_code,", "Admin panel raise Exception(\"MAPBOX_API_KEY not set\") def create_rst(pydeck_example_file_name): asset_name =", "handle views etc deckgl_doc_url = DECKGL_URL_BASE + deckgl_docs_layer_name # Create", "add a deck.gl docs link if we're not referencing a", "output subprocess.call( \"{python} {fname}; mv {html_src} {html_dest}\".format( python=sys.executable, fname=pydeck_example_file_name, html_src=html_fname,", "Don't add a deck.gl docs link if we're not referencing", "import DOC_TEMPLATE if not os.environ.get(\"MAPBOX_API_KEY\"): # If running for rtfd.io,", "raise Exception(\"MAPBOX_API_KEY not set\") def create_rst(pydeck_example_file_name): asset_name = to_snake_case_string(file_name=pydeck_example_file_name) deckgl_docs_layer_name", "if we're not referencing a layer # Obviously very rough,", "mv {html_src} {html_dest}\".format( python=sys.executable, fname=pydeck_example_file_name, html_src=html_fname, html_dest=HTML_DIR ), shell=True, )", "sys from const import DECKGL_URL_BASE, EXAMPLE_GLOB, GALLERY_DIR, HTML_DIR, HOSTED_STATIC_PATH from", "html_src=html_fname, html_dest=HTML_DIR ), shell=True, ) python_code = open(pydeck_example_file_name, \"r\").read() doc_source", "rst_path = os.path.join(GALLERY_DIR, asset_name + \".rst\") f = open(rst_path, \"w+\")", "code These populate the files you see once you click", ") rst_path = os.path.join(GALLERY_DIR, asset_name + \".rst\") f = open(rst_path,", "DOC_TEMPLATE if not os.environ.get(\"MAPBOX_API_KEY\"): # If running for rtfd.io, set", "templates import DOC_TEMPLATE if not os.environ.get(\"MAPBOX_API_KEY\"): # If running for", "examples html_fname = os.path.basename(pydeck_example_file_name).replace(\".py\", \".html\") 
# Run the pydeck example", "a deck.gl docs link if we're not referencing a layer", "fname=pydeck_example_file_name, html_src=html_fname, html_dest=HTML_DIR ), shell=True, ) python_code = open(pydeck_example_file_name, \"r\").read()", "[f for f in EXAMPLE_GLOB] if not candidate_files: raise Exception(\"No", "files found to convert\") subprocess.call(\"mkdir -p %s\" % HTML_DIR, shell=True)", "not referencing a layer # Obviously very rough, should change", "gallery page \"\"\" from multiprocessing import Pool import os import", "DECKGL_URL_BASE, EXAMPLE_GLOB, GALLERY_DIR, HTML_DIR, HOSTED_STATIC_PATH from utils import to_presentation_name, to_snake_case_string", "rtfd.io, set this variable from the Admin panel raise Exception(\"MAPBOX_API_KEY", "views etc deckgl_doc_url = DECKGL_URL_BASE + deckgl_docs_layer_name # Create new", "subprocess.call( \"{python} {fname}; mv {html_src} {html_dest}\".format( python=sys.executable, fname=pydeck_example_file_name, html_src=html_fname, html_dest=HTML_DIR", "= open(rst_path, \"w+\") print(\"* Converted %s to %s\" % (pydeck_example_file_name,", "we're not referencing a layer # Obviously very rough, should", "+ deckgl_docs_layer_name # Create new .html examples html_fname = os.path.basename(pydeck_example_file_name).replace(\".py\",", "this eventually to handle views etc deckgl_doc_url = DECKGL_URL_BASE +", "page_title=to_presentation_name(asset_name), snake_name=asset_name, python_code=python_code, hosted_html_path=os.path.join(HOSTED_STATIC_PATH, html_fname), deckgl_doc_url=deckgl_doc_url, ) rst_path = os.path.join(GALLERY_DIR,", "on the pydeck gallery page \"\"\" from multiprocessing import Pool", "= to_snake_case_string(file_name=pydeck_example_file_name) deckgl_docs_layer_name = asset_name.replace(\"_\", \"-\") deckgl_doc_url = None if", "docs link if we're not referencing a layer # Obviously", "= os.path.basename(pydeck_example_file_name).replace(\".py\", \".html\") # Run the pydeck example and move", "% HTML_DIR, shell=True) 
pool.map(create_rst, candidate_files) if __name__ == \"__main__\": main()", "asset_name.replace(\"_\", \"-\") deckgl_doc_url = None if \"layer\" in deckgl_docs_layer_name: #", "this variable from the Admin panel raise Exception(\"MAPBOX_API_KEY not set\")", "f.write(doc_source) f.close() def main(): pool = Pool(processes=4) candidate_files = [f", "not os.environ.get(\"MAPBOX_API_KEY\"): # If running for rtfd.io, set this variable", "into a grid cell on the pydeck gallery page \"\"\"", "with code These populate the files you see once you", "asset_name = to_snake_case_string(file_name=pydeck_example_file_name) deckgl_docs_layer_name = asset_name.replace(\"_\", \"-\") deckgl_doc_url = None", "= asset_name.replace(\"_\", \"-\") deckgl_doc_url = None if \"layer\" in deckgl_docs_layer_name:", "{fname}; mv {html_src} {html_dest}\".format( python=sys.executable, fname=pydeck_example_file_name, html_src=html_fname, html_dest=HTML_DIR ), shell=True,", "to_snake_case_string(file_name=pydeck_example_file_name) deckgl_docs_layer_name = asset_name.replace(\"_\", \"-\") deckgl_doc_url = None if \"layer\"", "open(pydeck_example_file_name, \"r\").read() doc_source = DOC_TEMPLATE.render( page_title=to_presentation_name(asset_name), snake_name=asset_name, python_code=python_code, hosted_html_path=os.path.join(HOSTED_STATIC_PATH, html_fname),", "you see once you click into a grid cell on", "from multiprocessing import Pool import os import subprocess import sys", "asset_name + \".rst\") f = open(rst_path, \"w+\") print(\"* Converted %s", "print(\"* Converted %s to %s\" % (pydeck_example_file_name, rst_path)) f.write(doc_source) f.close()", ".html examples html_fname = os.path.basename(pydeck_example_file_name).replace(\".py\", \".html\") # Run the pydeck", "etc deckgl_doc_url = DECKGL_URL_BASE + deckgl_docs_layer_name # Create new .html", "These populate the files you see once you click into", "referencing a layer # Obviously very rough, should change this", "running for rtfd.io, set 
this variable from the Admin panel", "create_rst(pydeck_example_file_name): asset_name = to_snake_case_string(file_name=pydeck_example_file_name) deckgl_docs_layer_name = asset_name.replace(\"_\", \"-\") deckgl_doc_url =", "layer # Obviously very rough, should change this eventually to", "import sys from const import DECKGL_URL_BASE, EXAMPLE_GLOB, GALLERY_DIR, HTML_DIR, HOSTED_STATIC_PATH", "= None if \"layer\" in deckgl_docs_layer_name: # Don't add a", "change this eventually to handle views etc deckgl_doc_url = DECKGL_URL_BASE", "python_code=python_code, hosted_html_path=os.path.join(HOSTED_STATIC_PATH, html_fname), deckgl_doc_url=deckgl_doc_url, ) rst_path = os.path.join(GALLERY_DIR, asset_name +", "Exception(\"MAPBOX_API_KEY not set\") def create_rst(pydeck_example_file_name): asset_name = to_snake_case_string(file_name=pydeck_example_file_name) deckgl_docs_layer_name =", "from templates import DOC_TEMPLATE if not os.environ.get(\"MAPBOX_API_KEY\"): # If running", "DOC_TEMPLATE.render( page_title=to_presentation_name(asset_name), snake_name=asset_name, python_code=python_code, hosted_html_path=os.path.join(HOSTED_STATIC_PATH, html_fname), deckgl_doc_url=deckgl_doc_url, ) rst_path =", "embed pydeck examples into .rst pages with code These populate", "EXAMPLE_GLOB, GALLERY_DIR, HTML_DIR, HOSTED_STATIC_PATH from utils import to_presentation_name, to_snake_case_string from", "to_snake_case_string from templates import DOC_TEMPLATE if not os.environ.get(\"MAPBOX_API_KEY\"): # If", "(pydeck_example_file_name, rst_path)) f.write(doc_source) f.close() def main(): pool = Pool(processes=4) candidate_files", "page \"\"\" from multiprocessing import Pool import os import subprocess", "utils import to_presentation_name, to_snake_case_string from templates import DOC_TEMPLATE if not", "%s\" % (pydeck_example_file_name, rst_path)) f.write(doc_source) f.close() def main(): pool =", "html_fname = os.path.basename(pydeck_example_file_name).replace(\".py\", \".html\") # Run the 
pydeck example and", "import os import subprocess import sys from const import DECKGL_URL_BASE,", "main(): pool = Pool(processes=4) candidate_files = [f for f in", "const import DECKGL_URL_BASE, EXAMPLE_GLOB, GALLERY_DIR, HTML_DIR, HOSTED_STATIC_PATH from utils import", "Create new .html examples html_fname = os.path.basename(pydeck_example_file_name).replace(\".py\", \".html\") # Run", "to convert\") subprocess.call(\"mkdir -p %s\" % HTML_DIR, shell=True) pool.map(create_rst, candidate_files)", "rough, should change this eventually to handle views etc deckgl_doc_url", "doc_source = DOC_TEMPLATE.render( page_title=to_presentation_name(asset_name), snake_name=asset_name, python_code=python_code, hosted_html_path=os.path.join(HOSTED_STATIC_PATH, html_fname), deckgl_doc_url=deckgl_doc_url, )", "example and move the .html output subprocess.call( \"{python} {fname}; mv", "to embed pydeck examples into .rst pages with code These", "\"\"\"Script to embed pydeck examples into .rst pages with code", "\"w+\") print(\"* Converted %s to %s\" % (pydeck_example_file_name, rst_path)) f.write(doc_source)", "see once you click into a grid cell on the", "the Admin panel raise Exception(\"MAPBOX_API_KEY not set\") def create_rst(pydeck_example_file_name): asset_name", "pydeck examples into .rst pages with code These populate the", "a layer # Obviously very rough, should change this eventually", "Exception(\"No files found to convert\") subprocess.call(\"mkdir -p %s\" % HTML_DIR,", "the files you see once you click into a grid", "deckgl_docs_layer_name # Create new .html examples html_fname = os.path.basename(pydeck_example_file_name).replace(\".py\", \".html\")", "move the .html output subprocess.call( \"{python} {fname}; mv {html_src} {html_dest}\".format(", "from the Admin panel raise Exception(\"MAPBOX_API_KEY not set\") def create_rst(pydeck_example_file_name):", "= open(pydeck_example_file_name, \"r\").read() doc_source = DOC_TEMPLATE.render( 
page_title=to_presentation_name(asset_name), snake_name=asset_name, python_code=python_code, hosted_html_path=os.path.join(HOSTED_STATIC_PATH,", "hosted_html_path=os.path.join(HOSTED_STATIC_PATH, html_fname), deckgl_doc_url=deckgl_doc_url, ) rst_path = os.path.join(GALLERY_DIR, asset_name + \".rst\")", "import subprocess import sys from const import DECKGL_URL_BASE, EXAMPLE_GLOB, GALLERY_DIR,", "set this variable from the Admin panel raise Exception(\"MAPBOX_API_KEY not", "deckgl_docs_layer_name: # Don't add a deck.gl docs link if we're", "python=sys.executable, fname=pydeck_example_file_name, html_src=html_fname, html_dest=HTML_DIR ), shell=True, ) python_code = open(pydeck_example_file_name,", "# If running for rtfd.io, set this variable from the", "= [f for f in EXAMPLE_GLOB] if not candidate_files: raise", "open(rst_path, \"w+\") print(\"* Converted %s to %s\" % (pydeck_example_file_name, rst_path))", "\"\"\" from multiprocessing import Pool import os import subprocess import" ]
[ "isRot(value): try: a = int(value.split('_')[0].split('R')[1]) b = int(value.split('_')[1][0]) c =", "-1, -1, -1 # return true if it has a", "of 'Ri' # return true if it is a float", "COM (center of mass) position and velocity\\n') out_file.write(' * feet", "xg out_file.write('\\n // jacobian com absolute positions\\n') out_file.write(' if (flag_jacob)\\n", ", 'TorsoYaw_id' , # trunk 'RightShPitch_id' , 'RightShRoll_id' , 'RightShYaw_id'", "two pitch hip rotations # inertial frame: located at the", "= {};\\n'.format(i, xp_r[i])) out_file.write('\\n') out_file.write(' // right foot jacobian\\n') out_file.write('", "nb_bodies*[None] xj = nb_bodies*[None] xgj = nb_bodies*[None] Rj_print = nb_bodies*[None]", "symbolic jacobian of an anchor point def write_symb_xj(nb_bodies, Rj, xj,", "* [None] for i in range(0, nb_contacts): Dpt_r_foot_cont[i] = sp.zeros(3,", "out_file.write(' * // //\\n') out_file.write(' * ////////////////////////\\n') out_file.write(' *\\n') out_file.write('", "out_file.write(' }\\n\\n') out_file.write(' // wrists absolute orientation\\n') for i in", "for j in range(0,9): cur_jac = symbolic_jacob_der(Rj, xj, xgj, R_r_elb[j],", "i-1), expressed in the relative # frame of the previous", "{\\n') flag_first = 0 for i in range (1, nb_bodies):", "in_out.IMU_Orientation[{}];\\n'.format(i+1, j+1, 3*i+j)) out_file.write('\\n // IMU - angles velocity\\n') for", "'') for i in range(1, nb_bodies): parent_id = parent_body_index[i] x[i]", "sp.Symbol('L_2_27'), sp.Symbol('L_3_27')]) Dg[22] = sp.Matrix([sp.Symbol('L_1_28'), sp.Symbol('L_2_28'), sp.Symbol('L_3_28')]) Dg[23] = sp.Matrix([sp.Symbol('L_1_29'),", "out_file.write('\\n') out_file.write(' // right wrist jacobian\\n') out_file.write(' if (flag_jacob)\\n {\\n')", "pos_str == 's{}'.format(der_q): result += sp.Symbol('c{}'.format(der_q)) # other else: [rot_flag,", "Rj, xj, xgj, der_var) # one product elif cur_len ==", "frame of the current body i\\n') out_file.write(' * Omi :", "range(0, nb_bodies): Rj[i] = 
nb_bodies*[None] xj[i] = nb_bodies*[None] xgj[i] =", "right arm 2, 1, 3, 2 # left arm ])", "# compute the time derivatives of 'yaw_pitch_roll_angles' def theta_dot_compute(out_file, omega_in,", "orientation def wrists_compute(out_file, joint_id_names, R, x, xp, om, Rj, xj,", "cos(in_out.theta_waist[1]);\\n') out_file.write(' c_y_torso = cos(in_out.theta_torso[1]);\\n') out_file.write(' c_z_waist = cos(in_out.theta_waist[2]);\\n') out_file.write('", "cur_jac)) out_file.write(' }\\n\\n') out_file.write(' // right foot orientation matrix as", "== 's{}'.format(der_q): result += sp.Symbol('c{}'.format(der_q)) # other else: [rot_flag, a,", "+= 1 if count >= nb_max_line: out_write.write(';\\n') count = 0", "out_file.write(' * // 17 16 21 //\\n') out_file.write(' * //", "range(0, 9): out_file.write(' in_out.Rwrist_or[{}] = {};\\n'.format(i, R_r_elb[i])) out_file.write('\\n') for i", "range(0,9): cur_jac = symbolic_jacob_der(Rj, xj, xgj, R_l_elb[j], 'q{}'.format(i+1)) if cur_jac", "[sine, 0.0, cosine]]) elif axis == 3: return np.array([[cosine, sine,", "cosine]]) elif axis == 3: return np.array([[cosine, sine, 0.0], [-sine,", "in range(1, len(cur_split)-1): new_string = '{}{}'.format(new_string, cur_split[i]) else: new_string =", "of the file def write_file_end(out_file): out_file.write('}\\n') # print matrix components", "previous body\\n') out_file.write(' * DGi : position vector from the", "cur_jac = symbolic_jacob_der(Rj, xj, xgj, x_r[j], 'q{}'.format(i+1)) if cur_jac !=", "cur_jac != 0: if not flag_first: flag_first = 1 flag_print", "variables initialization def write_intialization(out_file, nb_bodies, joint_id_names): out_file.write(' // -- variables", "xp[0], 'xp1_', '') for i in range(1, nb_bodies): parent_id =", "3): if xgj_print[i][j][k] != None: if not flag_first: flag_first =", "all terms term_list = str(symb_var).replace('- ','-').replace('-','+-').split('+') if term_list[0] == '':", "0.08, -0.045, 0.045) wrists_compute(out_file, joint_id_names, R, x, xp, om, 
Rj,", "in range(1, len(joint_id_names)): count += 1 if i == 1:", "expression (for jacobian) def symbolic_jacob_der(Rj, xj, xgj, symb_var, der_var): #", "Rt[i] * Dg[i] xgp[i] = xp[i] + om_tilde[i] * (Rt[i]", "= open(in_temp, 'w') # beginning of the file write_file_beginning(file_temp, joint_id_names)", "xp_l_foot + om_tilde_l_foot * (R_l_foot.T * Dpt_l_foot) for i in", "= sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_43')]) # left arm Dpt[20] = sp.Matrix([sp.Symbol('DPT_1_37'),", "range(0, 3): for j in range(0, 3): R[0][i,j] = sp.Symbol('IMU{}{}'.format(i+1,", "'') Rt[0] = R[0].T for i in range(1, nb_bodies): Rd[i]", "vector[i]).replace('1.0*','') new_vector[i] = sp.Symbol(elem_name) return new_vector, save_vector # save the", "'theta_Rfoot', R[r_foot_id], 1) out_file.write('\\n') out_file.write(' // left foot orientation matrix", "// waist orientation matrix as angles [rad]\\n') yaw_pitch_roll_angles(out_file, 'theta_waist', R[waist_id],", "* Dg[i] xgp[i] = xp[i] + om_tilde[i] * (Rt[i] *", "loop on all the lines for line in f: cut_line", "sp.Symbol('L_3_28')]) Dg[23] = sp.Matrix([sp.Symbol('L_1_29'), sp.Symbol('L_2_29'), sp.Symbol('L_3_29')]) # masses M =", "Dg[4] = sp.Matrix([sp.Symbol('L_1_10'), sp.Symbol('L_2_10'), sp.Symbol('L_3_10')]) Dg[5] = sp.Matrix([sp.Symbol('L_1_11'), sp.Symbol('L_2_11'), sp.Symbol('L_3_11')])", "count = 0 if count != 0: out_write.write(';\\n') # get", "the vector elements for j in range(0, 3): new_vector[j] =", "save_matrix = 9 * [None] for i in range(0,3): for", "flag_print = 1 out_file.write('\\n') out_file.write('{}'.format(xj_print[i][j][k])) out_file.write(' }\\n') # xg &", "* omi : absolute rotational vector of body i\\n') out_file.write('", "xj, xgj, Rj_print, R_matrix, index): # loop on all the", "= Dpt_r_foot_cont[i][j] x_r_cont = nb_contacts * [None] x_l_cont = nb_contacts", "(indexes %a, %b, %c also returned) def isRot(value): try: a", "np.array([]) # get vector axis def get_vector_axis(axis, direct, elem): if", "leg 'M_13', 'M_14', 
'M_15', 'M_16', 'M_17', 'M_18', # left leg", "= nb_bodies*[None] xgj = nb_bodies*[None] Rj_print = nb_bodies*[None] xj_print =", "-v[0]], [-v[1], v[0], 0.0]]) # get rotation matrix def get_rotation_matrix(axis,", "0.0, cosine]]) elif axis == 3: return np.array([[cosine, -sine, 0.0],", "13 //\\n') out_file.write(' * // //\\n') out_file.write(' * ////////////////////////\\n') out_file.write('", "of body i to its COM (center of mass) G_i,\\n')", "is not always body i-1), expressed in the relative #", "11 //\\n') out_file.write(' * // 06 12 //\\n') out_file.write(' *", "21 # # 18 15 22 # # 19 14", "omega_in[0], body_part, omega_in[1])) out_file.write(' in_out.{}[1] = c_z_{}*{} - s_z_{}*{};\\n'.format(omega_out, body_part,", "// right wrist absolute orientation jacobian\\n') out_file.write(' if (flag_jacob)\\n {\\n')", "Dpt, Dg, M): # temporary file in_temp = './{}_temp.cc'.format(out_file_name) file_temp", "* (Rt[i] * Dg[i]) write_symb_xgj(nb_bodies, Rj, xj, xgj, xgj_print, xg[i],", "= 0 pos_str = elem_split[0] elif cur_len == 2: #", "# results out_file.write('\\n // -- Collecting results -- //\\n\\n') com_compute(out_file,", "cut_line_1 = line.split(elem) cut_line_2 = line.split(' = ') if len(cut_line_1)", "left arm ]) # parent index parent_body_index = np.array([ -1,", "range(0, nb_bodies): if flag_first: out_file.write(' + {}*{}'.format(M[j], xg[j][i])) else: flag_first", "15 22 # # 19 14 23 # # 20", "[rad]\\n') yaw_pitch_roll_angles(out_file, 'theta_Rfoot', R[r_foot_id], 1) out_file.write('\\n') out_file.write(' // left foot", "16 21 # # 18 15 22 # # 19", "om[i] = om[parent_id] + Rt[parent_id] * Om[i] om[i] = write_symb_vector(out_file,", "= write_symb_vector(out_file, x[0], 'x1_', '') xp[0] = write_symb_vector(out_file, xp[0], 'xp1_',", "compute the wrists position, velocity and orientation def wrists_compute(out_file, joint_id_names,", "{};\\n'.format(get_string_enum(joint_id_names[i]), j, cur_jac)) out_file.write(' }\\n\\n') out_file.write(' // left wrist 
absolute", "= {};\\n'.format(i, x_r[i])) out_file.write('\\n') out_file.write(' // right foot absolute velocity\\n')", "result = 0 # cosine if pos_str == 'c{}'.format(der_q): result", "= nb_bodies*[None] Rj_print = nb_bodies*[None] xj_print = nb_bodies*[None] xgj_print =", "print_all_declaration(in_temp, out_file, 100) out_file.write('\\n\\n') # copy temporary file out_file.write(line) out_file.close()", "sp.Symbol('DPT_2_39'), 0.0]) Dpt[18] = sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_41')]) Dpt[19] = sp.Matrix([0.0,", "leg 'TorsoRoll_id' , 'TorsoPitch_id' , 'TorsoYaw_id' , # trunk 'RightShPitch_id'", "sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_10')]) Dpt[5] = sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_12')]) Dpt[6] =", "new_string)) new_string = cur_split[0].upper() for i in range(1, len(cur_split)): new_string", "Dpt_r_wrist) xp_l = xp_l_elb + om_tilde_l_elb * (R_l_elb.T * Dpt_l_wrist)", "matrix[i,j] == 1: new_matrix[i,j] = matrix[i,j] else: flag_print = 1", "'M_6', # waist 'M_7' , 'M_8' , 'M_9' , 'M_10',", "= atan2(-{}, -{});\\n'.format(angle_name, R_matrix[1], R_matrix[0])) # compute the time derivatives", "is a float def isInt(value): try: int(value) return True except:", "* ////////////////////////\\n') out_file.write(' *\\n') out_file.write(' * origin: in the waist,", "i in range(0, nb_contacts): for j in range(0, 3): Dpt_l_foot_cont[i][j]", "in range(0, nb_bodies): xg[i] = x[i] + Rt[i] * Dg[i]", "of xi\\n') out_file.write(' * xgpi : derivative of xgi\\n') out_file.write('", "velocities\\n') x = nb_bodies*[None] xp = nb_bodies*[None] x[0] = Rt[0]", "07 13 # # # ###################### # # origin: in", "i in range(0,3): out_file.write(' in_out.r_Lfoot[{}] = {};\\n'.format(i, x_l[i])) out_file.write('\\n') out_file.write('", "Dpt_r_foot_cont[0][1] = y_min Dpt_r_foot_cont[1][1] = y_max Dpt_r_foot_cont[2][1] = y_min Dpt_r_foot_cont[3][1]", "derivative of xgi # omi : absolute rotational vector of", "xp = nb_bodies*[None] x[0] = Rt[0] * Dpt[0] xp[0] =", "j in range(0, 
nb_bodies): if flag_first: out_file.write(' + {}*{}'.format(M[j], xg[j][i]))", "Dpt[2] = sp.Matrix([0.0, sp.Symbol('DPT_2_6'), 0.0]) Dpt[3] = sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_8')])", "Dg[0] = sp.Matrix([sp.Symbol('L_1_6'), sp.Symbol('L_2_6'), sp.Symbol('L_3_6')]) # right leg Dg[1] =", "foot orientation matrix as angles [rad]\\n') yaw_pitch_roll_angles(out_file, 'theta_Lfoot', R[l_foot_id], 1)", "if cur_len == 1: result += der_elem(cur_term_split[0], Rj, xj, xgj,", "declaration -- //\\n\\n') print_all_declaration(in_temp, out_file, 100) out_file.write('\\n\\n') # copy temporary", "'xg{}_'.format(i+1), '') xgp[i] = write_symb_vector(out_file, xgp[i], 'xgp{}_'.format(i+1), '') # jacobian", "sp.Symbol('L_3_6')]) # right leg Dg[1] = sp.Matrix([sp.Symbol('L_1_7') , sp.Symbol('L_2_7') ,", "derivative of an element (for jacobian) def der_elem(elem_str, Rj, xj,", "3, # trunk 2, 1, 3, 2, # right arm", "out_file.write(' c_z_torso = cos(in_out.theta_torso[2]);\\n\\n') out_file.write(' s_y_waist = sin(in_out.theta_waist[1]);\\n') out_file.write(' s_y_torso", "R_r_foot = R[r_foot_id] R_l_foot = R[l_foot_id] Dpt_r_foot = sp.zeros(3, 1)", "// feet absolute orientation\\n') for i in range(0, 9): out_file.write('", "model\\n') out_file.write(' */\\n\\n') out_file.write('// joints enumeration\\n') out_file.write('enum {') count =", "'.format(get_string_enum(joint_id_names[i]), j)) flag_first = 0 for k in range(0, nb_bodies):", "= sp.zeros(3, 1) Dpt_l_foot = sp.zeros(3, 1) Dpt_r_foot[2] = sp.Symbol('DPT_3_16')", "out_file.write('\\n') out_file.write(' // global com jacobian\\n') out_file.write(' if (flag_jacob)\\n {\\n')", "inv_c_y_waist = 1.0 / c_y_waist;\\n') out_file.write(' inv_c_y_torso = 1.0 /", "joint_id_names, R, x, xp, om, Rj, xj, xgj, r_elb_id, l_elb_id,", "{} = {};\\n'.format(elem_name, vector[i]).replace('1.0*','')) new_vector[i] = sp.Symbol(elem_name) if flag_print: out_file.write('\\n')", "R_matrix[0], R_matrix[0], R_matrix[1], R_matrix[1])) out_file.write(' 
in_out.{}[2] = atan2(-{}, -{});\\n'.format(angle_name, R_matrix[1],", "* // //\\n') out_file.write(' * ////////////////////////\\n') out_file.write(' *\\n') out_file.write(' *", "= sp.Matrix([sp.Symbol('L_1_27'), sp.Symbol('L_2_27'), sp.Symbol('L_3_27')]) Dg[22] = sp.Matrix([sp.Symbol('L_1_28'), sp.Symbol('L_2_28'), sp.Symbol('L_3_28')]) Dg[23]", "foot absolute velocity\\n') for i in range(0,3): out_file.write(' in_out.rp_Rfoot[{}] =", "# masses M = np.array([ 'M_6', # waist 'M_7' ,", "# end of the file write_file_end(file_temp) file_temp.close() # output file", "'_d{}'.format(i+1)) # write symbolic jacobian of a com point def", "= int(der_var.replace('q','')) # detect positive/negative elem_split = elem_str.split('-') cur_len =", "'LeftShRoll_id' , 'LeftShYaw_id' , 'LeftElbPitch_id' # left arm ]) out_file_name", "# trunk 'RightShPitch_id' , 'RightShRoll_id' , 'RightShYaw_id' , 'RightElbPitch_id', #", "before body i (1:x, 2:y, 3:z) rot_axis = np.array([0, #", "//\\n') out_file.write(' * // 02 08 //\\n') out_file.write(' * //", "of the previous body\\n') out_file.write(' * Rdi : rotational matrix", "angles [rad]\\n') yaw_pitch_roll_angles(out_file, 'theta_torso', R[torso_id], 1) out_file.write('\\n') out_file.write(' c_y_waist =", "not flag_print: flag_print = 1 out_file.write('\\n') out_file.write(' in_out.r_Rfoot_cont_der[{}][{}][{}] = {};\\n'.format(i,", "is not always body i-1), expressed in the relative\\n') out_file.write('", "2, 1, 3, 2, 1, 2, # right leg 2,", "out_file.write(' c_z_Rfoot = cos(in_out.theta_Rfoot[2]);\\n') out_file.write(' c_z_Lfoot = cos(in_out.theta_Lfoot[2]);\\n\\n') out_file.write(' s_y_Rfoot", "x_r_cont[i][j])) out_file.write('\\n') out_file.write(' // right foot contact points jacobian\\n') out_file.write('", "nb_contacts): for j in range (1, nb_bodies): flag_print = 0", "out_file.write(' * Di : position vector from the anchor point", "in_out.r_Lfoot_cont_der[{}][{}][{}] = {};\\n'.format(i, get_string_enum(joint_id_names[j]), k, 
cur_jac)) out_file.write(' }\\n\\n') out_file.write(' //", "foot orientation angle derivatives [rad/s]\\n') theta_dot_compute(out_file, om[r_foot_id], 'omega_Rfoot', 'Rfoot') out_file.write('\\n')", "out_file.write(line) out_file.close() # remove temporary file os.remove(in_temp) # main script", ", 'LeftFootPitch_id' , # left leg 'TorsoRoll_id' , 'TorsoPitch_id' ,", "e] = isVec(pos_str) # rotation matrix if rot_flag: result +=", "absolute velocity\\n') for i in range(0, 3): out_file.write(' in_out.rp_COM[{}] =", "# writing outputs out_file.write(' // right wrist absolute position\\n') for", "left leg 'M_19', 'M_20', 'M_21', # trunk 'M_22', 'M_23', 'M_24',", "0.0, 0.0], [0.0, cosine, -sine], [0.0, sine, cosine]]) elif axis", "theta_dot_compute(out_file, om[torso_id], 'omega_torso', 'torso') # compute the feet position, velocity", "G_i, # expressed in the relative frame of the current", "(c_z_{}*{} + s_z_{}*{});\\n'.format(omega_out, body_part, body_part, omega_in[0], body_part, omega_in[1])) out_file.write(' in_out.{}[1]", "in range (1, nb_bodies): flag_print = 0 for j in", "new_vector[i] = sp.Symbol(elem_name) return new_vector, save_vector # save the symbolic", "count == 0: out_write.write(' double {}'.format(cut_line[0].strip())) else: out_write.write(', {}'.format(cut_line[0].strip())) count", "elif cur_len == 2: # negative neg_flag = 1 pos_str", "out_file.write('\\n // IMU - rotation matrices\\n') for i in range(0,", "all the joints for i in range (1, nb_bodies): new_matrix", "9 * [None] xj_print[i][j] = 3 * [None] xgj_print[i][j] =", "-- //')) != 1: out_file.write(' // -- variables declaration --", "flag_print = 0 for i in range(0,3): for j in", "2 # left arm ]) # parent index parent_body_index =", "sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_22')]) Dpt[11] = sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_24')]) Dpt[12] =", "in_out.r_Lwrist[{}] = {};\\n'.format(i, x_l[i])) out_file.write('\\n') out_file.write(' // left wrist absolute", "xj, xgj, R_r_foot[j], 
'q{}'.format(i+1)) if cur_jac != 0: if not", "end_name): new_vector = sp.zeros(3, 1) flag_print = 0 for i", "\\n') out_file.write(' * \\\\author <NAME>\\n') out_file.write(' * \\\\file forward_kinematics.cc\\n') out_file.write('", "cos(in_out.theta_Lfoot[2]);\\n\\n') out_file.write(' s_y_Rfoot = sin(in_out.theta_Rfoot[1]);\\n') out_file.write(' s_y_Lfoot = sin(in_out.theta_Lfoot[1]);\\n') out_file.write('", "inputs and outputs class\\n') out_file.write(' *\\n') out_file.write(' * computation of:\\n')", "range(0,3): if vector[i] == 0 or vector[i] == 1: new_vector[i]", "new_matrix[j] = symbolic_jacob_der(Rj, xj, xgj, R_matrix[j], 'q{}'.format(i+1)) [Rj[index-1][i], Rj_print[index-1][i]] =", "19, 23, -0.02, -0.005, -0.225) torso_waist_angles(out_file, R, om, 0, 15)", "feet absolute orientation\\n') for i in range(0, 9): out_file.write(' in_out.Rfoot_or[{}]", "def write_symb_xj(nb_bodies, Rj, xj, xgj, xj_print, x_vector, index): # loop", "1) save_vector = 3 * [None] for i in range(0,3):", "return new_matrix # save the symbolic vector for print def", "1 out_file.write('\\n') out_file.write(' in_out.r_Lfoot_cont_der[{}][{}][{}] = {};\\n'.format(i, get_string_enum(joint_id_names[j]), k, cur_jac)) out_file.write('", "= x_r_foot + R_r_foot.T * Dpt_r_foot_cont[i] x_l_cont[i] = x_l_foot +", "element (for jacobian) def der_elem(elem_str, Rj, xj, xgj, der_var): #", "'RightFootRoll_id', 'RightFootPitch_id', # right leg 'LeftHipPitch_id' , 'LeftHipRoll_id' , 'LeftHipYaw_id'", "c_y_Rfoot;\\n') out_file.write(' inv_c_y_Lfoot = 1.0 / c_y_Lfoot;\\n\\n') out_file.write(' // right", "[0.0, 1.0, 0.0], [sine, 0.0, cosine]]) elif axis == 3:", "print_save_symb_vector(vector, start_name, end_name): new_vector = sp.zeros(3, 1) save_vector = 3", "xp_l_elb = xp[l_elb_id] om_r_elb = om[r_elb_id] om_l_elb = om[l_elb_id] R_r_elb", "om_tilde[i] = get_tilde(om[i]) # x & xp out_file.write('\\n // anchor", "else: elem_name = '{}{}{}{}'.format(start_name, i+1, j+1, end_name) save_matrix[3*i+j] = '", 
"xgpi : derivative of xgi # omi : absolute rotational", "{});\\n'.format(angle_name, R_matrix[1], R_matrix[0])) else: # epsilon = -1 -> pitch", "position, velocity and orientation def feet_compute(out_file, joint_id_names, R, x, xp,", "range(1, len(cur_split)-1): new_string = '{}{}'.format(new_string, cur_split[i]) else: new_string = cur_string", "start_name, end_name): new_matrix = sp.zeros(3, 3) save_matrix = 9 *", "write_intialization(file_temp, nb_bodies, joint_id_names) # symbolic computation symbolic_computation(file_temp, nb_bodies, joint_id_names, rot_axis,", "sp import re import os ###################### # # # 17", "* // 05 11 //\\n') out_file.write(' * // 06 12", "{};\\n'.format(i, x_r[i])) out_file.write('\\n') out_file.write(' // right foot absolute velocity\\n') for", "Rj, xj, xgj, xj_print, x_vector, index): # loop on all", "if count >= nb_max_line: out_write.write(';\\n') count = 0 if count", "in range(1, nb_bodies): out_file.write(' Om{} = in_out.qd_mot[{}];\\n'.format(i+1, joint_id_names[i])) # write", "if xj_print[i][j][k] != None: if not flag_first: flag_first = 1", "np.array([[cosine, -sine, 0.0], [sine, cosine, 0.0], [0.0, 0.0, 1.0]]) else:", "Dg[3] = sp.Matrix([sp.Symbol('L_1_9') , sp.Symbol('L_2_9') , sp.Symbol('L_3_9')]) Dg[4] = sp.Matrix([sp.Symbol('L_1_10'),", "initialization -- //\\n') out_file.write('\\n // IMU - rotation matrices\\n') for", "i in range(0, nb_contacts): Dpt_r_foot_cont[i][2] = sp.Symbol('DPT_3_16') for i in", "Dpt_l_foot_cont[i] # writing outputs out_file.write(' // right foot absolute position\\n')", "left foot contact points jacobian\\n') out_file.write(' if (flag_jacob)\\n {\\n') flag_first", "the lines for line in f: cut_line_1 = line.split(elem) cut_line_2", "out_file.write('\\n') out_file.write(' in_out.Lwrist_or_der[{}][{}] = {};\\n'.format(get_string_enum(joint_id_names[i]), j, cur_jac)) out_file.write(' }\\n\\n') #", "return True, a, b except: return False, -1, -1 #", "sp.Symbol(elem_name) if flag_print: 
out_file.write('\\n') return new_vector # write symbolic matrix", "out_file.write('\\n\\n') # copy temporary file out_file.write(line) out_file.close() # remove temporary", "= 0 for k in range(0, 3): if xgj_print[i][j][k] !=", "left arm Dpt[20] = sp.Matrix([sp.Symbol('DPT_1_37'), sp.Symbol('DPT_2_37'), sp.Symbol('DPT_3_37')]) Dpt[21] = sp.Matrix([0.0,", "rotational matrix between body i and its predecessor # si", "= 1 out_file.write('\\n') out_file.write(' in_out.r_Lfoot_der[{}][{}] = {};\\n'.format(get_string_enum(joint_id_names[i]), j, cur_jac)) out_file.write('", "Dg[14] = sp.Matrix([sp.Symbol('L_1_20'), sp.Symbol('L_2_20'), sp.Symbol('L_3_20')]) Dg[15] = sp.Matrix([sp.Symbol('L_1_21'), sp.Symbol('L_2_21'), sp.Symbol('L_3_21')])", "Rji : jacobian of \\'Ri\\'\\n') out_file.write(' */\\n') out_file.write('void ForwardKinematics::main_kinematics(KinematicsInOut &in_out)\\n{\\n')", "# derivative axis der_q = int(der_var.replace('q','')) # detect positive/negative elem_split", "xp[i] + om_tilde[i] * (Rt[i] * Dg[i]) write_symb_xgj(nb_bodies, Rj, xj,", "com absolute positions\\n') out_file.write(' if (flag_jacob)\\n {\\n') flag_first = 0", "x[0] = write_symb_vector(out_file, x[0], 'x1_', '') xp[0] = write_symb_vector(out_file, xp[0],", "om, waist_id, torso_id): out_file.write(' // waist orientation matrix as angles", "return np.array([[1.0, 0.0, 0.0], [0.0, cosine, sine], [0.0, -sine, cosine]])", "= xp_r_foot + om_tilde_r_foot * (R_r_foot.T * Dpt_r_foot) xp_l =", "// -- variables declaration -- //\\n\\n') print_all_declaration(in_temp, out_file, 100) out_file.write('\\n\\n')", "= np.array([ 'M_6', # waist 'M_7' , 'M_8' , 'M_9'", "joint names joint_id_names = np.array(['0', # waist 'RightHipPitch_id', 'RightHipRoll_id', 'RightHipYaw_id',", "joint_id_names[i])) out_file.write('\\n // joint relative velocities\\n') for i in range(1,", "out_file.write(' if ((!c_y_waist) || (!c_y_torso))\\n {\\n') out_file.write(' return;\\n }\\n\\n') out_file.write('", "= x_min 
Dpt_r_foot_cont[2][0] = x_max Dpt_r_foot_cont[3][0] = x_max Dpt_r_foot_cont[0][1] =", "xpi : derivative of xi\\n') out_file.write(' * xgpi : derivative", "nb_contacts * [None] # computation om_tilde_r_foot = get_tilde(om_r_foot) om_tilde_l_foot =", "1 out_file.write('\\n') out_file.write(' in_out.r_Lwrist_der[{}][{}] = {};\\n'.format(get_string_enum(joint_id_names[i]), j, cur_jac)) out_file.write(' }\\n\\n')", "R_matrix[1], R_matrix[0])) else: # epsilon = -1 -> pitch angle", "leg Dpt[1] = sp.Matrix([0.0, sp.Symbol('DPT_2_2'), 0.0]) Dpt[2] = sp.Matrix([0.0, sp.Symbol('DPT_2_6'),", "+ Rt[i] * Dg[i] xgp[i] = xp[i] + om_tilde[i] *", "negative detection !'.format(cur_len)) exit() # compute derivative result = 0", "x[r_elb_id] x_l_elb = x[l_elb_id] xp_r_elb = xp[r_elb_id] xp_l_elb = xp[l_elb_id]", "left foot absolute position\\n') for i in range(0,3): out_file.write(' in_out.r_Lfoot[{}]", "current body i # (previous body is not always body", "* Omi : rotational vector from the previous body to", "matrix as angles [rad]\\n') yaw_pitch_roll_angles(out_file, 'theta_torso', R[torso_id], 1) out_file.write('\\n') out_file.write('", "3): Dpt_l_foot_cont[i][j] = Dpt_r_foot_cont[i][j] x_r_cont = nb_contacts * [None] x_l_cont", "expressed in the relative frame of the current body i", "matrices\\n') for i in range(0, 3): for j in range(0,", "matrices out_file.write('\\n // rotation matrices\\n') R = nb_bodies*[None] Rt =", "file write_file_beginning(file_temp, joint_id_names) # variables initialization write_intialization(file_temp, nb_bodies, joint_id_names) #", "out_file.write(' in_out.r_Rwrist_der[{}][{}] = {};\\n'.format(get_string_enum(joint_id_names[i]), j, cur_jac)) out_file.write(' }\\n\\n') out_file.write(' //", "-sine, 0.0], [sine, cosine, 0.0], [0.0, 0.0, 1.0]]) else: return", "velocity and orientation\\n') out_file.write(' * waist and torso orientaion angles", "# loop on all the matrix elements for j in", "1) x[0] = write_symb_vector(out_file, x[0], 'x1_', '') xp[0] = 
write_symb_vector(out_file,", "out_file.write(' // left foot orientation angle derivatives [rad/s]\\n') theta_dot_compute(out_file, om[l_foot_id],", "'{}{}{}{}'.format(start_name, i+1, j+1, end_name) out_file.write(' {} = {};\\n'.format(elem_name, matrix[i,j]).replace('1.0*','')) new_matrix[i,j]", "inertial frame) # of the anchor point of body i", "}\\n\\n') out_file.write(' inv_c_y_waist = 1.0 / c_y_waist;\\n') out_file.write(' inv_c_y_torso =", "== 1: new_vector[i] = vector[i] save_vector[i] = None else: elem_name", "com point def write_symb_xgj(nb_bodies, Rj, xj, xgj, xgj_print, x_vector, index):", "nb_bodies*[None] x[0] = Rt[0] * Dpt[0] xp[0] = om_tilde[0] *", "'RightHipRoll_id', 'RightHipYaw_id', 'RightKneePitch_id', 'RightFootRoll_id', 'RightFootPitch_id', # right leg 'LeftHipPitch_id' ,", "flag_first = 0 for j in range(0, nb_bodies): if flag_first:", "symbolic vector for print def print_save_symb_vector(vector, start_name, end_name): new_vector =", "before body i # # xi : absolute position vector", "out_file.write('/*! 
\\n') out_file.write(' * \\\\author <NAME>\\n') out_file.write(' * \\\\file forward_kinematics.cc\\n')", "arm Dg[16] = sp.Matrix([sp.Symbol('L_1_22'), sp.Symbol('L_2_22'), sp.Symbol('L_3_22')]) Dg[17] = sp.Matrix([sp.Symbol('L_1_23'), sp.Symbol('L_2_23'),", "'Lfoot') out_file.write('\\n') # compute the wrists position, velocity and orientation", "implemented for 0 or 1 !'.format(cur_len-1)) exit() return result #", "j, cur_jac)) out_file.write(' }\\n\\n') out_file.write(' // left foot absolute orientation", "prefix, min, max): out_file.write(' double ') for i in range(min,", "s_y_Rfoot = sin(in_out.theta_Rfoot[1]);\\n') out_file.write(' s_y_Lfoot = sin(in_out.theta_Lfoot[1]);\\n') out_file.write(' s_z_Rfoot =", "leg 1, 2, 3, # trunk 2, 1, 3, 2,", "in range(0, nb_bodies): Rj[i] = nb_bodies*[None] xj[i] = nb_bodies*[None] xgj[i]", "[xgj[index-1][i], xgj_print[index-1][i]] = print_save_symb_vector(new_vector, 'xg{}_'.format(index), '_d{}'.format(i+1)) # symbolic computation def", "initialization write_intialization(file_temp, nb_bodies, joint_id_names) # symbolic computation symbolic_computation(file_temp, nb_bodies, joint_id_names,", "out_file.write(' // right wrist absolute position\\n') for i in range(0,3):", "all the lines for line in f: cut_line = line.split('", "out_file.write(' // waist orientation angle derivatives [rad/s]\\n') theta_dot_compute(out_file, om[waist_id], 'omega_waist',", "xp_l_foot = xp[l_foot_id] om_r_foot = om[r_foot_id] om_l_foot = om[l_foot_id] R_r_foot", "= sp.Symbol('DPT_3_16') for i in range(0, nb_contacts): for j in", "xj_print = nb_bodies*[None] xgj_print = nb_bodies*[None] for i in range(0,", "cosine, 0.0], [0.0, 0.0, 1.0]]) else: return np.array([]) # get", "sp.Symbol('L_2_11'), sp.Symbol('L_3_11')]) Dg[6] = sp.Matrix([sp.Symbol('L_1_12'), 0.0 , sp.Symbol('L_3_12')]) # left", "counted , only implemented for 0 or 1 !'.format(cur_len-1)) exit()", "in range(0, 3): out_file.write(' in_out.r_COM_der[{}][{}] = 
'.format(get_string_enum(joint_id_names[i]), j)) flag_first =", "// -- symbolic computation -- //\\n') # Rj, xj, xgj", "[None] xj_print[i][j] = 3 * [None] xgj_print[i][j] = 3 *", "out_write.write(';\\n') # print all declarations def print_all_declaration(in_file, out_write, nb_max_char): count", "om_tilde_l_elb = get_tilde(om_l_elb) x_r = x_r_elb + R_r_elb.T * Dpt_r_wrist", "om, Rj, xj, xgj, 19, 23, -0.02, -0.005, -0.225) torso_waist_angles(out_file,", "sp.Symbol('L_3_8')]) Dg[3] = sp.Matrix([sp.Symbol('L_1_9') , sp.Symbol('L_2_9') , sp.Symbol('L_3_9')]) Dg[4] =", "sp.Matrix([sp.Symbol('L_1_28'), sp.Symbol('L_2_28'), sp.Symbol('L_3_28')]) Dg[23] = sp.Matrix([sp.Symbol('L_1_29'), sp.Symbol('L_2_29'), sp.Symbol('L_3_29')]) # masses", "# right arm 'M_26', 'M_27', 'M_28', 'M_29' # left arm", "to derive (string) elem_str = elem_str.replace('- ','-').strip() # derivative axis", "of the previous body # Rdi : rotational matrix between", "of the relative angle before body i\\n') out_file.write(' * ci", "out_file.write('\\n // joint sines\\n') for i in range(1, nb_bodies): out_file.write('", "= write_symb_matrix(out_file, R[i], 'R{}_'.format(i+1), '') Rt[i] = R[i].T # jacobian", "aligned with the ground (info from IMU)\\n') out_file.write(' *\\n') out_file.write('", "09 # # 04 10 # # 05 11 #", "get a string for the enumeration of joints def get_string_enum(cur_string):", "15, 20, 21, 22 # left arm ]) nb_bodies =", "'RightShYaw_id' , 'RightElbPitch_id', # right arm 'LeftShPitch_id' , 'LeftShRoll_id' ,", "of 'elem' in the file def count_elem(in_file, elem): count =", "-> pitch angle in [pi/2 ; 3*pi/2] out_file.write(' in_out.{}[0] =", "the wrists position, velocity and orientation def wrists_compute(out_file, joint_id_names, R,", "else: print('Error: {} instead of 1 or 2 in negative", "velocity\\n') for i in range(0,3): out_file.write(' in_out.rp_Lwrist[{}] = {};\\n'.format(i, xp_l[i]))", "matrix def write_symb_Rj(nb_bodies, Rj, xj, xgj, Rj_print, R_matrix, index): #", "-- 
symbolic computation -- //\\n') # Rj, xj, xgj and", "elif not flag_print: flag_print = 1 out_file.write('\\n') out_file.write(' in_out.r_Rfoot_der[{}][{}] =", "xj_print[i][j][k] != None: if not flag_first: flag_first = 1 flag_print", "range(0, nb_bodies-1): Rj[i][j] = sp.zeros(3, 3) xj[i][j] = sp.zeros(3, 1)", "np.array([ 'M_6', # waist 'M_7' , 'M_8' , 'M_9' ,", "write symbolic jacobian of an anchor point def write_symb_xj(nb_bodies, Rj,", "= 1.0 / c_y_Lfoot;\\n\\n') out_file.write(' // right foot orientation angle", "the declaration of an element def print_declaration_elem(in_file, out_write, elem, nb_max_line):", "b, c except: return False, -1, -1, -1 # return", "body i\\n') out_file.write(' * xgi : absolute position vector of", "'') Rt[i] = R[i].T # jacobian rotation matrices out_file.write('\\n //", "(R_l_foot.T * Dpt_l_foot) for i in range(0, nb_contacts): x_r_cont[i] =", "= '{}{}{}{}'.format(start_name, i+1, j+1, end_name) save_matrix[3*i+j] = ' {} =", "= sp.Matrix([sp.Symbol('L_1_15'), sp.Symbol('L_2_15'), sp.Symbol('L_3_15')]) Dg[10] = sp.Matrix([sp.Symbol('L_1_16'), sp.Symbol('L_2_16'), sp.Symbol('L_3_16')]) Dg[11]", "rotational matrix\\n') out_file.write(' * Rti : transpose matrix of Ri\\n')", "Dpt_r_foot = sp.zeros(3, 1) Dpt_l_foot = sp.zeros(3, 1) Dpt_r_foot[2] =", "flag_first: out_file.write(')/m_tot;\\n') else: out_file.write('0.0;\\n') out_file.write('\\n') out_file.write(' // global com absolute", "sp.Symbol(elem_name) if flag_print: out_file.write('\\n') return new_matrix # save the symbolic", "xgj_print[i][j][k] != None: if not flag_first: flag_first = 1 flag_print", ": absolute position vector (from origin, expressed in the inertial", "[rad]\\n') yaw_pitch_roll_angles(out_file, 'theta_waist', R[waist_id], 1) out_file.write('\\n') out_file.write(' // torso orientation", "== 2: return np.array([[cosine, 0.0, -sine], [0.0, 1.0, 0.0], [sine,", "term_list.pop(0) result = 0 # loop on all terms for", "out_file.write(' * // 03 09 //\\n') out_file.write(' 
* // 04", "out_file.write(')/m_tot;\\n') else: out_file.write('0.0;\\n') out_file.write('\\n') out_file.write(' // global com absolute velocity\\n')", "Rj_print[i] = nb_bodies*[None] xj_print[i] = nb_bodies*[None] xgj_print[i] = nb_bodies*[None] for", "1 out_file.write('\\n') out_file.write(' in_out.Lfoot_or_der[{}][{}] = {};\\n'.format(get_string_enum(joint_id_names[i]), j, cur_jac)) out_file.write(' }\\n\\n')", "the waist, middle point between the two pitch hip rotations", "range(0, 9): new_matrix[j] = symbolic_jacob_der(Rj, xj, xgj, R_matrix[j], 'q{}'.format(i+1)) [Rj[index-1][i],", "R[0], 'R1_', '') Rt[0] = R[0].T for i in range(1,", "out_file.write(' // -- variables declaration -- //\\n\\n') print_all_declaration(in_temp, out_file, 100)", "flag_print = 1 out_file.write('\\n') out_file.write('{}'.format(xgj_print[i][j][k])) out_file.write(' }\\n') # results out_file.write('\\n", "# computation om_tilde_r_foot = get_tilde(om_r_foot) om_tilde_l_foot = get_tilde(om_l_foot) x_r =", "for i in range(0, nb_bodies): for j in range(1, nb_bodies):", "i in range(0, nb_bodies): xg[i] = x[i] + Rt[i] *", "nb_bodies, joint_id_names): out_file.write(' // -- variables initialization -- //\\n') out_file.write('\\n", "G_i of body i\\n') out_file.write(' * xpi : derivative of", "out_file.write(' }\\n\\n') out_file.write(' // left wrist absolute position\\n') for i", "Collecting results -- //\\n\\n') com_compute(out_file, nb_bodies, joint_id_names, M, xg, xgp,", "matrix def get_tilde(v): return np.array([[0.0, -v[2], v[1]], [v[2], 0.0, -v[0]],", "cur_len = len(cur_term_split) # no product if cur_len == 1:", "%b, %c also returned) def isRot(value): try: a = int(value.split('_')[0].split('R')[1])", "new_string = cur_string cur_split = filter(None, re.split(\"([A-Z][^A-Z]*)\", new_string)) new_string =", "* xgji : jacobian of \\'xgi\\'\\n') out_file.write(' * Rji :", "sp.Symbol('L_2_17'), sp.Symbol('L_3_17')]) Dg[12] = sp.Matrix([sp.Symbol('L_1_18'), 0.0 , sp.Symbol('L_3_18')]) # 
trunk", "the previous body to the current body i # (previous", "Dpt_r_foot_cont[3][1] = y_max for i in range(0, nb_contacts): Dpt_r_foot_cont[i][2] =", "get vector axis def get_vector_axis(axis, direct, elem): if direct: if", "'') # jacobian xg out_file.write('\\n // jacobian com absolute positions\\n')", "COM G_i of body i\\n') out_file.write(' * xpi : derivative", "(R_l_elb.T * Dpt_l_wrist) # writing outputs out_file.write(' // right wrist", "-- //\\n\\n') com_compute(out_file, nb_bodies, joint_id_names, M, xg, xgp, xgj) feet_compute(out_file,", "out_file.write(' c_z_Lfoot = cos(in_out.theta_Lfoot[2]);\\n\\n') out_file.write(' s_y_Rfoot = sin(in_out.theta_Rfoot[1]);\\n') out_file.write(' s_y_Lfoot", "R[i] = write_symb_matrix(out_file, R[i], 'R{}_'.format(i+1), '') Rt[i] = R[i].T #", "om_l_elb = om[l_elb_id] R_r_elb = R[r_elb_id] R_l_elb = R[l_elb_id] Dpt_r_wrist", "for j in range(0,3): out_file.write('{}{}{}'.format(prefix, i+1, j+1)) if i ==", "= sp.Matrix([sp.Symbol('DPT_1_4'), 0.0, sp.Symbol('DPT_3_4')]) Dpt[14] = sp.Matrix([0.0, 0.0, 0.0]) Dpt[15]", "in_out.Rfoot_or[{}] = {};\\n'.format(i, R_r_foot[i])) out_file.write('\\n') for i in range(0, 9):", "== 1: return np.array([[1.0, 0.0, 0.0], [0.0, cosine, -sine], [0.0,", "out_file.write(' * DGi : position vector from the anchor point", "'_d{}'.format(i+1)) # write symbolic jacobian of an anchor point def", "cur_jac)) out_file.write(' }\\n\\n') # get a string for the enumeration", "* \\\\file forward_kinematics.cc\\n') out_file.write(' * \\\\brief forward kinematics computation for", "of mass) position and velocity\\n') out_file.write(' * feet position, velocity", "om[r_elb_id] om_l_elb = om[l_elb_id] R_r_elb = R[r_elb_id] R_l_elb = R[l_elb_id]", "new_string = cur_split[0].upper() for i in range(1, len(cur_split)): new_string =", "right foot contact points absolute position\\n') for i in range(0,", "= sp.Matrix([0.0, sp.Symbol('DPT_2_18'), 0.0]) Dpt[9] = sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_20')]) Dpt[10]", "= 
sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_22')]) Dpt[11] = sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_24')]) Dpt[12]", "the file write_file_end(file_temp) file_temp.close() # output file out_file = open('./{}.cc'.format(out_file_name),", "matrix, compute the roll, pitch, yaw angles (and derivative) def", "18 15 22 # # 19 14 23 # #", "new_matrix, save_matrix # write symbolic jacobian of a rotation matrix", "sp.zeros(3, 3) flag_print = 0 for i in range(0,3): for", "nb_max_char): count = 0 with open(in_file,'r') as f: # loop", "wrist absolute velocity\\n') for i in range(0,3): out_file.write(' in_out.rp_Lwrist[{}] =", "flag_print = 1 out_file.write('\\n') out_file.write(' in_out.r_Rfoot_cont_der[{}][{}][{}] = {};\\n'.format(i, get_string_enum(joint_id_names[j]), k,", "predecessor # si : sine of the relative angle before", "= 0 # loop on all terms for cur_term in", "// 07 13 //\\n') out_file.write(' * // //\\n') out_file.write(' *", "axis == 2: return np.array([[cosine, 0.0, sine], [0.0, 1.0, 0.0],", "in_out.{}[1] = atan2(-{}, sqrt({}*{} + {}*{}));\\n'.format(angle_name, R_matrix[2], R_matrix[0], R_matrix[0], R_matrix[1],", "[0.0]]) elif axis == 2: return np.array([[0.0], [elem], [0.0]]) elif", "return np.array([[0.0], [-elem], [0.0]]) elif axis == 3: return np.array([[0.0],", "beginning of the file def write_file_beginning(out_file, joint_id_names): out_file.write('/*! 
\\n') out_file.write('", "def theta_dot_compute(out_file, omega_in, omega_out, body_part): out_file.write(' in_out.{}[0] = inv_c_y_{} *", "= sin(in_out.theta_torso[1]);\\n') out_file.write(' s_z_waist = sin(in_out.theta_waist[2]);\\n') out_file.write(' s_z_torso = sin(in_out.theta_torso[2]);\\n\\n')", "in_out.r_Lfoot_cont[{}][{}] = {};\\n'.format(i, j, x_l_cont[i][j])) out_file.write('\\n') out_file.write(' // left foot", "R[0] = write_symb_matrix(out_file, R[0], 'R1_', '') Rt[0] = R[0].T for", "detection !'.format(cur_len)) exit() # compute derivative result = 0 #", "* expressed in the relative frame of the current body", "// global com absolute velocity\\n') for i in range(0, 3):", "joint_id_names, rot_axis, parent_body_index, Dpt, Dg, M): out_file.write('\\n\\n // -- symbolic", "(and derivative) def yaw_pitch_roll_angles(out_file, angle_name, R_matrix, epsilon): if epsilon >", "# jacobian rotation matrices out_file.write('\\n // jacobian rotation matrices\\n') out_file.write('", "# generate the symbolic output file def gen_symbolic_out(out_file_name, nb_bodies, rot_axis,", "return true if it has a shape 'R%a_%b%c' (indexes %a,", "= get_tilde(om[0]) for i in range(1, nb_bodies): parent_id = parent_body_index[i]", "the relative\\n') out_file.write(' * frame of the previous body\\n') out_file.write('", "line.split(' = ') if len(cut_line_1) == 2 and len(cut_line_2) ==", "write_symb_vector(out_file, x[0], 'x1_', '') xp[0] = write_symb_vector(out_file, xp[0], 'xp1_', '')", "out_file.write(' * // 05 11 //\\n') out_file.write(' * // 06", "# loop on all the vector elements for j in", "R_matrix[1])) out_file.write(' in_out.{}[2] = atan2({}, {});\\n'.format(angle_name, R_matrix[1], R_matrix[0])) else: #", "in range(0,3): out_file.write(' in_out.rp_Rfoot[{}] = {};\\n'.format(i, xp_r[i])) out_file.write('\\n') out_file.write(' //", "# si : sine of the relative angle before body", "elif not flag_print: flag_print = 1 out_file.write('\\n') out_file.write(' 
in_out.r_Rfoot_cont_der[{}][{}][{}] =", "range(0, nb_contacts): for j in range(0, 3): out_file.write(' in_out.r_Rfoot_cont[{}][{}] =", "i in range(0,3): out_file.write(' in_out.rp_Lwrist[{}] = {};\\n'.format(i, xp_l[i])) out_file.write('\\n') out_file.write('", "body\\n') out_file.write(' * Rdi : rotational matrix between body i", "sp.Symbol('L_2_25'), sp.Symbol('L_3_25')]) # left arm Dg[20] = sp.Matrix([sp.Symbol('L_1_26'), sp.Symbol('L_2_26'), sp.Symbol('L_3_26')])", "write_matrix_declaration(out_file, prefix): out_file.write(' double ') for i in range(0,3): for", "sp.zeros(3, 1) Dpt_r_wrist[0] = r_wrist_x Dpt_r_wrist[1] = r_wrist_y Dpt_r_wrist[2] =", "== 2: result += der_elem(cur_term_split[0], Rj, xj, xgj, der_var)*sp.Symbol(cur_term_split[1].strip()) result", "om[l_elb_id] R_r_elb = R[r_elb_id] R_l_elb = R[l_elb_id] Dpt_r_wrist = sp.zeros(3,", "out_file.write(' // global com absolute velocity\\n') for i in range(0,", "3): out_file.write(' IMU{}{} = in_out.IMU_Orientation[{}];\\n'.format(i+1, j+1, 3*i+j)) out_file.write('\\n // IMU", "Dpt_l_foot) for i in range(0, nb_contacts): x_r_cont[i] = x_r_foot +", "symbolic_computation(file_temp, nb_bodies, joint_id_names, rot_axis, parent_body_index, Dpt, Dg, M) # end", "out_file.write(' in_out.Rwrist_or[{}] = {};\\n'.format(i, R_r_elb[i])) out_file.write('\\n') for i in range(0,", "= 1 out_file.write('\\n') out_file.write(' in_out.Lwrist_or_der[{}][{}] = {};\\n'.format(get_string_enum(joint_id_names[i]), j, cur_jac)) out_file.write('", "1.0]]) else: return np.array([]) # get vector axis def get_vector_axis(axis,", "new_matrix[i,j] = matrix[i,j] save_matrix[3*i+j] = None else: elem_name = '{}{}{}{}'.format(start_name,", "out_file.write(' inv_c_y_waist = 1.0 / c_y_waist;\\n') out_file.write(' inv_c_y_torso = 1.0", "return new_matrix, save_matrix # write symbolic jacobian of a rotation", "Rj_print, R[0], 1) R[0] = write_symb_matrix(out_file, R[0], 'R1_', '') Rt[0]", "center of mass position and velocity def 
com_compute(out_file, nb_bodies, joint_id_names,", "waist orientation matrix as angles [rad]\\n') yaw_pitch_roll_angles(out_file, 'theta_waist', R[waist_id], 1)", "0 with open(in_file,'r') as f: # loop on all the", "xj, xgj, xj_print, x_vector, index): # loop on all the", "= 1 flag_print = 1 elif not flag_print: flag_print =", "anchor point of body i to its COM (center of", "return np.array([[cosine, sine, 0.0], [-sine, cosine, 0.0], [0.0, 0.0, 1.0]])", "of \\'Ri\\'\\n') out_file.write(' */\\n') out_file.write('void ForwardKinematics::main_kinematics(KinematicsInOut &in_out)\\n{\\n') # compute the", "9): out_file.write(' in_out.Lfoot_or[{}] = {};\\n'.format(i, R_l_foot[i])) out_file.write('\\n') out_file.write(' // right", "jacobian rotation matrices out_file.write('\\n // jacobian rotation matrices\\n') out_file.write(' if", "= sp.zeros(3, 1) Dpt_l_foot_cont[i] = sp.zeros(3, 1) Dpt_r_foot_cont[0][0] = x_min", "feet_compute(out_file, joint_id_names, R, x, xp, om, Rj, xj, xgj, 6,", "expressed in the relative\\n') out_file.write(' * frame of the previous", "orientation matrix as angles [rad]\\n') yaw_pitch_roll_angles(out_file, 'theta_Lfoot', R[l_foot_id], 1) out_file.write('\\n')", "write_symb_xj(nb_bodies, Rj, xj, xgj, xj_print, x[0], 1) x[0] = write_symb_vector(out_file,", "of body i\\n') out_file.write(' * xpi : derivative of xi\\n')", "cur_jac = symbolic_jacob_der(Rj, xj, xgj, R_r_elb[j], 'q{}'.format(i+1)) if cur_jac !=", "= nb_bodies*[None] xgj_print[i] = nb_bodies*[None] for j in range(0, nb_bodies-1):", "nb_bodies): parent_id = parent_body_index[i] x[i] = x[parent_id] + Rt[parent_id] *", "body is not always body i-1), expressed in the relative", "out_file.write(' * \\\\param[in,out] in_out inputs and outputs class\\n') out_file.write(' *\\n')", "= 3 * [None] xgj_print[i][j] = 3 * [None] #", "x[0], 'x1_', '') xp[0] = write_symb_vector(out_file, xp[0], 'xp1_', '') for", "Dpt_l_foot xp_r = xp_r_foot + om_tilde_r_foot * (R_r_foot.T * Dpt_r_foot)", "body i # xpi 
: derivative of xi # xgpi", "0 for i in range(0,3): for j in range(0,3): if", "velocity def com_compute(out_file, nb_bodies, joint_id_names, M, xg, xgp, xgj): out_file.write('", "right leg 'LeftHipPitch_id' , 'LeftHipRoll_id' , 'LeftHipYaw_id' , 'LeftKneePitch_id' ,", "xj, xgj, x_l_cont[i][k], 'q{}'.format(j+1)) if cur_jac != 0: if not", "out_file.write(' * xgpi : derivative of xgi\\n') out_file.write(' * omi", "count >= nb_max_line: out_write.write(';\\n') count = 0 if count !=", "new_vector[j] = symbolic_jacob_der(Rj, xj, xgj, x_vector[j], 'q{}'.format(i+1)) [xj[index-1][i], xj_print[index-1][i]] =", "xg[i] = write_symb_vector(out_file, xg[i], 'xg{}_'.format(i+1), '') xgp[i] = write_symb_vector(out_file, xgp[i],", "frame of the current body i # Omi : rotational", "R, om, 0, 15) # generate the symbolic output file", "om[0][i] = sp.Symbol('omega_{}'.format(i+1)) om[0] = write_symb_vector(out_file, om[0], 'om1_', '') om_tilde[0]", "computation\\n') out_file.write(' *\\n') out_file.write(' * \\\\param[in,out] in_out inputs and outputs", "= ' {} = {};\\n'.format(elem_name, vector[i]).replace('1.0*','') new_vector[i] = sp.Symbol(elem_name) return", "x_l_foot + R_l_foot.T * Dpt_l_foot xp_r = xp_r_foot + om_tilde_r_foot", "relative frame of the current body i\\n') out_file.write(' * Omi", "out_file.write(' // torso orientation angle derivatives [rad/s]\\n') theta_dot_compute(out_file, om[torso_id], 'omega_torso',", "xgj, r_elb_id, l_elb_id, r_wrist_x, r_wrist_y, r_wrist_z): # symbolic variables declarations", "i != nb_bodies-1: out_file.write('\\n') else: out_file.write(' }\\n\\n') # from an", "+ R_l_elb.T * Dpt_l_wrist xp_r = xp_r_elb + om_tilde_r_elb *", "3): cur_jac = symbolic_jacob_der(Rj, xj, xgj, x_r_cont[i][k], 'q{}'.format(j+1)) if cur_jac", "in_out.Lwrist_or_der[{}][{}] = {};\\n'.format(get_string_enum(joint_id_names[i]), j, cur_jac)) out_file.write(' }\\n\\n') # get a", "cut_line = line.split(elem) if len(cut_line) == 2: count += 1", "jacobian com absolute 
positions\\n') out_file.write(' if (flag_jacob)\\n {\\n') flag_first =", "11 # # 06 12 # # 07 13 #", "initialization -- //')) != 1: out_file.write(' // -- variables declaration", "left leg Dg[7] = sp.Matrix([sp.Symbol('L_1_13'), sp.Symbol('L_2_13'), sp.Symbol('L_3_13')]) Dg[8] = sp.Matrix([sp.Symbol('L_1_14'),", "# return true if it has a shape 'R%a_%b%c' (indexes", "a rotation matrix def write_symb_Rj(nb_bodies, Rj, xj, xgj, Rj_print, R_matrix,", "out_file.write(' {} = {};\\n'.format(elem_name, matrix[i,j]).replace('1.0*','')) new_matrix[i,j] = sp.Symbol(elem_name) if flag_print:", "out_file.write(' s_y_torso = sin(in_out.theta_torso[1]);\\n') out_file.write(' s_z_waist = sin(in_out.theta_waist[2]);\\n') out_file.write(' s_z_torso", "matrix, start_name, end_name): new_matrix = sp.zeros(3, 3) flag_print = 0", "12, -0.06, 0.08, -0.045, 0.045) wrists_compute(out_file, joint_id_names, R, x, xp,", "out_write.write(', {}'.format(cut_line_2[0].strip())) count += 1 if count >= nb_max_line: out_write.write(';\\n')", "except: return False # return true if it has a", "joints enumeration\\n') out_file.write('enum {') count = 0 for i in", "sp.Matrix([sp.Symbol('L_1_6'), sp.Symbol('L_2_6'), sp.Symbol('L_3_6')]) # right leg Dg[1] = sp.Matrix([sp.Symbol('L_1_7') ,", "22 //\\n') out_file.write(' * // 19 14 23 //\\n') out_file.write('", "sp.zeros(3, 1) Dpt_l_foot = sp.zeros(3, 1) Dpt_r_foot[2] = sp.Symbol('DPT_3_16') Dpt_l_foot[2]", "(string) elem_str = elem_str.replace('- ','-').strip() # derivative axis der_q =", "body i\\n') out_file.write(' * ci : cosine of the relative", "out_file.write(' }\\n') # results out_file.write('\\n // -- Collecting results --", "= elem_str.replace('- ','-').strip() # derivative axis der_q = int(der_var.replace('q','')) #", "= print_save_symb_matrix(new_matrix, 'R{}_'.format(index), '_d{}'.format(i+1)) # write symbolic jacobian of an", "Dpt[i]) write_symb_xj(nb_bodies, Rj, xj, xgj, xj_print, x[i], i+1) x[i] =", "= 0; with open(in_file, 'r') as f: # 
loop on", "// global com absolute position\\n') for i in range(0, 3):", "right foot absolute position\\n') for i in range(0,3): out_file.write(' in_out.r_Rfoot[{}]", "= nb_bodies*[None] xj[i] = nb_bodies*[None] xgj[i] = nb_bodies*[None] Rj_print[i] =", "Dpt[3] = sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_8')]) Dpt[4] = sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_10')])", "c_y_torso = cos(in_out.theta_torso[1]);\\n') out_file.write(' c_z_waist = cos(in_out.theta_waist[2]);\\n') out_file.write(' c_z_torso =", "range(1, nb_bodies): parent_id = parent_body_index[i] x[i] = x[parent_id] + Rt[parent_id]", "vector from the anchor point of the previous body to", "2, 3, # trunk 2, 1, 3, 2, # right", "or vector[i] == 1: new_vector[i] = vector[i] else: flag_print =", "None else: elem_name = '{}{}{}'.format(start_name, i+1, end_name) save_vector[i] = '", "wrists_compute(out_file, joint_id_names, R, x, xp, om, Rj, xj, xgj, r_elb_id,", "+ om_tilde[parent_id] * (Rt[parent_id] * Dpt[i]) write_symb_xj(nb_bodies, Rj, xj, xgj,", "Dpt[0]) write_symb_xj(nb_bodies, Rj, xj, xgj, xj_print, x[0], 1) x[0] =", "if flag_first: out_file.write(' + {}*{}'.format(M[k], str(xgj[k][i][j]))) else: flag_first = 1", "'R1_', '') Rt[0] = R[0].T for i in range(1, nb_bodies):", "= 1 elif not flag_print: flag_print = 1 out_file.write('\\n') out_file.write('{}'.format(Rj_print[i][j][k]))", "out_file.write(' * ////////////////////////\\n') out_file.write(' * // //\\n') out_file.write(' * //", "1) om[0] = sp.zeros(3, 1) for i in range(0,3): om[0][i]", "exit() return result # write the beginning of the file", "if flag_print: out_file.write('\\n') return new_matrix # save the symbolic vector", "# beginning of the file write_file_beginning(file_temp, joint_id_names) # variables initialization", "# omega out_file.write('\\n // joint absolute velocities\\n') Om = nb_bodies*[None]", "R_l_foot.T * Dpt_l_foot xp_r = xp_r_foot + om_tilde_r_foot * (R_r_foot.T", "der_var)*sp.Symbol(cur_term_split[1].strip()) result += 
der_elem(cur_term_split[1], Rj, xj, xgj, der_var)*sp.Symbol(cur_term_split[0].strip()) # other", "sp.Symbol('DPT_3_12')]) Dpt[6] = sp.Matrix([0.0, 0.0, 0.0]) # left leg Dpt[7]", "sp.Matrix([sp.Symbol('L_1_8') , sp.Symbol('L_2_8') , sp.Symbol('L_3_8')]) Dg[3] = sp.Matrix([sp.Symbol('L_1_9') , sp.Symbol('L_2_9')", "Rj, xj, xgj, Rj_print, R[i], i+1) R[i] = write_symb_matrix(out_file, R[i],", "0.0]) Dpt[2] = sp.Matrix([0.0, sp.Symbol('DPT_2_6'), 0.0]) Dpt[3] = sp.Matrix([0.0, 0.0,", "np.array([[1.0, 0.0, 0.0], [0.0, cosine, sine], [0.0, -sine, cosine]]) elif", "of the current body i # Omi : rotational vector", "out_file.write(' in_out.{}[2] = inv_c_y_{} * s_y_{} * (s_z_{}*{} + c_z_{}*{})", "{};\\n'.format(omega_out, body_part, body_part, body_part, omega_in[1], body_part, omega_in[0], omega_in[2])) # angles", "sp.Symbol('L_3_21')]) # right arm Dg[16] = sp.Matrix([sp.Symbol('L_1_22'), sp.Symbol('L_2_22'), sp.Symbol('L_3_22')]) Dg[17]", "sp.Symbol('IMU{}{}'.format(i+1, j+1)) write_symb_Rj(nb_bodies, Rj, xj, xgj, Rj_print, R[0], 1) R[0]", "1: new_matrix[i,j] = matrix[i,j] else: flag_print = 1 elem_name =", "k == nb_bodies-1: if flag_first: out_file.write(')/m_tot;\\n') else: out_file.write('0.0;\\n') if i", "len(cur_split)): new_string = '{}_{}'.format(new_string, cur_split[i].upper()) return new_string # write the", "out_file.write(' in_out.r_COM[{}] = '.format(i)) flag_first = 0 for j in", "19 14 23 # # 20 01 24 # #", "pitch hip rotations # inertial frame: located at the origin", "# xgji : jacobian of 'xgi' # Rji : jacobian", "xgj, R_l_elb[j], 'q{}'.format(i+1)) if cur_jac != 0: if not flag_first:", "axis == 3: return np.array([[cosine, sine, 0.0], [-sine, cosine, 0.0],", "or 2 in negative detection !'.format(cur_len)) exit() # compute derivative", "# Rti : transpose matrix of Ri # xji :", "{\\n') flag_first = 0 for i in range(0, nb_contacts): for", "save_vector[i] = ' {} = {};\\n'.format(elem_name, vector[i]).replace('1.0*','') new_vector[i] = sp.Symbol(elem_name)", 
"om, 0, 15) # generate the symbolic output file def", "xg[i] = x[i] + Rt[i] * Dg[i] xgp[i] = xp[i]", ": derivative of xgi # omi : absolute rotational vector", "range(0, nb_contacts): x_r_cont[i] = x_r_foot + R_r_foot.T * Dpt_r_foot_cont[i] x_l_cont[i]", "leg Dg[7] = sp.Matrix([sp.Symbol('L_1_13'), sp.Symbol('L_2_13'), sp.Symbol('L_3_13')]) Dg[8] = sp.Matrix([sp.Symbol('L_1_14'), sp.Symbol('L_2_14'),", "# waist 'RightHipPitch_id', 'RightHipRoll_id', 'RightHipYaw_id', 'RightKneePitch_id', 'RightFootRoll_id', 'RightFootPitch_id', # right", "in_out.{}[1] = c_z_{}*{} - s_z_{}*{};\\n'.format(omega_out, body_part, omega_in[1], body_part, omega_in[0])) out_file.write('", "not flag_print: flag_print = 1 out_file.write('\\n') out_file.write('{}'.format(xgj_print[i][j][k])) out_file.write(' }\\n') #", "= in_out.qd_mot[{}];\\n'.format(i+1, joint_id_names[i])) # write symbolic vector and replace symbolic", "instead of 1 or 2 in negative detection !'.format(cur_len)) exit()", "R_matrix[8])) out_file.write(' in_out.{}[1] = atan2(-{}, -sqrt({}*{} + {}*{}));\\n'.format(angle_name, R_matrix[2], R_matrix[0],", "* Dpt_r_foot_cont[i] x_l_cont[i] = x_l_foot + R_l_foot.T * Dpt_l_foot_cont[i] #", "the relative angle before body i\\n') out_file.write(' *\\n') out_file.write(' *", "1 pos_str = elem_split[1] else: print('Error: {} instead of 1", "xgj, r_foot_id, l_foot_id, x_min, x_max, y_min, y_max): # symbolic variables", "cur_jac = symbolic_jacob_der(Rj, xj, xgj, x_l_cont[i][k], 'q{}'.format(j+1)) if cur_jac !=", "in_out.{}[0] = atan2(-{}, -{});\\n'.format(angle_name, R_matrix[5], R_matrix[8])) out_file.write(' in_out.{}[1] = atan2(-{},", "1 out_file.write('\\n') out_file.write(' in_out.Rwrist_or_der[{}][{}] = {};\\n'.format(get_string_enum(joint_id_names[i]), j, cur_jac)) out_file.write(' }\\n\\n')", "in_out.{}[2] = atan2(-{}, -{});\\n'.format(angle_name, R_matrix[1], R_matrix[0])) # compute the time", "def print_save_symb_matrix(matrix, start_name, end_name): new_matrix = sp.zeros(3, 3) 
save_matrix =", "der_var): # list of all terms term_list = str(symb_var).replace('- ','-').replace('-','+-').split('+')", "cur_len == 1: result += der_elem(cur_term_split[0], Rj, xj, xgj, der_var)", "transpose matrix of Ri # xji : jacobian of 'xi'", "computation -- //\\n') # Rj, xj, xgj and xgj (jacobian)", "(1, nb_bodies): new_vector = sp.zeros(3, 1) # loop on all", "outputs class\\n') out_file.write(' *\\n') out_file.write(' * computation of:\\n') out_file.write(' *", "point of body i\\n') out_file.write(' * xgi : absolute position", "def write_intialization(out_file, nb_bodies, joint_id_names): out_file.write(' // -- variables initialization --", "-- Collecting results -- //\\n\\n') com_compute(out_file, nb_bodies, joint_id_names, M, xg,", "R_l_foot = R[l_foot_id] Dpt_r_foot = sp.zeros(3, 1) Dpt_l_foot = sp.zeros(3,", "Rdi : rotational matrix between body i and its predecessor", "not flag_print: flag_print = 1 out_file.write('\\n') out_file.write(' in_out.r_Rwrist_der[{}][{}] = {};\\n'.format(get_string_enum(joint_id_names[i]),", "xj, xgj, 6, 12, -0.06, 0.08, -0.045, 0.045) wrists_compute(out_file, joint_id_names,", "Dg[i] xgp[i] = xp[i] + om_tilde[i] * (Rt[i] * Dg[i])", "= sp.zeros(3, 1) Dpt_r_wrist[0] = r_wrist_x Dpt_r_wrist[1] = r_wrist_y Dpt_r_wrist[2]", "# 18 15 22 # # 19 14 23 #", ": derivative of xi # xgpi : derivative of xgi", "variables declaration -- //\\n\\n') print_all_declaration(in_temp, out_file, 100) out_file.write('\\n\\n') # copy", "= sp.Matrix([sp.Symbol('L_1_24'), sp.Symbol('L_2_24'), sp.Symbol('L_3_24')]) Dg[19] = sp.Matrix([sp.Symbol('L_1_25'), sp.Symbol('L_2_25'), sp.Symbol('L_3_25')]) #", "line.split(elem) cut_line_2 = line.split(' = ') if len(cut_line_1) == 2", "Dpt_r_wrist[2] = r_wrist_z Dpt_l_wrist[0] = r_wrist_x Dpt_l_wrist[1] = -r_wrist_y Dpt_l_wrist[2]", "'M_8' , 'M_9' , 'M_10', 'M_11', 'M_12', # right leg", "{};\\n'.format(i, get_string_enum(joint_id_names[j]), k, cur_jac)) out_file.write(' }\\n\\n') out_file.write(' // left foot", 
"2: return np.array([[0.0], [-elem], [0.0]]) elif axis == 3: return", "= sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_12')]) Dpt[6] = sp.Matrix([0.0, 0.0, 0.0]) #", "body i # Omi : rotational vector from the previous", "return result # write the beginning of the file def", ": jacobian of \\'xi\\'\\n') out_file.write(' * xgji : jacobian of", "x_min Dpt_r_foot_cont[1][0] = x_min Dpt_r_foot_cont[2][0] = x_max Dpt_r_foot_cont[3][0] = x_max", "relative # frame of the previous body # DGi :", "declaration of an element def print_declaration_elem(in_file, out_write, elem, nb_max_line): if", "1) Dpt_r_wrist[0] = r_wrist_x Dpt_r_wrist[1] = r_wrist_y Dpt_r_wrist[2] = r_wrist_z", "point of the previous body to the current body i", "0 for j in range(0, nb_bodies): if flag_first: out_file.write(' +", "left foot orientation angle derivatives [rad/s]\\n') theta_dot_compute(out_file, om[l_foot_id], 'omega_Lfoot', 'Lfoot')", "return True except: return False # return true if it", "in [-pi/2 ; pi/2] out_file.write(' in_out.{}[0] = atan2({}, {});\\n'.format(angle_name, R_matrix[5],", "= sin(in_out.theta_Rfoot[1]);\\n') out_file.write(' s_y_Lfoot = sin(in_out.theta_Lfoot[1]);\\n') out_file.write(' s_z_Rfoot = sin(in_out.theta_Rfoot[2]);\\n')", "= sin(in_out.theta_Rfoot[2]);\\n') out_file.write(' s_z_Lfoot = sin(in_out.theta_Lfoot[2]);\\n\\n') out_file.write(' if ((!c_y_Rfoot) ||", "write_symb_xj(nb_bodies, Rj, xj, xgj, xj_print, x[i], i+1) x[i] = write_symb_vector(out_file,", "def print_declaration_elem(in_file, out_write, elem, nb_max_line): if count_elem(in_file, '{}'.format(elem)) >= 1:", "open(in_file,'r') as f: # loop on all the lines for", ", 'LeftHipYaw_id' , 'LeftKneePitch_id' , 'LeftFootRoll_id' , 'LeftFootPitch_id' , #", "line.split(' = ') if len(cut_line) == 2: if len(cut_line[0].split('[')) ==", "all terms for cur_term in term_list: # detect products cur_term_split", "# one product elif cur_len == 2: result += der_elem(cur_term_split[0],", ": position vector from the anchor point 
of the previous", "len(cut_line_2) == 2: if len(cut_line_2[0].split('[')) == 1: if count ==", "cur_split[0] for i in range(1, len(cur_split)-1): new_string = '{}{}'.format(new_string, cur_split[i])", "False, -1, -1 # count the number of 'elem' in", "body i and its predecessor # si : sine of", "== 1: new_vector[i] = vector[i] else: flag_print = 1 elem_name", "* Dpt[0]) write_symb_xj(nb_bodies, Rj, xj, xgj, xj_print, x[0], 1) x[0]", "'LeftFootRoll_id' , 'LeftFootPitch_id' , # left leg 'TorsoRoll_id' , 'TorsoPitch_id'", "with open(in_file,'r') as f: # loop on all the lines", "0.0], [0.0, cosine, -sine], [0.0, sine, cosine]]) elif axis ==", "R_matrix[5], R_matrix[8])) out_file.write(' in_out.{}[1] = atan2(-{}, sqrt({}*{} + {}*{}));\\n'.format(angle_name, R_matrix[2],", "declarations x_r_elb = x[r_elb_id] x_l_elb = x[l_elb_id] xp_r_elb = xp[r_elb_id]", "= sp.zeros(3, 3) R[0] = sp.zeros(3, 3) for i in", "cosine of the relative angle before body i # #", "the symbolic output file def gen_symbolic_out(out_file_name, nb_bodies, rot_axis, parent_body_index, joint_id_names,", "f: cut_line_1 = line.split(elem) cut_line_2 = line.split(' = ') if", "//\\n') # Rj, xj, xgj and xgj (jacobian) Rj =", "the feet position, velocity and orientation def feet_compute(out_file, joint_id_names, R,", "= 0 for j in range(0, 3): cur_jac = symbolic_jacob_der(Rj,", "-1 # count the number of 'elem' in the file", "for j in range(0,3): if matrix[i,j] == 0 or matrix[i,j]", "velocity\\n') for i in range(0,3): out_file.write(' in_out.rp_Rwrist[{}] = {};\\n'.format(i, xp_r[i]))", "om_tilde_l_foot = get_tilde(om_l_foot) x_r = x_r_foot + R_r_foot.T * Dpt_r_foot", "# symbolic computation symbolic_computation(file_temp, nb_bodies, joint_id_names, rot_axis, parent_body_index, Dpt, Dg,", "position vector from the anchor point of the previous body", "out_file.write(' in_out.{}[1] = c_z_{}*{} - s_z_{}*{};\\n'.format(omega_out, body_part, omega_in[1], body_part, omega_in[0]))", "if j == nb_bodies-1: if 
flag_first: out_file.write(')/m_tot;\\n') else: out_file.write('0.0;\\n') out_file.write('\\n')", "x_l_cont[i][j])) out_file.write('\\n') out_file.write(' // left foot contact points jacobian\\n') out_file.write('", "a, b except: return False, -1, -1 # count the", "= nb_bodies*[None] Om[0] = sp.zeros(3, 1) om[0] = sp.zeros(3, 1)", "flag_print = 1 out_file.write('\\n') out_file.write('{}'.format(Rj_print[i][j][k])) out_file.write(' }\\n') # omega out_file.write('\\n", "in range(0,3): out_file.write(' in_out.r_Rfoot[{}] = {};\\n'.format(i, x_r[i])) out_file.write('\\n') out_file.write(' //", "out_file.write(';\\n\\n') else: out_file.write(' + ') out_file.write(' // global com absolute", "if i == 2 and j == 2: out_file.write(';\\n') else:", "= R[0].T for i in range(1, nb_bodies): Rd[i] = get_rotation_matrix(rot_axis[i],", "beginning of the file write_file_beginning(file_temp, joint_id_names) # variables initialization write_intialization(file_temp,", "origin, expressed in the inertial frame) # of the anchor", "= 1.0 / c_y_waist;\\n') out_file.write(' inv_c_y_torso = 1.0 / c_y_torso;\\n\\n')", "'M_27', 'M_28', 'M_29' # left arm ]) # joint names", "else: out_file.write('0.0;\\n') if i != nb_bodies-1: out_file.write('\\n') else: out_file.write(' }\\n\\n')", "Rji : jacobian of 'Ri' # return true if it", "# element to derive (string) elem_str = elem_str.replace('- ','-').strip() #", "{} instead of 1 or 2 in negative detection !'.format(cur_len))", "(R_r_foot.T * Dpt_r_foot) xp_l = xp_l_foot + om_tilde_l_foot * (R_l_foot.T", "vector[i] == 0 or vector[i] == 1: new_vector[i] = vector[i]", "[0.0, -sine, cosine]]) elif axis == 2: return np.array([[cosine, 0.0,", "2: out_file.write(';\\n') else: out_file.write(', ') # print variables declaration def", "len(cut_line[0].split('[')) == 1: if count == 0: out_write.write(' double {}'.format(cut_line[0].strip()))", "i == 2 and j == 2: out_file.write(';\\n') else: out_file.write(',", "3): new_vector[j] = symbolic_jacob_der(Rj, xj, xgj, 
x_vector[j], 'q{}'.format(i+1)) [xj[index-1][i], xj_print[index-1][i]]", "1 out_file.write('\\n') out_file.write('{}'.format(xgj_print[i][j][k])) out_file.write(' }\\n') # results out_file.write('\\n // --", "= 9 * [None] for i in range(0,3): for j", "body i\\n') out_file.write(' * Omi : rotational vector from the", "if (flag_jacob)\\n {\\n') flag_first = 0 for i in range(0,", "0 for i in range(0,3): if vector[i] == 0 or", "sympy as sp import re import os ###################### # #", "for i in range(0, 9): out_file.write(' in_out.Rwrist_or[{}] = {};\\n'.format(i, R_r_elb[i]))", "arm ]) # parent index parent_body_index = np.array([ -1, #", "located at the origin (waist), but aligned with the ground", "np.array([[1.0, 0.0, 0.0], [0.0, cosine, -sine], [0.0, sine, cosine]]) elif", "cur_term.split('*') cur_len = len(cur_term_split) # no product if cur_len ==", "}\\n\\n') out_file.write(' // right foot contact points absolute position\\n') for", "out_file.write(' }\\n\\n') out_file.write(' // feet absolute orientation\\n') for i in", "right leg 0, 7, 8, 9, 10, 11, # left", "xpi : derivative of xi # xgpi : derivative of", "for line in f: cut_line_1 = line.split(elem) cut_line_2 = line.split('", "print_declaration_elem(in_file, out_write, elem, nb_max_line): if count_elem(in_file, '{}'.format(elem)) >= 1: count", "0: # epsilon = 1 -> pitch angle in [-pi/2", "= {};\\n'.format(elem_name, vector[i]).replace('1.0*','') new_vector[i] = sp.Symbol(elem_name) return new_vector, save_vector #", "* Dpt_l_wrist) # writing outputs out_file.write(' // right wrist absolute", "in range(0, 9): out_file.write(' in_out.Rwrist_or[{}] = {};\\n'.format(i, R_r_elb[i])) out_file.write('\\n') for", "'xgi' # Rji : jacobian of 'Ri' # return true", "for j in range(0, 3): Dpt_l_foot_cont[i][j] = Dpt_r_foot_cont[i][j] x_r_cont =", "np.array([[cosine, 0.0, -sine], [0.0, 1.0, 0.0], [sine, 0.0, cosine]]) elif", "= {};\\n'.format(i, R_r_foot[i])) out_file.write('\\n') for i in range(0, 9): 
out_file.write('", "in range(1, len(cur_split)): new_string = '{}_{}'.format(new_string, cur_split[i].upper()) return new_string #", "x[0], 1) x[0] = write_symb_vector(out_file, x[0], 'x1_', '') xp[0] =", "com absolute velocity\\n') for i in range(0, 3): out_file.write(' in_out.rp_COM[{}]", "// left foot orientation matrix as angles [rad]\\n') yaw_pitch_roll_angles(out_file, 'theta_Lfoot',", "= 4 x_r_foot = x[r_foot_id] x_l_foot = x[l_foot_id] xp_r_foot =", "xi : absolute position vector (from origin, expressed in the", "(for jacobian) def symbolic_jacob_der(Rj, xj, xgj, symb_var, der_var): # list", "absolute positions\\n') out_file.write(' if (flag_jacob)\\n {\\n') flag_first = 0 for", "arm ]) # joint names joint_id_names = np.array(['0', # waist", "out_file.write('\\n') # compute the wrists position, velocity and orientation def", "2: count += 1 return count # print the declaration", "for j in range(0, 3): new_vector[j] = symbolic_jacob_der(Rj, xj, xgj,", "= xp[parent_id] + om_tilde[parent_id] * (Rt[parent_id] * Dpt[i]) write_symb_xj(nb_bodies, Rj,", "joint_id_names, R, x, xp, om, Rj, xj, xgj, 19, 23,", "vector of the COM G_i of body i # xpi", "Dpt[21] = sp.Matrix([0.0, sp.Symbol('DPT_2_46'), 0.0]) Dpt[22] = sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_48')])", "cur_len == 2: # negative neg_flag = 1 pos_str =", "= atan2(-{}, -{});\\n'.format(angle_name, R_matrix[5], R_matrix[8])) out_file.write(' in_out.{}[1] = atan2(-{}, -sqrt({}*{}", "compute the derivative of an expression (for jacobian) def symbolic_jacob_der(Rj,", "left foot orientation matrix as angles [rad]\\n') yaw_pitch_roll_angles(out_file, 'theta_Lfoot', R[l_foot_id],", "c_y_Lfoot;\\n\\n') out_file.write(' // right foot orientation angle derivatives [rad/s]\\n') theta_dot_compute(out_file,", "sp.Matrix([sp.Symbol('L_1_21'), sp.Symbol('L_2_21'), sp.Symbol('L_3_21')]) # right arm Dg[16] = sp.Matrix([sp.Symbol('L_1_22'), sp.Symbol('L_2_22'),", "direct, cosine, sine): if direct: if axis == 1: return", 
"nb_bodies*[None] # waist Dpt[0] = sp.Matrix([0.0, 0.0, 0.0]) # right", "left wrist absolute orientation jacobian\\n') out_file.write(' if (flag_jacob)\\n {\\n') flag_first", "= print_save_symb_vector(new_vector, 'xg{}_'.format(index), '_d{}'.format(i+1)) # symbolic computation def symbolic_computation(out_file, nb_bodies,", "out_file.write(' in_out.Rfoot_or_der[{}][{}] = {};\\n'.format(get_string_enum(joint_id_names[i]), j, cur_jac)) out_file.write(' }\\n\\n') out_file.write(' //", "to the current body i # (previous body is not", "* (R_r_elb.T * Dpt_r_wrist) xp_l = xp_l_elb + om_tilde_l_elb *", "of the relative angle before body i # # xi", "velocity\\n') for i in range(0,3): out_file.write(' in_out.rp_Rfoot[{}] = {};\\n'.format(i, xp_r[i]))", "out_file.write('\\n') out_file.write(' // left wrist absolute velocity\\n') for i in", "'{}{}{}'.format(start_name, i+1, end_name) out_file.write(' {} = {};\\n'.format(elem_name, vector[i]).replace('1.0*','')) new_vector[i] =", "frame of the previous body\\n') out_file.write(' * DGi : position", "out_file.write('\\n') out_file.write(' c_y_waist = cos(in_out.theta_waist[1]);\\n') out_file.write(' c_y_torso = cos(in_out.theta_torso[1]);\\n') out_file.write('", "// -- Collecting results -- //\\n\\n') com_compute(out_file, nb_bodies, joint_id_names, M,", "1, sp.Symbol('c{}'.format(i+1)), sp.Symbol('s{}'.format(i+1))) R[i] = Rd[i] * R[parent_body_index[i]] write_symb_Rj(nb_bodies, Rj,", "body i \\n') out_file.write(' * (previous body is not always", "* // 19 14 23 //\\n') out_file.write(' * // 20", "return False, -1, -1, -1 # return true if it", "epsilon = 1 -> pitch angle in [-pi/2 ; pi/2]", "for i in range(1, nb_bodies): out_file.write(' s{} = sin(in_out.q_mot[{}]);\\n'.format(i+1, joint_id_names[i]))", "None: if not flag_first: flag_first = 1 flag_print = 1", "parent_body_index, joint_id_names, Dpt, Dg, M): # temporary file in_temp =", "cosine if pos_str == 'c{}'.format(der_q): result += -sp.Symbol('s{}'.format(der_q)) # 
sine", "05 11 # # 06 12 # # 07 13", "3): out_file.write(' in_out.r_COM_der[{}][{}] = '.format(get_string_enum(joint_id_names[i]), j)) flag_first = 0 for", "# write symbolic jacobian of an anchor point def write_symb_xj(nb_bodies,", "xgi\\n') out_file.write(' * omi : absolute rotational vector of body", "sp.Symbol('L_2_22'), sp.Symbol('L_3_22')]) Dg[17] = sp.Matrix([sp.Symbol('L_1_23'), sp.Symbol('L_2_23'), sp.Symbol('L_3_23')]) Dg[18] = sp.Matrix([sp.Symbol('L_1_24'),", "9): out_file.write(' in_out.Rwrist_or[{}] = {};\\n'.format(i, R_r_elb[i])) out_file.write('\\n') for i in", "def write_symb_matrix(out_file, matrix, start_name, end_name): new_matrix = sp.zeros(3, 3) flag_print", "in range(1, nb_bodies): flag_print = 0 for k in range(0,", "range(0, 3): cur_jac = symbolic_jacob_der(Rj, xj, xgj, x_r[j], 'q{}'.format(i+1)) if", "start_name, end_name): new_vector = sp.zeros(3, 1) flag_print = 0 for", "out_file.write(' * COM (center of mass) position and velocity\\n') out_file.write('", "1) out_file.write('\\n') out_file.write(' c_y_Rfoot = cos(in_out.theta_Rfoot[1]);\\n') out_file.write(' c_y_Lfoot = cos(in_out.theta_Lfoot[1]);\\n')", "%b also returned) def isVec(value): try: a = int(value.split('_')[0].split('x')[1]) b", "Rj, xj, xgj, xj_print, x[0], 1) x[0] = write_symb_vector(out_file, x[0],", "07 13 //\\n') out_file.write(' * // //\\n') out_file.write(' * ////////////////////////\\n')", "right arm 15, 20, 21, 22 # left arm ])", "on all the lines for line in f: cut_line =", "to the current body i \\n') out_file.write(' * (previous body", "= '{}{}{}'.format(start_name, i+1, end_name) out_file.write(' {} = {};\\n'.format(elem_name, vector[i]).replace('1.0*','')) new_vector[i]", "xgj, x_l_cont[i][k], 'q{}'.format(j+1)) if cur_jac != 0: if not flag_first:", "'omega_Lfoot', 'Lfoot') out_file.write('\\n') # compute the wrists position, velocity and", "x_vector[j], 'q{}'.format(i+1)) [xj[index-1][i], xj_print[index-1][i]] = print_save_symb_vector(new_vector, 
'x{}_'.format(index), '_d{}'.format(i+1)) # write", "i # xgi : absolute position vector of the COM", "jacobian x out_file.write('\\n // jacobian anchor point positions\\n') out_file.write(' if", "# detect positive/negative elem_split = elem_str.split('-') cur_len = len(elem_split) if", "of the previous body to the current body i \\n')", "{};\\n'.format(get_string_enum(joint_id_names[i]), j, cur_jac)) out_file.write(' }\\n\\n') out_file.write(' // left foot absolute", "and derivatives\\n') out_file.write(' *\\n') out_file.write(' * ////////////////////////\\n') out_file.write(' * //", "j in range(0, 3): out_file.write(' in_out.r_Lfoot_cont[{}][{}] = {};\\n'.format(i, j, x_l_cont[i][j]))", "0.0, sp.Symbol('DPT_3_4')]) Dpt[14] = sp.Matrix([0.0, 0.0, 0.0]) Dpt[15] = sp.Matrix([0.0,", "range(0,3): for j in range(0,3): if matrix[i,j] == 0 or", "R = nb_bodies*[None] Rt = nb_bodies*[None] Rd = nb_bodies*[None] Rd[0]", "0.0, 0.0]) Dpt[15] = sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_32')]) # right arm", "joints def get_string_enum(cur_string): cur_split = cur_string.split('_') if len(cur_split) >= 2:", "== 3: return np.array([[0.0], [0.0], [-elem]]) else: return np.array([]) #", "the previous body\\n') out_file.write(' * Rdi : rotational matrix between", "of the relative angle before body i # ci :", "def write_symb_vector(out_file, vector, start_name, end_name): new_vector = sp.zeros(3, 1) flag_print", "= {};\\n'.format(i, xp_l[i])) out_file.write('\\n') out_file.write(' // left wrist jacobian\\n') out_file.write('", "+ {}*{}));\\n'.format(angle_name, R_matrix[2], R_matrix[0], R_matrix[0], R_matrix[1], R_matrix[1])) out_file.write(' in_out.{}[2] =", "r_wrist_x Dpt_l_wrist[1] = -r_wrist_y Dpt_l_wrist[2] = r_wrist_z # computation om_tilde_r_elb", "in_out.r_Lwrist_der[{}][{}] = {};\\n'.format(get_string_enum(joint_id_names[i]), j, cur_jac)) out_file.write(' }\\n\\n') out_file.write(' // wrists", "arm Dg[20] = sp.Matrix([sp.Symbol('L_1_26'), sp.Symbol('L_2_26'), 
sp.Symbol('L_3_26')]) Dg[21] = sp.Matrix([sp.Symbol('L_1_27'), sp.Symbol('L_2_27'),", "# print the declaration of an element def print_declaration_elem(in_file, out_write,", "isVec(pos_str) # rotation matrix if rot_flag: result += Rj[a-1][der_q-1][(b-1)*3+(c-1)] #", "xp_r = xp_r_foot + om_tilde_r_foot * (R_r_foot.T * Dpt_r_foot) xp_l", "range(0, nb_bodies): xg[i] = x[i] + Rt[i] * Dg[i] xgp[i]", "= nb_bodies*[None] xgj[i] = nb_bodies*[None] Rj_print[i] = nb_bodies*[None] xj_print[i] =", "= parent_body_index[i] Om[i] = get_vector_axis(rot_axis[i], 1, sp.Symbol('Om{}'.format(i+1))) om[i] = om[parent_id]", "sp.Symbol('DPT_3_10')]) Dpt[5] = sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_12')]) Dpt[6] = sp.Matrix([0.0, 0.0,", "((!c_y_Rfoot) || (!c_y_Lfoot))\\n {\\n') out_file.write(' return;\\n }\\n\\n') out_file.write(' inv_c_y_Rfoot =", "i\\n') out_file.write(' * Omi : rotational vector from the previous", "count the number of 'elem' in the file def count_elem(in_file,", "absolute position vector (from origin, expressed in the inertial frame)", "return False, -1, -1 # count the number of 'elem'", "in the waist, middle point between the two pitch hip", "= x_l_elb + R_l_elb.T * Dpt_l_wrist xp_r = xp_r_elb +", "i in range(0,3): for j in range(0,3): out_file.write('{}{}{}'.format(prefix, i+1, j+1))", "[0.0], [-elem]]) else: return np.array([]) # compute the derivative of", "return np.array([[0.0], [0.0], [elem]]) else: return np.array([]) else: if axis", "= R[l_elb_id] Dpt_r_wrist = sp.zeros(3, 1) Dpt_l_wrist = sp.zeros(3, 1)", "+ om_tilde_l_elb * (R_l_elb.T * Dpt_l_wrist) # writing outputs out_file.write('", "position\\n') for i in range(0,3): out_file.write(' in_out.r_Rwrist[{}] = {};\\n'.format(i, x_r[i]))", "out_file.write(' in_out.{}[0] = atan2(-{}, -{});\\n'.format(angle_name, R_matrix[5], R_matrix[8])) out_file.write(' in_out.{}[1] =", "= nb_bodies*[None] xj_print = nb_bodies*[None] xgj_print = nb_bodies*[None] for i", "= get_tilde(om_l_elb) x_r = x_r_elb + R_r_elb.T * 
Dpt_r_wrist x_l", "Rj_print[i][j] = 9 * [None] xj_print[i][j] = 3 * [None]", "= nb_bodies*[None] om = nb_bodies*[None] om_tilde = nb_bodies*[None] Om[0] =", "Rj, xj, xgj, 19, 23, -0.02, -0.005, -0.225) torso_waist_angles(out_file, R,", "= sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_20')]) Dpt[10] = sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_22')]) Dpt[11]", "the symbolic vector for print def print_save_symb_vector(vector, start_name, end_name): new_vector", "for i in range(0, nb_bodies): out_file.write('{}'.format(M[i])) if i == nb_bodies-1:", "{};\\n'.format(i, j, x_l_cont[i][j])) out_file.write('\\n') out_file.write(' // left foot contact points", "for i in range (1, nb_bodies): flag_print = 0 for", "xgj (jacobian) Rj = nb_bodies*[None] xj = nb_bodies*[None] xgj =", "# left leg 'TorsoRoll_id' , 'TorsoPitch_id' , 'TorsoYaw_id' , #", "0.0]) Dpt[18] = sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_41')]) Dpt[19] = sp.Matrix([0.0, 0.0,", "print_save_symb_vector(new_vector, 'x{}_'.format(index), '_d{}'.format(i+1)) # write symbolic jacobian of a com", "= 0 for k in range(0, 3): cur_jac = symbolic_jacob_der(Rj,", "* of the anchor point of body i\\n') out_file.write(' *", "xgj[i][j] = sp.zeros(3, 1) Rj_print[i][j] = 9 * [None] xj_print[i][j]", "!'.format(cur_len)) exit() # compute derivative result = 0 # cosine", "xji : jacobian of \\'xi\\'\\n') out_file.write(' * xgji : jacobian", "j, cur_jac)) out_file.write(' }\\n\\n') out_file.write(' // right foot contact points", "range(0,3): if matrix[i,j] == 0 or matrix[i,j] == 1: new_matrix[i,j]", "{};\\n'.format(i, R_l_elb[i])) out_file.write('\\n') out_file.write(' // right wrist absolute orientation jacobian\\n')", "+ R_r_foot.T * Dpt_r_foot x_l = x_l_foot + R_l_foot.T *", "nb_bodies-1: out_file.write('\\n') else: out_file.write(' }\\n\\n') # from an orientation matrix,", "body_part, body_part, omega_in[0], body_part, omega_in[1])) out_file.write(' in_out.{}[1] = c_z_{}*{} -", "Om[0] = sp.zeros(3, 1) om[0] = sp.zeros(3, 1) for i", "i+1) R[i] = 
write_symb_matrix(out_file, R[i], 'R{}_'.format(i+1), '') Rt[i] = R[i].T", "in_out.IMU_Angular_Rate[{}];\\n'.format(i+1, i)) out_file.write('\\n // joint cosines\\n') for i in range(1,", "'x{}_'.format(index), '_d{}'.format(i+1)) # write symbolic jacobian of a com point", "= 1 elif not flag_print: flag_print = 1 out_file.write('\\n') out_file.write('{}'.format(xj_print[i][j][k]))", "R_matrix[1], R_matrix[1])) out_file.write(' in_out.{}[2] = atan2({}, {});\\n'.format(angle_name, R_matrix[1], R_matrix[0])) else:", "out_file.write(' IMU{}{} = in_out.IMU_Orientation[{}];\\n'.format(i+1, j+1, 3*i+j)) out_file.write('\\n // IMU -", "i # (previous body is not always body i-1), expressed", "in_out.{}[2] = atan2({}, {});\\n'.format(angle_name, R_matrix[1], R_matrix[0])) else: # epsilon =", "derivatives\\n') out_file.write(' *\\n') out_file.write(' * ////////////////////////\\n') out_file.write(' * // //\\n')", "nb_max_line): if count_elem(in_file, '{}'.format(elem)) >= 1: count = 0 with", "out_file.write('{}'.format(get_string_enum(joint_id_names[i]))) elif count >= 6: count = 0 out_file.write(',\\n {}'.format(get_string_enum(joint_id_names[i])))", "the previous body # DGi : position vector from the", "elements for j in range(0, 3): new_vector[j] = symbolic_jacob_der(Rj, xj,", "in range(0, 3): out_file.write(' in_out.rp_COM[{}] = '.format(i)) flag_first = 0", "= ') for i in range(0, nb_bodies): out_file.write('{}'.format(M[i])) if i", "0.0, 1.0]]) else: return np.array([]) else: if axis == 1:", "Dpt_l_wrist = sp.zeros(3, 1) Dpt_r_wrist[0] = r_wrist_x Dpt_r_wrist[1] = r_wrist_y", "flag_print: flag_print = 1 out_file.write('\\n') out_file.write(' in_out.Rfoot_or_der[{}][{}] = {};\\n'.format(get_string_enum(joint_id_names[i]), j,", "out_file.write(' * of the anchor point of body i\\n') out_file.write('", "sine, cosine]]) elif axis == 2: return np.array([[cosine, 0.0, sine],", "= np.array([ -1, # waist 0, 1, 2, 3, 4,", "= 0 if count != 0: out_write.write(';\\n') # print all", 
"else: out_file.write(', {}'.format(get_string_enum(joint_id_names[i]))) out_file.write('};\\n\\n') out_file.write('/*! \\\\brief main kinematics computation\\n') out_file.write('", "x_l_cont[i] = x_l_foot + R_l_foot.T * Dpt_l_foot_cont[i] # writing outputs", "'LeftKneePitch_id' , 'LeftFootRoll_id' , 'LeftFootPitch_id' , # left leg 'TorsoRoll_id'", "elem): if direct: if axis == 1: return np.array([[elem], [0.0],", "roll, pitch, yaw angles (and derivative) def yaw_pitch_roll_angles(out_file, angle_name, R_matrix,", "elem, nb_max_line): if count_elem(in_file, '{}'.format(elem)) >= 1: count = 0", "left wrist absolute position\\n') for i in range(0,3): out_file.write(' in_out.r_Lwrist[{}]", "# print variables declaration def write_variables_declaration(out_file, prefix, min, max): out_file.write('", "rotation matrices out_file.write('\\n // rotation matrices\\n') R = nb_bodies*[None] Rt", "= sp.Matrix([sp.Symbol('L_1_20'), sp.Symbol('L_2_20'), sp.Symbol('L_3_20')]) Dg[15] = sp.Matrix([sp.Symbol('L_1_21'), sp.Symbol('L_2_21'), sp.Symbol('L_3_21')]) #", "positive neg_flag = 0 pos_str = elem_split[0] elif cur_len ==", "sp.Matrix([0.0, sp.Symbol('DPT_2_18'), 0.0]) Dpt[9] = sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_20')]) Dpt[10] =", "out_file.write(' s_y_Rfoot = sin(in_out.theta_Rfoot[1]);\\n') out_file.write(' s_y_Lfoot = sin(in_out.theta_Lfoot[1]);\\n') out_file.write(' s_z_Rfoot", "body # Rdi : rotational matrix between body i and", "# write symbolic jacobian of a com point def write_symb_xgj(nb_bodies,", "range(1, nb_bodies): out_file.write(' s{} = sin(in_out.q_mot[{}]);\\n'.format(i+1, joint_id_names[i])) out_file.write('\\n // joint", "the lines for line in f: cut_line = line.split(elem) if", "'TorsoYaw_id' , # trunk 'RightShPitch_id' , 'RightShRoll_id' , 'RightShYaw_id' ,", "and its predecessor\\n') out_file.write(' * si : sine of the", "'.format(i)) flag_first = 0 for j in range(0, nb_bodies): if", "for j in range(0, 3): out_file.write(' in_out.r_Rfoot_cont[{}][{}] = 
{};\\n'.format(i, j,", "get_tilde(om[0]) for i in range(1, nb_bodies): parent_id = parent_body_index[i] Om[i]", "x & xp out_file.write('\\n // anchor point absolute positions and", "xj_print, x[i], i+1) x[i] = write_symb_vector(out_file, x[i], 'x{}_'.format(i+1), '') xp[i]", "0 for k in range(0, nb_bodies): if xgj[k][i][j] != 0:", "= 1 out_file.write('\\n') out_file.write(' in_out.r_Rfoot_cont_der[{}][{}][{}] = {};\\n'.format(i, get_string_enum(joint_id_names[j]), k, cur_jac))", "= 0 for i in range(0, nb_contacts): for j in", "j in range(0,9): cur_jac = symbolic_jacob_der(Rj, xj, xgj, R_l_foot[j], 'q{}'.format(i+1))", "x_l[i])) out_file.write('\\n') out_file.write(' // left foot absolute velocity\\n') for i", "nb_bodies): out_file.write(' c{} = cos(in_out.q_mot[{}]);\\n'.format(i+1, joint_id_names[i])) out_file.write('\\n // joint sines\\n')", "= 1 out_file.write('({}*{}'.format(M[k], str(xgj[k][i][j]))) if k == nb_bodies-1: if flag_first:", "for i in range(0, nb_contacts): x_r_cont[i] = x_r_foot + R_r_foot.T", "xp, om, Rj, xj, xgj, 19, 23, -0.02, -0.005, -0.225)", "waist 'RightHipPitch_id', 'RightHipRoll_id', 'RightHipYaw_id', 'RightKneePitch_id', 'RightFootRoll_id', 'RightFootPitch_id', # right leg", "{};\\n'.format(i, get_string_enum(joint_id_names[j]), k, cur_jac)) out_file.write(' }\\n\\n') out_file.write(' // feet absolute", "//\\n') out_file.write('\\n // IMU - rotation matrices\\n') for i in", "== 2: # negative neg_flag = 1 pos_str = elem_split[1]", "foot absolute position\\n') for i in range(0,3): out_file.write(' in_out.r_Lfoot[{}] =", "np.array([[0.0], [elem], [0.0]]) elif axis == 3: return np.array([[0.0], [0.0],", "the anchor point of body i # xgi : absolute", "sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_20')]) Dpt[10] = sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_22')]) Dpt[11] =", "leg 'LeftHipPitch_id' , 'LeftHipRoll_id' , 'LeftHipYaw_id' , 'LeftKneePitch_id' , 'LeftFootRoll_id'", "relative # frame of the previous body # Rdi :", "range(1, len(joint_id_names)): 
count += 1 if i == 1: out_file.write('{}'.format(get_string_enum(joint_id_names[i])))", "= 0 for j in range(0, nb_bodies): if flag_first: out_file.write('", "vector elements for j in range(0, 3): new_vector[j] = symbolic_jacob_der(Rj,", "range(1, nb_bodies): out_file.write(' Om{} = in_out.qd_mot[{}];\\n'.format(i+1, joint_id_names[i])) # write symbolic", "i in range(0, nb_contacts): for j in range(0, 3): out_file.write('", "line in f: cut_line = line.split(' = ') if len(cut_line)", "+ {};\\n'.format(omega_out, body_part, body_part, body_part, omega_in[1], body_part, omega_in[0], omega_in[2])) #", "expressed in the inertial frame) # of the anchor point", "end_name): new_matrix = sp.zeros(3, 3) save_matrix = 9 * [None]", "declaration def write_variables_declaration(out_file, prefix, min, max): out_file.write(' double ') for", "sp.Symbol('L_3_10')]) Dg[5] = sp.Matrix([sp.Symbol('L_1_11'), sp.Symbol('L_2_11'), sp.Symbol('L_3_11')]) Dg[6] = sp.Matrix([sp.Symbol('L_1_12'), 0.0", "Dpt = nb_bodies*[None] # waist Dpt[0] = sp.Matrix([0.0, 0.0, 0.0])", "velocity and orientation def wrists_compute(out_file, joint_id_names, R, x, xp, om,", "all the joints for i in range (1, nb_bodies): new_vector", "xj, xgj, x_vector[j], 'q{}'.format(i+1)) [xj[index-1][i], xj_print[index-1][i]] = print_save_symb_vector(new_vector, 'x{}_'.format(index), '_d{}'.format(i+1))", "{};\\n'.format(i, x_l[i])) out_file.write('\\n') out_file.write(' // left foot absolute velocity\\n') for", "R_matrix[j], 'q{}'.format(i+1)) [Rj[index-1][i], Rj_print[index-1][i]] = print_save_symb_matrix(new_matrix, 'R{}_'.format(index), '_d{}'.format(i+1)) # write", "jacobian of an anchor point def write_symb_xj(nb_bodies, Rj, xj, xgj,", "{} * counted , only implemented for 0 or 1", "angle_name, R_matrix, epsilon): if epsilon > 0: # epsilon =", "masses M = np.array([ 'M_6', # waist 'M_7' , 'M_8'", "{};\\n'.format(i, R_r_elb[i])) out_file.write('\\n') for i in range(0, 9): out_file.write(' in_out.Lwrist_or[{}]", 
"flag_first = 0 for i in range (1, nb_bodies): flag_print", "i in range(0, 3): for j in range(0, 3): out_file.write('", "for j in range(1, nb_bodies): flag_print = 0 for k", "9): out_file.write(' in_out.Lwrist_or[{}] = {};\\n'.format(i, R_l_elb[i])) out_file.write('\\n') out_file.write(' // right", "in range(0, 3): if xj_print[i][j][k] != None: if not flag_first:", "not flag_print: flag_print = 1 out_file.write('\\n') out_file.write(' in_out.r_Lfoot_der[{}][{}] = {};\\n'.format(get_string_enum(joint_id_names[i]),", "-- variables declaration -- //\\n\\n') print_all_declaration(in_temp, out_file, 100) out_file.write('\\n\\n') #", "sp.zeros(3, 1) # loop on all the vector elements for", "loop on all terms for cur_term in term_list: # detect", "[rad/s]\\n') theta_dot_compute(out_file, om[waist_id], 'omega_waist', 'waist') out_file.write('\\n') out_file.write(' // torso orientation", "out_file.write(' }\\n\\n') # get a string for the enumeration of", "orientation angle derivatives [rad/s]\\n') theta_dot_compute(out_file, om[waist_id], 'omega_waist', 'waist') out_file.write('\\n') out_file.write('", "'xp{}_'.format(i+1), '') # jacobian x out_file.write('\\n // jacobian anchor point", "x_l[j], 'q{}'.format(i+1)) if cur_jac != 0: if not flag_first: flag_first", "//\\n') out_file.write(' * ////////////////////////\\n') out_file.write(' *\\n') out_file.write(' * origin: in", "range(0, 3): out_file.write(' omega_{} = in_out.IMU_Angular_Rate[{}];\\n'.format(i+1, i)) out_file.write('\\n // joint", "nb_bodies*[None] xgp = nb_bodies*[None] for i in range(0, nb_bodies): xg[i]", "// 06 12 //\\n') out_file.write(' * // 07 13 //\\n')", "= x_max Dpt_r_foot_cont[0][1] = y_min Dpt_r_foot_cont[1][1] = y_max Dpt_r_foot_cont[2][1] =", "temporary file in_temp = './{}_temp.cc'.format(out_file_name) file_temp = open(in_temp, 'w') #", "Dpt[22] = sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_48')]) Dpt[23] = sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_50')])", "'./{}_temp.cc'.format(out_file_name) file_temp = 
open(in_temp, 'w') # beginning of the file", "the previous body # Rdi : rotational matrix between body", "* waist and torso orientaion angles and derivatives\\n') out_file.write(' *\\n')", "elif not flag_print: flag_print = 1 out_file.write('\\n') out_file.write(' in_out.r_Rwrist_der[{}][{}] =", "write_symb_Rj(nb_bodies, Rj, xj, xgj, Rj_print, R[i], i+1) R[i] = write_symb_matrix(out_file,", "out_file.write('0.0;\\n') out_file.write('\\n') out_file.write(' // global com absolute velocity\\n') for i", "Dpt_r_foot_cont[2][0] = x_max Dpt_r_foot_cont[3][0] = x_max Dpt_r_foot_cont[0][1] = y_min Dpt_r_foot_cont[1][1]", "i in range(0,3): out_file.write(' in_out.rp_Rfoot[{}] = {};\\n'.format(i, xp_r[i])) out_file.write('\\n') out_file.write('", "pitch hip rotations\\n') out_file.write(' * inertial frame: located at the", "new_vector = sp.zeros(3, 1) save_vector = 3 * [None] for", "= {};\\n'.format(get_string_enum(joint_id_names[i]), j, cur_jac)) out_file.write(' }\\n\\n') out_file.write(' // wrists absolute", "= sp.zeros(3, 1) Rj_print[i][j] = 9 * [None] xj_print[i][j] =", "int(value.split('_')[1]) return True, a, b except: return False, -1, -1", "= {};\\n'.format(i, get_string_enum(joint_id_names[j]), k, cur_jac)) out_file.write(' }\\n\\n') out_file.write(' // feet", "ForwardKinematics::main_kinematics(KinematicsInOut &in_out)\\n{\\n') # compute the center of mass position and", "# compute the feet position, velocity and orientation def feet_compute(out_file,", "frame)\\n') out_file.write(' * of the anchor point of body i\\n')", "xgj_print = nb_bodies*[None] for i in range(0, nb_bodies): Rj[i] =", "waist and torso orientaion angles and derivatives\\n') out_file.write(' *\\n') out_file.write('", "+ R_r_foot.T * Dpt_r_foot_cont[i] x_l_cont[i] = x_l_foot + R_l_foot.T *", "not flag_print: flag_print = 1 out_file.write('\\n') out_file.write('{}'.format(Rj_print[i][j][k])) out_file.write(' }\\n') #", "Dpt[7] = sp.Matrix([0.0, sp.Symbol('DPT_2_3'), 0.0]) Dpt[8] = sp.Matrix([0.0, 
sp.Symbol('DPT_2_18'), 0.0])", "end_name): new_vector = sp.zeros(3, 1) save_vector = 3 * [None]", "sp.Matrix([sp.Symbol('L_1_13'), sp.Symbol('L_2_13'), sp.Symbol('L_3_13')]) Dg[8] = sp.Matrix([sp.Symbol('L_1_14'), sp.Symbol('L_2_14'), sp.Symbol('L_3_14')]) Dg[9] =", "sqrt({}*{} + {}*{}));\\n'.format(angle_name, R_matrix[2], R_matrix[0], R_matrix[0], R_matrix[1], R_matrix[1])) out_file.write(' in_out.{}[2]", "out_file.write(' c_y_Lfoot = cos(in_out.theta_Lfoot[1]);\\n') out_file.write(' c_z_Rfoot = cos(in_out.theta_Rfoot[2]);\\n') out_file.write(' c_z_Lfoot", "xp[parent_id] + om_tilde[parent_id] * (Rt[parent_id] * Dpt[i]) write_symb_xj(nb_bodies, Rj, xj,", "x, xp, om, Rj, xj, xgj, 6, 12, -0.06, 0.08,", "1 out_file.write('({}*xg{}_{}'.format(M[j], j+1, i+1)) if j == nb_bodies-1: if flag_first:", "if (flag_jacob)\\n {\\n') for i in range(1, nb_bodies): for j", "Dpt[0] = sp.Matrix([0.0, 0.0, 0.0]) # right leg Dpt[1] =", "'RightKneePitch_id', 'RightFootRoll_id', 'RightFootPitch_id', # right leg 'LeftHipPitch_id' , 'LeftHipRoll_id' ,", "open('./{}.cc'.format(out_file_name), 'w') with open(in_temp, 'r') as f: # loop on", "out_file.write('({}*xgp{}_{}'.format(M[j], j+1, i+1)) if j == nb_bodies-1: if flag_first: out_file.write(')/m_tot;\\n')", "* Dg[i]) write_symb_xgj(nb_bodies, Rj, xj, xgj, xgj_print, xg[i], i+1) xg[i]", "xp_r_elb = xp[r_elb_id] xp_l_elb = xp[l_elb_id] om_r_elb = om[r_elb_id] om_l_elb", "nb_bodies): parent_id = parent_body_index[i] Om[i] = get_vector_axis(rot_axis[i], 1, sp.Symbol('Om{}'.format(i+1))) om[i]", "') for i in range(0,3): for j in range(0,3): out_file.write('{}{}{}'.format(prefix,", "direct: if axis == 1: return np.array([[1.0, 0.0, 0.0], [0.0,", "not always body i-1), expressed in the relative\\n') out_file.write(' *", "range(0, 3): Dpt_l_foot_cont[i][j] = Dpt_r_foot_cont[i][j] x_r_cont = nb_contacts * [None]", "out_file.write(' inv_c_y_torso = 1.0 / c_y_torso;\\n\\n') out_file.write(' // waist orientation", "print def 
print_save_symb_vector(vector, start_name, end_name): new_vector = sp.zeros(3, 1) save_vector", "elif not flag_print: flag_print = 1 out_file.write('\\n') out_file.write('{}'.format(xgj_print[i][j][k])) out_file.write(' }\\n')", "symbolic_jacob_der(Rj, xj, xgj, x_vector[j], 'q{}'.format(i+1)) [xj[index-1][i], xj_print[index-1][i]] = print_save_symb_vector(new_vector, 'x{}_'.format(index),", "# left arm ]) out_file_name = 'forward_kinematics' gen_symbolic_out(out_file_name, nb_bodies, rot_axis,", "for print def print_save_symb_vector(vector, start_name, end_name): new_vector = sp.zeros(3, 1)", "R_matrix[1], R_matrix[0])) # compute the time derivatives of 'yaw_pitch_roll_angles' def", "//\\n') out_file.write(' * // 20 01 24 //\\n') out_file.write(' *", "np.array(['0', # waist 'RightHipPitch_id', 'RightHipRoll_id', 'RightHipYaw_id', 'RightKneePitch_id', 'RightFootRoll_id', 'RightFootPitch_id', #", "xp_l = xp_l_elb + om_tilde_l_elb * (R_l_elb.T * Dpt_l_wrist) #", "file def write_file_end(out_file): out_file.write('}\\n') # print matrix components declaration def", "file os.remove(in_temp) # main script # rotation axis for each", "sp.Symbol('DPT_3_29') Dpt_r_foot_cont = nb_contacts * [None] Dpt_l_foot_cont = nb_contacts *", ", sp.Symbol('L_3_9')]) Dg[4] = sp.Matrix([sp.Symbol('L_1_10'), sp.Symbol('L_2_10'), sp.Symbol('L_3_10')]) Dg[5] = sp.Matrix([sp.Symbol('L_1_11'),", "1 elif not flag_print: flag_print = 1 out_file.write('\\n') out_file.write('{}'.format(xgj_print[i][j][k])) out_file.write('", "0.0]) Dpt[3] = sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_8')]) Dpt[4] = sp.Matrix([0.0, 0.0,", "sp.Symbol('L_3_19')]) Dg[14] = sp.Matrix([sp.Symbol('L_1_20'), sp.Symbol('L_2_20'), sp.Symbol('L_3_20')]) Dg[15] = sp.Matrix([sp.Symbol('L_1_21'), sp.Symbol('L_2_21'),", "sp.Symbol('L_3_22')]) Dg[17] = sp.Matrix([sp.Symbol('L_1_23'), sp.Symbol('L_2_23'), sp.Symbol('L_3_23')]) Dg[18] = sp.Matrix([sp.Symbol('L_1_24'), sp.Symbol('L_2_24'),", "'M_26', 'M_27', 'M_28', 'M_29' # left arm ]) # 
joint", "der_elem(cur_term_split[1], Rj, xj, xgj, der_var)*sp.Symbol(cur_term_split[0].strip()) # other else: print('Error: {}", "# right arm 'LeftShPitch_id' , 'LeftShRoll_id' , 'LeftShYaw_id' , 'LeftElbPitch_id'", "as angles [rad]\\n') yaw_pitch_roll_angles(out_file, 'theta_Rfoot', R[r_foot_id], 1) out_file.write('\\n') out_file.write(' //", "count = 0 out_file.write(',\\n {}'.format(get_string_enum(joint_id_names[i]))) else: out_file.write(', {}'.format(get_string_enum(joint_id_names[i]))) out_file.write('};\\n\\n') out_file.write('/*!", "out_file.write(' in_out.r_Lfoot[{}] = {};\\n'.format(i, x_l[i])) out_file.write('\\n') out_file.write(' // left foot", "1 elif not flag_print: flag_print = 1 out_file.write('\\n') out_file.write(' in_out.r_Rfoot_cont_der[{}][{}][{}]", "isInt(value): try: int(value) return True except: return False # return", "in range(0, 3): Dpt_l_foot_cont[i][j] = Dpt_r_foot_cont[i][j] x_r_cont = nb_contacts *", "nb_bodies*[None] for j in range(0, nb_bodies-1): Rj[i][j] = sp.zeros(3, 3)", "%a, %b also returned) def isVec(value): try: a = int(value.split('_')[0].split('x')[1])", "der_elem(elem_str, Rj, xj, xgj, der_var): # element to derive (string)", "elif pos_str == 's{}'.format(der_q): result += sp.Symbol('c{}'.format(der_q)) # other else:", "# # 06 12 # # 07 13 # #", "elif not flag_print: flag_print = 1 out_file.write('\\n') out_file.write(' in_out.r_Lfoot_der[{}][{}] =", "write_symb_xj(nb_bodies, Rj, xj, xgj, xj_print, x_vector, index): # loop on", "# right arm Dg[16] = sp.Matrix([sp.Symbol('L_1_22'), sp.Symbol('L_2_22'), sp.Symbol('L_3_22')]) Dg[17] =", "jacobian\\n') out_file.write(' if (flag_jacob)\\n {\\n') flag_first = 0 for i", "xgj_print[i][j] = 3 * [None] # rotation matrices out_file.write('\\n //", "print_all_declaration(in_file, out_write, nb_max_char): count = 0 with open(in_file,'r') as f:", "absolute positions and velocities\\n') xg = nb_bodies*[None] xgp = nb_bodies*[None]", "derivatives [rad/s]\\n') 
theta_dot_compute(out_file, om[torso_id], 'omega_torso', 'torso') # compute the feet", "str(symb_var).replace('- ','-').replace('-','+-').split('+') if term_list[0] == '': term_list.pop(0) result = 0", "out_file.write(' in_out.{}[0] = atan2({}, {});\\n'.format(angle_name, R_matrix[5], R_matrix[8])) out_file.write(' in_out.{}[1] =", "if i == max: out_file.write(';\\n') else: out_file.write(', ') # variables", "= 1.0 / c_y_Rfoot;\\n') out_file.write(' inv_c_y_Lfoot = 1.0 / c_y_Lfoot;\\n\\n')", "x_r_cont = nb_contacts * [None] x_l_cont = nb_contacts * [None]", "for i in range(0,3): out_file.write(' in_out.rp_Lfoot[{}] = {};\\n'.format(i, xp_l[i])) out_file.write('\\n')", "out_file.write(' in_out.Lwrist_or_der[{}][{}] = {};\\n'.format(get_string_enum(joint_id_names[i]), j, cur_jac)) out_file.write(' }\\n\\n') # get", "* R[parent_body_index[i]] write_symb_Rj(nb_bodies, Rj, xj, xgj, Rj_print, R[i], i+1) R[i]", "// left wrist absolute orientation jacobian\\n') out_file.write(' if (flag_jacob)\\n {\\n')", "jacobian rotation matrices\\n') out_file.write(' if (flag_jacob)\\n {\\n') flag_first = 0", "in_temp = './{}_temp.cc'.format(out_file_name) file_temp = open(in_temp, 'w') # beginning of", "-v[2], v[1]], [v[2], 0.0, -v[0]], [-v[1], v[0], 0.0]]) # get", "if len(cut_line) == 2: count += 1 return count #", "# trunk Dg[13] = sp.Matrix([sp.Symbol('L_1_19'), sp.Symbol('L_2_19'), sp.Symbol('L_3_19')]) Dg[14] = sp.Matrix([sp.Symbol('L_1_20'),", "and torso orientaion angles and derivatives\\n') out_file.write(' *\\n') out_file.write(' *", "om[0] = sp.zeros(3, 1) for i in range(0,3): om[0][i] =", "Dg[16] = sp.Matrix([sp.Symbol('L_1_22'), sp.Symbol('L_2_22'), sp.Symbol('L_3_22')]) Dg[17] = sp.Matrix([sp.Symbol('L_1_23'), sp.Symbol('L_2_23'), sp.Symbol('L_3_23')])", "file write_file_end(file_temp) file_temp.close() # output file out_file = open('./{}.cc'.format(out_file_name), 'w')", "of joints def get_string_enum(cur_string): cur_split = cur_string.split('_') if len(cur_split) >=", 
"09 //\\n') out_file.write(' * // 04 10 //\\n') out_file.write(' *", "body i # xgi : absolute position vector of the", "// 18 15 22 //\\n') out_file.write(' * // 19 14", "// left foot contact points jacobian\\n') out_file.write(' if (flag_jacob)\\n {\\n')", "= {};\\n'.format(i, R_r_elb[i])) out_file.write('\\n') for i in range(0, 9): out_file.write('", "vector[i] save_vector[i] = None else: elem_name = '{}{}{}'.format(start_name, i+1, end_name)", "= om[r_foot_id] om_l_foot = om[l_foot_id] R_r_foot = R[r_foot_id] R_l_foot =", "out_file.write(' */\\n') out_file.write('void ForwardKinematics::main_kinematics(KinematicsInOut &in_out)\\n{\\n') # compute the center of", "i in range(1, nb_bodies): out_file.write(' Om{} = in_out.qd_mot[{}];\\n'.format(i+1, joint_id_names[i])) #", "s_y_{} * (s_z_{}*{} + c_z_{}*{}) + {};\\n'.format(omega_out, body_part, body_part, body_part,", "out_file.write(' * Rti : transpose matrix of Ri\\n') out_file.write(' *", "save the symbolic vector for print def print_save_symb_vector(vector, start_name, end_name):", "sp.Matrix([0.0, 0.0, 0.0]) Dpt[15] = sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_32')]) # right", "flag_print: flag_print = 1 out_file.write('\\n') out_file.write('{}'.format(xj_print[i][j][k])) out_file.write(' }\\n') # xg", "result += Rj[a-1][der_q-1][(b-1)*3+(c-1)] # vector elif vec_flag: result += xj[d-1][der_q-1][e-1]", "only implemented for 0 or 1 !'.format(cur_len-1)) exit() return result", "new_vector[i] = vector[i] else: flag_print = 1 elem_name = '{}{}{}'.format(start_name,", "out_file.write('\\n // jacobian com absolute positions\\n') out_file.write(' if (flag_jacob)\\n {\\n')", ": sine of the relative angle before body i #", "!= 0: if flag_first: out_file.write(' + {}*{}'.format(M[k], str(xgj[k][i][j]))) else: flag_first", "om[torso_id], 'omega_torso', 'torso') # compute the feet position, velocity and", "file_temp = open(in_temp, 'w') # beginning of the file write_file_beginning(file_temp,", "b, c] = isRot(pos_str) [vec_flag, 
d, e] = isVec(pos_str) #", "flag_print: flag_print = 1 out_file.write('\\n') out_file.write(' in_out.r_Rfoot_cont_der[{}][{}][{}] = {};\\n'.format(i, get_string_enum(joint_id_names[j]),", "xp, om, Rj, xj, xgj, r_elb_id, l_elb_id, r_wrist_x, r_wrist_y, r_wrist_z):", "symbolic variables declarations nb_contacts = 4 x_r_foot = x[r_foot_id] x_l_foot", "'LeftHipRoll_id' , 'LeftHipYaw_id' , 'LeftKneePitch_id' , 'LeftFootRoll_id' , 'LeftFootPitch_id' ,", "elif not flag_print: flag_print = 1 out_file.write('\\n') out_file.write(' in_out.r_Lwrist_der[{}][{}] =", "symbolic variables declarations x_r_elb = x[r_elb_id] x_l_elb = x[l_elb_id] xp_r_elb", "[None] Dpt_l_foot_cont = nb_contacts * [None] for i in range(0,", "= line.split(elem) if len(cut_line) == 2: count += 1 return", "nb_bodies-1: if flag_first: out_file.write(')/m_tot;\\n') else: out_file.write('0.0;\\n') if i != nb_bodies-1:", "flag_print = 0 for k in range(0, 3): cur_jac =", "import os ###################### # # # 17 16 21 #", "== 1: # positive neg_flag = 0 pos_str = elem_split[0]", "range (1, nb_bodies): new_vector = sp.zeros(3, 1) # loop on", "0.0, sp.Symbol('DPT_3_43')]) # left arm Dpt[20] = sp.Matrix([sp.Symbol('DPT_1_37'), sp.Symbol('DPT_2_37'), sp.Symbol('DPT_3_37')])", "double ') for i in range(0,3): for j in range(0,3):", "') out_file.write(' // global com absolute position\\n') for i in", "in_out.r_Rwrist[{}] = {};\\n'.format(i, x_r[i])) out_file.write('\\n') out_file.write(' // right wrist absolute", "3): cur_jac = symbolic_jacob_der(Rj, xj, xgj, x_r[j], 'q{}'.format(i+1)) if cur_jac", "= {};\\n'.format(elem_name, matrix[i,j]).replace('1.0*','')) new_matrix[i,j] = sp.Symbol(elem_name) if flag_print: out_file.write('\\n') return", "np.array([ -1, # waist 0, 1, 2, 3, 4, 5,", "range(0, 3): for j in range(0, 3): out_file.write(' IMU{}{} =", "in_out.qd_mot[{}];\\n'.format(i+1, joint_id_names[i])) # write symbolic vector and replace symbolic variable", "*\\n') out_file.write(' * xi : absolute position vector 
(from origin,", "IMU - rotation matrices\\n') for i in range(0, 3): for", "for each joint before body i (1:x, 2:y, 3:z) rot_axis", "# negative neg_flag = 1 pos_str = elem_split[1] else: print('Error:", "and j == 2: out_file.write(';\\n') else: out_file.write(', ') # print", "sp.zeros(3, 3) for i in range(0, 3): for j in", "# origin: in the waist, middle point between the two", "1.0 / c_y_Rfoot;\\n') out_file.write(' inv_c_y_Lfoot = 1.0 / c_y_Lfoot;\\n\\n') out_file.write('", "06 12 //\\n') out_file.write(' * // 07 13 //\\n') out_file.write('", "out_file.write(' }\\n\\n') out_file.write(' // left wrist absolute orientation jacobian\\n') out_file.write('", "= sp.zeros(3, 1) save_vector = 3 * [None] for i", "def gen_symbolic_out(out_file_name, nb_bodies, rot_axis, parent_body_index, joint_id_names, Dpt, Dg, M): #", "# left leg Dg[7] = sp.Matrix([sp.Symbol('L_1_13'), sp.Symbol('L_2_13'), sp.Symbol('L_3_13')]) Dg[8] =", "if flag_print: out_file.write('\\n') return new_vector # write symbolic matrix and", "left leg Dpt[7] = sp.Matrix([0.0, sp.Symbol('DPT_2_3'), 0.0]) Dpt[8] = sp.Matrix([0.0,", "in negative detection !'.format(cur_len)) exit() # compute derivative result =", "= R[r_elb_id] R_l_elb = R[l_elb_id] Dpt_r_wrist = sp.zeros(3, 1) Dpt_l_wrist", "cosine of the relative angle before body i\\n') out_file.write(' *\\n')", "out_file.write(')/m_tot;\\n') else: out_file.write('0.0;\\n') if i != nb_bodies-1: out_file.write('\\n') else: out_file.write('", "= sp.Matrix([sp.Symbol('L_1_23'), sp.Symbol('L_2_23'), sp.Symbol('L_3_23')]) Dg[18] = sp.Matrix([sp.Symbol('L_1_24'), sp.Symbol('L_2_24'), sp.Symbol('L_3_24')]) Dg[19]", "xj, xgj, r_elb_id, l_elb_id, r_wrist_x, r_wrist_y, r_wrist_z): # symbolic variables", "* xgpi : derivative of xgi\\n') out_file.write(' * omi :", "in range(0, 9): out_file.write(' in_out.Lwrist_or[{}] = {};\\n'.format(i, R_l_elb[i])) out_file.write('\\n') out_file.write('", "nb_contacts): Dpt_r_foot_cont[i][2] = sp.Symbol('DPT_3_16') for i in range(0, 
nb_contacts): for", "// left foot absolute position\\n') for i in range(0,3): out_file.write('", "Dpt[15] = sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_32')]) # right arm Dpt[16] =", "anchor point of body i # xgi : absolute position", "[0.0, cosine, sine], [0.0, -sine, cosine]]) elif axis == 2:", "# # 05 11 # # 06 12 # #", "Rj, xj, xgj, der_var): # element to derive (string) elem_str", "// right foot absolute orientation jacobian\\n') out_file.write(' if (flag_jacob)\\n {\\n')", "out_write, nb_max_char): count = 0 with open(in_file,'r') as f: #", "// //\\n') out_file.write(' * // 17 16 21 //\\n') out_file.write('", "3): out_file.write(' in_out.r_Lfoot_cont[{}][{}] = {};\\n'.format(i, j, x_l_cont[i][j])) out_file.write('\\n') out_file.write(' //", "and outputs class\\n') out_file.write(' *\\n') out_file.write(' * computation of:\\n') out_file.write('", "current body i # Omi : rotational vector from the", "Dpt[20] = sp.Matrix([sp.Symbol('DPT_1_37'), sp.Symbol('DPT_2_37'), sp.Symbol('DPT_3_37')]) Dpt[21] = sp.Matrix([0.0, sp.Symbol('DPT_2_46'), 0.0])", "Dpt[19] = sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_43')]) # left arm Dpt[20] =", "sp.Symbol('L_2_16'), sp.Symbol('L_3_16')]) Dg[11] = sp.Matrix([sp.Symbol('L_1_17'), sp.Symbol('L_2_17'), sp.Symbol('L_3_17')]) Dg[12] = sp.Matrix([sp.Symbol('L_1_18'),", "out_file.write(' }\\n\\n') # from an orientation matrix, compute the roll,", "axis == 2: return np.array([[cosine, 0.0, -sine], [0.0, 1.0, 0.0],", "print the declaration of an element def print_declaration_elem(in_file, out_write, elem,", "for j in range(0, 3): out_file.write(' in_out.r_Lfoot_cont[{}][{}] = {};\\n'.format(i, j,", "Dpt_l_foot_cont[i][j] = Dpt_r_foot_cont[i][j] x_r_cont = nb_contacts * [None] x_l_cont =", "sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_48')]) Dpt[23] = sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_50')]) ## COM", "om[l_foot_id], 'omega_Lfoot', 'Lfoot') out_file.write('\\n') # compute the wrists position, velocity", "product elif cur_len == 2: result += 
der_elem(cur_term_split[0], Rj, xj,", "Rti : transpose matrix of Ri # xji : jacobian", "// right foot contact points jacobian\\n') out_file.write(' if (flag_jacob)\\n {\\n')", "get_tilde(om_r_foot) om_tilde_l_foot = get_tilde(om_l_foot) x_r = x_r_foot + R_r_foot.T *", "Dg[21] = sp.Matrix([sp.Symbol('L_1_27'), sp.Symbol('L_2_27'), sp.Symbol('L_3_27')]) Dg[22] = sp.Matrix([sp.Symbol('L_1_28'), sp.Symbol('L_2_28'), sp.Symbol('L_3_28')])", "'TorsoPitch_id' , 'TorsoYaw_id' , # trunk 'RightShPitch_id' , 'RightShRoll_id' ,", "out_file.write(' if ((!c_y_Rfoot) || (!c_y_Lfoot))\\n {\\n') out_file.write(' return;\\n }\\n\\n') out_file.write('", "nb_bodies): flag_print = 0 for k in range(0, 3): if", "0.0 , sp.Symbol('L_3_18')]) # trunk Dg[13] = sp.Matrix([sp.Symbol('L_1_19'), sp.Symbol('L_2_19'), sp.Symbol('L_3_19')])", "sp.Symbol('DPT_3_16') for i in range(0, nb_contacts): for j in range(0,", "// right foot jacobian\\n') out_file.write(' if (flag_jacob)\\n {\\n') flag_first =", "0 for k in range(0, 9): if Rj_print[i][j][k] != None:", "3*i+j)) out_file.write('\\n // IMU - angles velocity\\n') for i in", "in_out.r_COM[{}] = '.format(i)) flag_first = 0 for j in range(0,", "of Ri # xji : jacobian of 'xi' # xgji", "0.0, cosine]]) elif axis == 3: return np.array([[cosine, sine, 0.0],", ", 'M_9' , 'M_10', 'M_11', 'M_12', # right leg 'M_13',", "# parent index parent_body_index = np.array([ -1, # waist 0,", "False, -1, -1, -1 # return true if it has", "out_file.write(' if (flag_jacob)\\n {\\n') flag_first = 0 for i in", "nb_bodies): flag_print = 0 for j in range(0,9): cur_jac =", "(info from IMU)\\n') out_file.write(' *\\n') out_file.write(' * Di : position", "an orientation matrix, compute the roll, pitch, yaw angles (and", "= y_max Dpt_r_foot_cont[2][1] = y_min Dpt_r_foot_cont[3][1] = y_max for i", "range (1, nb_bodies): new_matrix = sp.zeros(3, 3) # loop on", "in_out.{}[0] = inv_c_y_{} * (c_z_{}*{} + s_z_{}*{});\\n'.format(omega_out, body_part, body_part, omega_in[0],", "rotations 
# inertial frame: located at the origin (waist), but", "the anchor point of body i to its COM (center", "out_file.write(' return;\\n }\\n\\n') out_file.write(' inv_c_y_Rfoot = 1.0 / c_y_Rfoot;\\n') out_file.write('", "result += sp.Symbol('c{}'.format(der_q)) # other else: [rot_flag, a, b, c]", "= '{}_{}'.format(new_string, cur_split[i].upper()) return new_string # write the end of", "result += der_elem(cur_term_split[0], Rj, xj, xgj, der_var)*sp.Symbol(cur_term_split[1].strip()) result += der_elem(cur_term_split[1],", "in_out.r_COM_der[{}][{}] = '.format(get_string_enum(joint_id_names[i]), j)) flag_first = 0 for k in", "xj[d-1][der_q-1][e-1] # apply negative if neg_flag: result = -result return", "range(0, 3): new_vector[j] = symbolic_jacob_der(Rj, xj, xgj, x_vector[j], 'q{}'.format(i+1)) [xgj[index-1][i],", "vector, start_name, end_name): new_vector = sp.zeros(3, 1) flag_print = 0", "for i in range(0,3): if vector[i] == 0 or vector[i]", "= sp.zeros(3, 1) Dpt_r_foot[2] = sp.Symbol('DPT_3_16') Dpt_l_foot[2] = sp.Symbol('DPT_3_29') Dpt_r_foot_cont", "= sp.zeros(3, 1) flag_print = 0 for i in range(0,3):", "out_write.write(';\\n') count = 0 if count != 0: out_write.write(';\\n') #", "the ground (info from IMU)\\n') out_file.write(' *\\n') out_file.write(' * Di", "axis der_q = int(der_var.replace('q','')) # detect positive/negative elem_split = elem_str.split('-')", "sin(in_out.theta_Lfoot[2]);\\n\\n') out_file.write(' if ((!c_y_Rfoot) || (!c_y_Lfoot))\\n {\\n') out_file.write(' return;\\n }\\n\\n')", "Rj, xj, xgj, der_var)*sp.Symbol(cur_term_split[1].strip()) result += der_elem(cur_term_split[1], Rj, xj, xgj,", "xj, xgj, xgj_print, xg[i], i+1) xg[i] = write_symb_vector(out_file, xg[i], 'xg{}_'.format(i+1),", "* Dpt_l_foot xp_r = xp_r_foot + om_tilde_r_foot * (R_r_foot.T *", "symbolic computation symbolic_computation(file_temp, nb_bodies, joint_id_names, rot_axis, parent_body_index, Dpt, Dg, M)", "sp.Symbol('DPT_3_8')]) Dpt[4] = sp.Matrix([0.0, 0.0, 
sp.Symbol('DPT_3_10')]) Dpt[5] = sp.Matrix([0.0, 0.0,", "Dpt[10] = sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_22')]) Dpt[11] = sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_24')])", "rotational matrix # Rti : transpose matrix of Ri #", "else: [rot_flag, a, b, c] = isRot(pos_str) [vec_flag, d, e]", "if i == nb_bodies-1: out_file.write(';\\n\\n') else: out_file.write(' + ') out_file.write('", "xg = nb_bodies*[None] xgp = nb_bodies*[None] for i in range(0,", "return count # print the declaration of an element def", "// 20 01 24 //\\n') out_file.write(' * // 02 08", "1: return np.array([[-elem], [0.0], [0.0]]) elif axis == 2: return", "not flag_print: flag_print = 1 out_file.write('\\n') out_file.write(' in_out.r_Rfoot_der[{}][{}] = {};\\n'.format(get_string_enum(joint_id_names[i]),", "in_out.Rfoot_or_der[{}][{}] = {};\\n'.format(get_string_enum(joint_id_names[i]), j, cur_jac)) out_file.write(' }\\n\\n') out_file.write(' // left", "# left leg 0, 13, 14, # trunk 15, 16,", "sp.Matrix([sp.Symbol('L_1_9') , sp.Symbol('L_2_9') , sp.Symbol('L_3_9')]) Dg[4] = sp.Matrix([sp.Symbol('L_1_10'), sp.Symbol('L_2_10'), sp.Symbol('L_3_10')])", "of the file write_file_beginning(file_temp, joint_id_names) # variables initialization write_intialization(file_temp, nb_bodies,", "3) R[0] = sp.zeros(3, 3) for i in range(0, 3):", "= 0 with open(in_file,'r') as f: # loop on all", "0.0], [-sine, 0.0, cosine]]) elif axis == 3: return np.array([[cosine,", "element to derive (string) elem_str = elem_str.replace('- ','-').strip() # derivative", "+ ') out_file.write(' // global com absolute position\\n') for i", "om[r_foot_id], 'omega_Rfoot', 'Rfoot') out_file.write('\\n') out_file.write(' // left foot orientation angle", "sp.Matrix([sp.Symbol('L_1_18'), 0.0 , sp.Symbol('L_3_18')]) # trunk Dg[13] = sp.Matrix([sp.Symbol('L_1_19'), sp.Symbol('L_2_19'),", "17 16 21 # # 18 15 22 # #", "in range(0,3): if matrix[i,j] == 0 or matrix[i,j] == 1:", "= isRot(pos_str) [vec_flag, d, e] = isVec(pos_str) # rotation matrix", 
"computation om_tilde_r_elb = get_tilde(om_r_elb) om_tilde_l_elb = get_tilde(om_l_elb) x_r = x_r_elb", "left arm ]) out_file_name = 'forward_kinematics' gen_symbolic_out(out_file_name, nb_bodies, rot_axis, parent_body_index,", "current body i\\n') out_file.write(' * Omi : rotational vector from", "-- //\\n') out_file.write('\\n // IMU - rotation matrices\\n') for i", "position vector (from origin, expressed in the inertial frame)\\n') out_file.write('", "return np.array([[cosine, 0.0, -sine], [0.0, 1.0, 0.0], [sine, 0.0, cosine]])", "for i in range(0, nb_contacts): for j in range(0, 3):", "out_file.write(' // right foot contact points absolute position\\n') for i", "foot absolute position\\n') for i in range(0,3): out_file.write(' in_out.r_Rfoot[{}] =", "9): if Rj_print[i][j][k] != None: if not flag_first: flag_first =", "in f: cut_line_1 = line.split(elem) cut_line_2 = line.split(' = ')", "joint relative velocities\\n') for i in range(1, nb_bodies): out_file.write(' Om{}", "if vector[i] == 0 or vector[i] == 1: new_vector[i] =", "save_vector = 3 * [None] for i in range(0,3): if", "* // 07 13 //\\n') out_file.write(' * // //\\n') out_file.write('", "write the end of the file def write_file_end(out_file): out_file.write('}\\n') #", "i # Ri : absolute rotational matrix # Rti :", "of body i to its COM (center of mass) G_i,", "0.0 , sp.Symbol('L_3_12')]) # left leg Dg[7] = sp.Matrix([sp.Symbol('L_1_13'), sp.Symbol('L_2_13'),", "orientation angle derivatives [rad/s]\\n') theta_dot_compute(out_file, om[torso_id], 'omega_torso', 'torso') # compute", "nb_bodies*[None] xgj_print = nb_bodies*[None] for i in range(0, nb_bodies): Rj[i]", "sp.Symbol('DPT_2_3'), 0.0]) Dpt[8] = sp.Matrix([0.0, sp.Symbol('DPT_2_18'), 0.0]) Dpt[9] = sp.Matrix([0.0,", "== 0 or vector[i] == 1: new_vector[i] = vector[i] else:", "loop on all the vector elements for j in range(0,", "Dpt_r_foot_cont[i] = sp.zeros(3, 1) Dpt_l_foot_cont[i] = sp.zeros(3, 1) Dpt_r_foot_cont[0][0] =", "write_symb_Rj(nb_bodies, 
Rj, xj, xgj, Rj_print, R_matrix, index): # loop on", "out_write.write(' double {}'.format(cut_line_2[0].strip())) else: out_write.write(', {}'.format(cut_line_2[0].strip())) count += 1 if", "out_file.write('\\n') out_file.write('{}'.format(Rj_print[i][j][k])) out_file.write(' }\\n') # omega out_file.write('\\n // joint absolute", "write_symb_Rj(nb_bodies, Rj, xj, xgj, Rj_print, R[0], 1) R[0] = write_symb_matrix(out_file,", "+= len(cut_line[0].strip()) + 2 if count >= nb_max_char: out_write.write(';\\n') count", "nb_bodies-1): Rj[i][j] = sp.zeros(3, 3) xj[i][j] = sp.zeros(3, 1) xgj[i][j]", "Di : position vector from the anchor point of the", "if count == 0: out_write.write(' double {}'.format(cut_line_2[0].strip())) else: out_write.write(', {}'.format(cut_line_2[0].strip()))", "Om[i] = get_vector_axis(rot_axis[i], 1, sp.Symbol('Om{}'.format(i+1))) om[i] = om[parent_id] + Rt[parent_id]", "Ri : absolute rotational matrix # Rti : transpose matrix", "if matrix[i,j] == 0 or matrix[i,j] == 1: new_matrix[i,j] =", "Dpt[1] = sp.Matrix([0.0, sp.Symbol('DPT_2_2'), 0.0]) Dpt[2] = sp.Matrix([0.0, sp.Symbol('DPT_2_6'), 0.0])", "0.0]]) # get rotation matrix def get_rotation_matrix(axis, direct, cosine, sine):", "Rj = nb_bodies*[None] xj = nb_bodies*[None] xgj = nb_bodies*[None] Rj_print", "matrix components declaration def write_matrix_declaration(out_file, prefix): out_file.write(' double ') for", "elif not flag_print: flag_print = 1 out_file.write('\\n') out_file.write(' in_out.Lwrist_or_der[{}][{}] =", "xgj, xgj_print, x_vector, index): # loop on all the joints", "out_file.write('/*! 
\\\\brief main kinematics computation\\n') out_file.write(' *\\n') out_file.write(' * \\\\param[in,out]", "03 09 //\\n') out_file.write(' * // 04 10 //\\n') out_file.write('", "# write the end of the file def write_file_end(out_file): out_file.write('}\\n')", "i+1) xg[i] = write_symb_vector(out_file, xg[i], 'xg{}_'.format(i+1), '') xgp[i] = write_symb_vector(out_file,", "body i (1:x, 2:y, 3:z) rot_axis = np.array([0, # waist", "sp.Symbol('DPT_2_37'), sp.Symbol('DPT_3_37')]) Dpt[21] = sp.Matrix([0.0, sp.Symbol('DPT_2_46'), 0.0]) Dpt[22] = sp.Matrix([0.0,", "cur_jac)) out_file.write(' }\\n\\n') out_file.write(' // left wrist absolute position\\n') for", "-result return result # compute the derivative of an expression", "np.array([[cosine, sine, 0.0], [-sine, cosine, 0.0], [0.0, 0.0, 1.0]]) else:", "Rt[parent_id] * Dpt[i] xp[i] = xp[parent_id] + om_tilde[parent_id] * (Rt[parent_id]", "return np.array([]) # get vector axis def get_vector_axis(axis, direct, elem):", "compute the feet position, velocity and orientation def feet_compute(out_file, joint_id_names,", "nb_bodies*[None] xgj_print[i] = nb_bodies*[None] for j in range(0, nb_bodies-1): Rj[i][j]", "+ c_z_{}*{}) + {};\\n'.format(omega_out, body_part, body_part, body_part, omega_in[1], body_part, omega_in[0],", "index): # loop on all the joints for i in", "= sp.Matrix([sp.Symbol('L_1_19'), sp.Symbol('L_2_19'), sp.Symbol('L_3_19')]) Dg[14] = sp.Matrix([sp.Symbol('L_1_20'), sp.Symbol('L_2_20'), sp.Symbol('L_3_20')]) Dg[15]", "Dpt[8] = sp.Matrix([0.0, sp.Symbol('DPT_2_18'), 0.0]) Dpt[9] = sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_20')])", "if pos_str == 'c{}'.format(der_q): result += -sp.Symbol('s{}'.format(der_q)) # sine elif", "R_l_elb = R[l_elb_id] Dpt_r_wrist = sp.zeros(3, 1) Dpt_l_wrist = sp.zeros(3,", "in range(0,3): out_file.write(' in_out.r_Lwrist[{}] = {};\\n'.format(i, x_l[i])) out_file.write('\\n') out_file.write(' //", "# rotation matrix if rot_flag: result += Rj[a-1][der_q-1][(b-1)*3+(c-1)] # vector", "'x1_', 
'') xp[0] = write_symb_vector(out_file, xp[0], 'xp1_', '') for i", "%c also returned) def isRot(value): try: a = int(value.split('_')[0].split('R')[1]) b", "!= nb_bodies-1: out_file.write('\\n') else: out_file.write(' }\\n\\n') # from an orientation", "sp.Matrix([sp.Symbol('L_1_7') , sp.Symbol('L_2_7') , sp.Symbol('L_3_7')]) Dg[2] = sp.Matrix([sp.Symbol('L_1_8') , sp.Symbol('L_2_8')", "a com point def write_symb_xgj(nb_bodies, Rj, xj, xgj, xgj_print, x_vector,", "return np.array([[1.0, 0.0, 0.0], [0.0, cosine, -sine], [0.0, sine, cosine]])", "3) for i in range(0, 3): for j in range(0,", "def wrists_compute(out_file, joint_id_names, R, x, xp, om, Rj, xj, xgj,", "R[i] = Rd[i] * R[parent_body_index[i]] write_symb_Rj(nb_bodies, Rj, xj, xgj, Rj_print,", "for i in range(0,3): om[0][i] = sp.Symbol('omega_{}'.format(i+1)) om[0] = write_symb_vector(out_file,", "feet position, velocity and orientation\\n') out_file.write(' * waist and torso", "xj, xgj, der_var)*sp.Symbol(cur_term_split[0].strip()) # other else: print('Error: {} * counted", "and velocity\\n') out_file.write(' * feet position, velocity and orientation\\n') out_file.write('", "of an anchor point def write_symb_xj(nb_bodies, Rj, xj, xgj, xj_print,", "in range (1, nb_bodies): flag_print = 0 for k in", "in_out.r_Rfoot[{}] = {};\\n'.format(i, x_r[i])) out_file.write('\\n') out_file.write(' // right foot absolute", "in range(1, nb_bodies): Rd[i] = get_rotation_matrix(rot_axis[i], 1, sp.Symbol('c{}'.format(i+1)), sp.Symbol('s{}'.format(i+1))) R[i]", "out_file.write(' * omi : absolute rotational vector of body i\\n')", "i # xpi : derivative of xi # xgpi :", "{}'.format(cut_line[0].strip())) count += len(cut_line[0].strip()) + 2 if count >= nb_max_char:", "result # compute the derivative of an expression (for jacobian)", "out_file.write(' + ') out_file.write(' // global com absolute position\\n') for", "out_file.write(' // global com jacobian\\n') out_file.write(' if (flag_jacob)\\n {\\n') for", "G_i of body i # xpi : 
derivative of xi", "-sine, cosine]]) elif axis == 2: return np.array([[cosine, 0.0, -sine],", "for i in range(0, nb_contacts): for j in range (1,", ">= nb_max_line: out_write.write(';\\n') count = 0 if count != 0:", "count += 1 return count # print the declaration of", "sine elif pos_str == 's{}'.format(der_q): result += sp.Symbol('c{}'.format(der_q)) # other", "out_file.write(', {}'.format(get_string_enum(joint_id_names[i]))) out_file.write('};\\n\\n') out_file.write('/*! \\\\brief main kinematics computation\\n') out_file.write(' *\\n')", "* (previous body is not always body i-1), expressed in", "1.0 / c_y_Lfoot;\\n\\n') out_file.write(' // right foot orientation angle derivatives", "Dpt_l_wrist) # writing outputs out_file.write(' // right wrist absolute position\\n')", "01 24 # # 02 08 # # 03 09", "xj, xgj, 19, 23, -0.02, -0.005, -0.225) torso_waist_angles(out_file, R, om,", "point of body i # xgi : absolute position vector", "1 or 2 in negative detection !'.format(cur_len)) exit() # compute", "# no product if cur_len == 1: result += der_elem(cur_term_split[0],", "flag_print = 1 elem_name = '{}{}{}'.format(start_name, i+1, end_name) out_file.write(' {}", "except: return False, -1, -1 # count the number of", "absolute velocity\\n') for i in range(0,3): out_file.write(' in_out.rp_Rwrist[{}] = {};\\n'.format(i,", "# # 03 09 # # 04 10 # #", "xgpi : derivative of xgi\\n') out_file.write(' * omi : absolute", "out_file.write('\\n // jacobian rotation matrices\\n') out_file.write(' if (flag_jacob)\\n {\\n') flag_first", "= 1 elif not flag_print: flag_print = 1 out_file.write('\\n') out_file.write('{}'.format(xgj_print[i][j][k]))", "') for i in range(min, max+1): out_file.write('{}{}'.format(prefix, i)) if i", "= '{}{}'.format(new_string, cur_split[i]) else: new_string = cur_string cur_split = filter(None,", ": jacobian of 'xgi' # Rji : jacobian of 'Ri'", "sp.zeros(3, 3) # loop on all the matrix elements for", "else: out_write.write(', {}'.format(cut_line[0].strip())) 
count += len(cut_line[0].strip()) + 2 if count", "- s_z_{}*{};\\n'.format(omega_out, body_part, omega_in[1], body_part, omega_in[0])) out_file.write(' in_out.{}[2] = inv_c_y_{}", "symbolic_jacob_der(Rj, xj, xgj, x_vector[j], 'q{}'.format(i+1)) [xgj[index-1][i], xgj_print[index-1][i]] = print_save_symb_vector(new_vector, 'xg{}_'.format(index),", "s_y_torso = sin(in_out.theta_torso[1]);\\n') out_file.write(' s_z_waist = sin(in_out.theta_waist[2]);\\n') out_file.write(' s_z_torso =", "parent_body_index, Dpt, Dg, M): out_file.write('\\n\\n // -- symbolic computation --", "// com absolute positions and velocities\\n') xg = nb_bodies*[None] xgp", "else: flag_first = 1 out_file.write('({}*xg{}_{}'.format(M[j], j+1, i+1)) if j ==", "range(1, nb_bodies): for j in range(0, 3): out_file.write(' in_out.r_COM_der[{}][{}] =", "out_file.write('({}*{}'.format(M[k], str(xgj[k][i][j]))) if k == nb_bodies-1: if flag_first: out_file.write(')/m_tot;\\n') else:", "1 out_file.write('\\n') out_file.write('{}'.format(xj_print[i][j][k])) out_file.write(' }\\n') # xg & xgp out_file.write('\\n", "out_file.write('\\n') out_file.write('{}'.format(xj_print[i][j][k])) out_file.write(' }\\n') # xg & xgp out_file.write('\\n //", "= 9 * [None] xj_print[i][j] = 3 * [None] xgj_print[i][j]", "out_file.write(' * ////////////////////////\\n') out_file.write(' *\\n') out_file.write(' * origin: in the", "# loop on all the lines for line in f:", "of all terms term_list = str(symb_var).replace('- ','-').replace('-','+-').split('+') if term_list[0] ==", "elif count >= 6: count = 0 out_file.write(',\\n {}'.format(get_string_enum(joint_id_names[i]))) else:", "i == nb_bodies-1: out_file.write(';\\n\\n') else: out_file.write(' + ') out_file.write(' //", "= 0 for j in range(0,9): cur_jac = symbolic_jacob_der(Rj, xj,", "for j in range(0,9): cur_jac = symbolic_jacob_der(Rj, xj, xgj, R_l_foot[j],", "'theta_Lfoot', R[l_foot_id], 1) out_file.write('\\n') out_file.write(' c_y_Rfoot = cos(in_out.theta_Rfoot[1]);\\n') 
out_file.write(' c_y_Lfoot", "* [None] # rotation matrices out_file.write('\\n // rotation matrices\\n') R", "= 0 for i in range(0, nb_bodies): for j in", "range(0, nb_contacts): Dpt_r_foot_cont[i] = sp.zeros(3, 1) Dpt_l_foot_cont[i] = sp.zeros(3, 1)", "Dg = nb_bodies*[None] # waist Dg[0] = sp.Matrix([sp.Symbol('L_1_6'), sp.Symbol('L_2_6'), sp.Symbol('L_3_6')])", "orientation matrix as angles [rad]\\n') yaw_pitch_roll_angles(out_file, 'theta_torso', R[torso_id], 1) out_file.write('\\n')", "om, Rj, xj, xgj, 6, 12, -0.06, 0.08, -0.045, 0.045)", "= xp_r_elb + om_tilde_r_elb * (R_r_elb.T * Dpt_r_wrist) xp_l =", "3): out_file.write(' in_out.r_COM[{}] = '.format(i)) flag_first = 0 for j", "R[i], i+1) R[i] = write_symb_matrix(out_file, R[i], 'R{}_'.format(i+1), '') Rt[i] =", "term_list[0] == '': term_list.pop(0) result = 0 # loop on", "position, velocity and orientation\\n') out_file.write(' * waist and torso orientaion", "frame: located at the origin (waist), but aligned with the", "for i in range(1, len(cur_split)-1): new_string = '{}{}'.format(new_string, cur_split[i]) else:", "theta_dot_compute(out_file, omega_in, omega_out, body_part): out_file.write(' in_out.{}[0] = inv_c_y_{} * (c_z_{}*{}", "Dg, M): out_file.write('\\n\\n // -- symbolic computation -- //\\n') #", "for i in range(1, nb_bodies): parent_id = parent_body_index[i] x[i] =", "for j in range(0, nb_bodies): if flag_first: out_file.write(' + {}*{}'.format(M[j],", "}\\n\\n') out_file.write(' // left foot absolute orientation jacobian\\n') out_file.write(' if", "in range(0, 3): if xgj_print[i][j][k] != None: if not flag_first:", "# compute the derivative of an expression (for jacobian) def", "its name def write_symb_vector(out_file, vector, start_name, end_name): new_vector = sp.zeros(3,", "out_file.write('\\n // -- Collecting results -- //\\n\\n') com_compute(out_file, nb_bodies, joint_id_names,", "# waist 0, 1, 2, 3, 4, 5, # right", "[0.0, 0.0, 1.0]]) else: return np.array([]) # get vector axis", "frame of 
the previous body # DGi : position vector", "'R{}_'.format(i+1), '') Rt[i] = R[i].T # jacobian rotation matrices out_file.write('\\n", "cur_jac)) out_file.write(' }\\n\\n') out_file.write(' // left foot absolute position\\n') for", "R_matrix, index): # loop on all the joints for i", "* \\\\param[in,out] in_out inputs and outputs class\\n') out_file.write(' *\\n') out_file.write('", "= y_max for i in range(0, nb_contacts): Dpt_r_foot_cont[i][2] = sp.Symbol('DPT_3_16')", "= om[r_elb_id] om_l_elb = om[l_elb_id] R_r_elb = R[r_elb_id] R_l_elb =", "omega_in[2])) # angles (position and derivative) of the waist and", "* Dpt_r_foot) xp_l = xp_l_foot + om_tilde_l_foot * (R_l_foot.T *", "out_file.write(' in_out.r_Rfoot_cont[{}][{}] = {};\\n'.format(i, j, x_r_cont[i][j])) out_file.write('\\n') out_file.write(' // right", "* (R_l_foot.T * Dpt_l_foot) for i in range(0, nb_contacts): x_r_cont[i]", "= None else: elem_name = '{}{}{}'.format(start_name, i+1, end_name) save_vector[i] =", "orientaion angles and derivatives\\n') out_file.write(' *\\n') out_file.write(' * ////////////////////////\\n') out_file.write('", "out_file.write(' *\\n') out_file.write(' * \\\\param[in,out] in_out inputs and outputs class\\n')", "range(0, 3): cur_jac = symbolic_jacob_der(Rj, xj, xgj, x_l[j], 'q{}'.format(i+1)) if", "3 * [None] for i in range(0,3): if vector[i] ==", "in_out.r_Rwrist_der[{}][{}] = {};\\n'.format(get_string_enum(joint_id_names[i]), j, cur_jac)) out_file.write(' }\\n\\n') out_file.write(' // left", "i to its COM (center of mass) G_i, # expressed", "{};\\n'.format(i, x_l[i])) out_file.write('\\n') out_file.write(' // left wrist absolute velocity\\n') for", "Rj, xj, xgj, Rj_print, R_matrix, index): # loop on all", "-sine], [0.0, sine, cosine]]) elif axis == 2: return np.array([[cosine,", "waist and the torso def torso_waist_angles(out_file, R, om, waist_id, torso_id):", "= {};\\n'.format(get_string_enum(joint_id_names[i]), j, cur_jac)) out_file.write(' }\\n\\n') out_file.write(' // 
left foot", "np.array([]) # compute the derivative of an element (for jacobian)", "waist 2, 1, 3, 2, 1, 2, # right leg", "computation om_tilde_r_foot = get_tilde(om_r_foot) om_tilde_l_foot = get_tilde(om_l_foot) x_r = x_r_foot", "xj, xgj, Rj_print, R[i], i+1) R[i] = write_symb_matrix(out_file, R[i], 'R{}_'.format(i+1),", "sp.Matrix([sp.Symbol('L_1_10'), sp.Symbol('L_2_10'), sp.Symbol('L_3_10')]) Dg[5] = sp.Matrix([sp.Symbol('L_1_11'), sp.Symbol('L_2_11'), sp.Symbol('L_3_11')]) Dg[6] =", "sp.Matrix([sp.Symbol('L_1_14'), sp.Symbol('L_2_14'), sp.Symbol('L_3_14')]) Dg[9] = sp.Matrix([sp.Symbol('L_1_15'), sp.Symbol('L_2_15'), sp.Symbol('L_3_15')]) Dg[10] =", "in_out.rp_Lfoot[{}] = {};\\n'.format(i, xp_l[i])) out_file.write('\\n') out_file.write(' // left foot jacobian\\n')", "# # # ###################### # # origin: in the waist,", "get_tilde(om_r_elb) om_tilde_l_elb = get_tilde(om_l_elb) x_r = x_r_elb + R_r_elb.T *", "// jacobian com absolute positions\\n') out_file.write(' if (flag_jacob)\\n {\\n') flag_first", "= {};\\n'.format(i, R_l_foot[i])) out_file.write('\\n') out_file.write(' // right foot absolute orientation", "Dpt[12] = sp.Matrix([0.0, 0.0, 0.0]) # trunk Dpt[13] = sp.Matrix([sp.Symbol('DPT_1_4'),", "output file def gen_symbolic_out(out_file_name, nb_bodies, rot_axis, parent_body_index, joint_id_names, Dpt, Dg,", "0.0]) # trunk Dpt[13] = sp.Matrix([sp.Symbol('DPT_1_4'), 0.0, sp.Symbol('DPT_3_4')]) Dpt[14] =", "# # 18 15 22 # # 19 14 23", "position and velocity\\n') out_file.write(' * feet position, velocity and orientation\\n')", "elem_name = '{}{}{}'.format(start_name, i+1, end_name) out_file.write(' {} = {};\\n'.format(elem_name, vector[i]).replace('1.0*',''))", "'RightFootPitch_id', # right leg 'LeftHipPitch_id' , 'LeftHipRoll_id' , 'LeftHipYaw_id' ,", "absolute rotational matrix # Rti : transpose matrix of Ri", "== 0 or matrix[i,j] == 1: new_matrix[i,j] = matrix[i,j] save_matrix[3*i+j]", "flag_first: out_file.write(')/m_tot;\\n') else: 
out_file.write('0.0;\\n') out_file.write('\\n') out_file.write(' // global com jacobian\\n')", "velocity\\n') out_file.write(' * feet position, velocity and orientation\\n') out_file.write(' *", "new_vector # write symbolic matrix and replace symbolic variable by", "joint_id_names, R, x, xp, om, Rj, xj, xgj, r_foot_id, l_foot_id,", "get_tilde(om_l_foot) x_r = x_r_foot + R_r_foot.T * Dpt_r_foot x_l =", "previous body # DGi : position vector from the anchor", "= sin(in_out.theta_Lfoot[2]);\\n\\n') out_file.write(' if ((!c_y_Rfoot) || (!c_y_Lfoot))\\n {\\n') out_file.write(' return;\\n", "line.split(elem) if len(cut_line) == 2: count += 1 return count", "range(0,3): out_file.write(' in_out.rp_Lfoot[{}] = {};\\n'.format(i, xp_l[i])) out_file.write('\\n') out_file.write(' // left", "21, 22 # left arm ]) nb_bodies = len(parent_body_index) ##", "xj, xgj, der_var) # one product elif cur_len == 2:", "out_file.write(' // left foot absolute position\\n') for i in range(0,3):", "position\\n') for i in range(0, nb_contacts): for j in range(0,", "out_file.write(' * // 07 13 //\\n') out_file.write(' * // //\\n')", "for cur_term in term_list: # detect products cur_term_split = cur_term.split('*')", "= 0 for i in range(0,3): if vector[i] == 0", "}\\n\\n') out_file.write(' // left foot contact points absolute position\\n') for", "end_name) save_matrix[3*i+j] = ' {} = {};\\n'.format(elem_name, matrix[i,j]).replace('1.0*','') new_matrix[i,j] =", "'M_15', 'M_16', 'M_17', 'M_18', # left leg 'M_19', 'M_20', 'M_21',", "and orientation def wrists_compute(out_file, joint_id_names, R, x, xp, om, Rj,", "\\\\brief main kinematics computation\\n') out_file.write(' *\\n') out_file.write(' * \\\\param[in,out] in_out", "before body i # ci : cosine of the relative", "jacobian of \\'xgi\\'\\n') out_file.write(' * Rji : jacobian of \\'Ri\\'\\n')", "out_file.write('\\n // joint relative velocities\\n') for i in range(1, nb_bodies):", "left arm ]) nb_bodies = len(parent_body_index) ## anchor point 
positions", "= '{}{}{}{}'.format(start_name, i+1, j+1, end_name) out_file.write(' {} = {};\\n'.format(elem_name, matrix[i,j]).replace('1.0*',''))", "* (R_l_elb.T * Dpt_l_wrist) # writing outputs out_file.write(' // right", "xgj, R_r_foot[j], 'q{}'.format(i+1)) if cur_jac != 0: if not flag_first:", "waist_id, torso_id): out_file.write(' // waist orientation matrix as angles [rad]\\n')", "= xp[r_elb_id] xp_l_elb = xp[l_elb_id] om_r_elb = om[r_elb_id] om_l_elb =", "in_out inputs and outputs class\\n') out_file.write(' *\\n') out_file.write(' * computation", "new_matrix = sp.zeros(3, 3) save_matrix = 9 * [None] for", "= np.array([0, # waist 2, 1, 3, 2, 1, 2,", "nb_contacts): Dpt_r_foot_cont[i] = sp.zeros(3, 1) Dpt_l_foot_cont[i] = sp.zeros(3, 1) Dpt_r_foot_cont[0][0]", "from IMU) # # Di : position vector from the", "sp.Symbol('L_2_19'), sp.Symbol('L_3_19')]) Dg[14] = sp.Matrix([sp.Symbol('L_1_20'), sp.Symbol('L_2_20'), sp.Symbol('L_3_20')]) Dg[15] = sp.Matrix([sp.Symbol('L_1_21'),", "'M_17', 'M_18', # left leg 'M_19', 'M_20', 'M_21', # trunk", "0 out_file.write(',\\n {}'.format(get_string_enum(joint_id_names[i]))) else: out_file.write(', {}'.format(get_string_enum(joint_id_names[i]))) out_file.write('};\\n\\n') out_file.write('/*! 
\\\\brief main", "= xp[l_foot_id] om_r_foot = om[r_foot_id] om_l_foot = om[l_foot_id] R_r_foot =", "write_symb_vector(out_file, xp[i], 'xp{}_'.format(i+1), '') # jacobian x out_file.write('\\n // jacobian", "'x%a_%b' (indexes %a, %b also returned) def isVec(value): try: a", "f: cut_line = line.split(' = ') if len(cut_line) == 2:", "count >= nb_max_char: out_write.write(';\\n') count = 0 if count !=", "= vector[i] save_vector[i] = None else: elem_name = '{}{}{}'.format(start_name, i+1,", "om_tilde = nb_bodies*[None] Om[0] = sp.zeros(3, 1) om[0] = sp.zeros(3,", "elem_split[1] else: print('Error: {} instead of 1 or 2 in", "of xgi # omi : absolute rotational vector of body", "result # write the beginning of the file def write_file_beginning(out_file,", "}\\n\\n') out_file.write(' inv_c_y_Rfoot = 1.0 / c_y_Rfoot;\\n') out_file.write(' inv_c_y_Lfoot =", "R[parent_body_index[i]] write_symb_Rj(nb_bodies, Rj, xj, xgj, Rj_print, R[i], i+1) R[i] =", "2: result += der_elem(cur_term_split[0], Rj, xj, xgj, der_var)*sp.Symbol(cur_term_split[1].strip()) result +=", "1 return count # print the declaration of an element", "[vec_flag, d, e] = isVec(pos_str) # rotation matrix if rot_flag:", "y_min, y_max): # symbolic variables declarations nb_contacts = 4 x_r_foot", "= cos(in_out.theta_waist[2]);\\n') out_file.write(' c_z_torso = cos(in_out.theta_torso[2]);\\n\\n') out_file.write(' s_y_waist = sin(in_out.theta_waist[1]);\\n')", "== 2: return np.array([[0.0], [-elem], [0.0]]) elif axis == 3:", "sp.Symbol('L_2_6'), sp.Symbol('L_3_6')]) # right leg Dg[1] = sp.Matrix([sp.Symbol('L_1_7') , sp.Symbol('L_2_7')", "orientation jacobian\\n') out_file.write(' if (flag_jacob)\\n {\\n') flag_first = 0 for", "!= None: if not flag_first: flag_first = 1 flag_print =", "18 15 22 //\\n') out_file.write(' * // 19 14 23", "0 if count != 0: out_write.write(';\\n') # get tilde matrix", "try: a = int(value.split('_')[0].split('x')[1]) b = int(value.split('_')[1]) return True, a,", "7, 8, 9, 10, 11, # left 
leg 0, 13,", "j in range (1, nb_bodies): flag_print = 0 for k", "lines for line in f: cut_line = line.split(' = ')", "hip rotations\\n') out_file.write(' * inertial frame: located at the origin", "middle point between the two pitch hip rotations\\n') out_file.write(' *", "result = 0 # loop on all terms for cur_term", "-- //\\n') # Rj, xj, xgj and xgj (jacobian) Rj", "derivative) of the waist and the torso def torso_waist_angles(out_file, R,", "for i in range(1, len(cur_split)): new_string = '{}_{}'.format(new_string, cur_split[i].upper()) return", "= elem_split[0] elif cur_len == 2: # negative neg_flag =", "jacobian of 'xi' # xgji : jacobian of 'xgi' #", "for j in range(0, 3): cur_jac = symbolic_jacob_der(Rj, xj, xgj,", "22 # # 19 14 23 # # 20 01", "= '.format(get_string_enum(joint_id_names[i]), j)) flag_first = 0 for k in range(0,", "r_wrist_z Dpt_l_wrist[0] = r_wrist_x Dpt_l_wrist[1] = -r_wrist_y Dpt_l_wrist[2] = r_wrist_z", "inertial frame)\\n') out_file.write(' * of the anchor point of body", "# save the symbolic matrix for print def print_save_symb_matrix(matrix, start_name,", "i in range(0,3): out_file.write(' in_out.r_Rfoot[{}] = {};\\n'.format(i, x_r[i])) out_file.write('\\n') out_file.write('", ": position vector from the anchor point of body i", "1) out_file.write('\\n') out_file.write(' c_y_waist = cos(in_out.theta_waist[1]);\\n') out_file.write(' c_y_torso = cos(in_out.theta_torso[1]);\\n')", "if count == 0: out_write.write(' double {}'.format(cut_line[0].strip())) else: out_write.write(', {}'.format(cut_line[0].strip()))", "previous body # Rdi : rotational matrix between body i", "absolute velocity\\n') for i in range(0,3): out_file.write(' in_out.rp_Rfoot[{}] = {};\\n'.format(i,", "points absolute position\\n') for i in range(0, nb_contacts): for j", "elif not flag_print: flag_print = 1 out_file.write('\\n') out_file.write(' in_out.Lfoot_or_der[{}][{}] =", "else: out_file.write('0.0;\\n') out_file.write('\\n') out_file.write(' // global com absolute 
velocity\\n') for", "elif axis == 3: return np.array([[0.0], [0.0], [-elem]]) else: return", "right foot jacobian\\n') out_file.write(' if (flag_jacob)\\n {\\n') flag_first = 0", "print_save_symb_matrix(matrix, start_name, end_name): new_matrix = sp.zeros(3, 3) save_matrix = 9", "'') xgp[i] = write_symb_vector(out_file, xgp[i], 'xgp{}_'.format(i+1), '') # jacobian xg", "i in range(0, nb_contacts): x_r_cont[i] = x_r_foot + R_r_foot.T *", "save_matrix[3*i+j] = ' {} = {};\\n'.format(elem_name, matrix[i,j]).replace('1.0*','') new_matrix[i,j] = sp.Symbol(elem_name)", "1: new_vector[i] = vector[i] else: flag_print = 1 elem_name =", "body i-1), expressed in the relative\\n') out_file.write(' * frame of", "out_file.write(' // right foot orientation angle derivatives [rad/s]\\n') theta_dot_compute(out_file, om[r_foot_id],", "out_file.write(' }\\n\\n') out_file.write(' // left foot contact points absolute position\\n')", "matrix[i,j] else: flag_print = 1 elem_name = '{}{}{}{}'.format(start_name, i+1, j+1,", "Rd[0] = sp.zeros(3, 3) R[0] = sp.zeros(3, 3) for i", "cur_jac = symbolic_jacob_der(Rj, xj, xgj, R_l_elb[j], 'q{}'.format(i+1)) if cur_jac !=", "nb_bodies*[None] Rj_print = nb_bodies*[None] xj_print = nb_bodies*[None] xgj_print = nb_bodies*[None]", "xgj, symb_var, der_var): # list of all terms term_list =", "r_wrist_x, r_wrist_y, r_wrist_z): # symbolic variables declarations x_r_elb = x[r_elb_id]", "out_write.write(';\\n') # get tilde matrix def get_tilde(v): return np.array([[0.0, -v[2],", "new_matrix[i,j] = matrix[i,j] else: flag_print = 1 elem_name = '{}{}{}{}'.format(start_name,", "but aligned with the ground (info from IMU) # #", "out_file.write(' *\\n') out_file.write(' * origin: in the waist, middle point", "-> pitch angle in [-pi/2 ; pi/2] out_file.write(' in_out.{}[0] =", "sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_8')]) Dpt[4] = sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_10')]) Dpt[5] =", "xgp, xgj): out_file.write(' m_tot = ') for i in range(0,", "out_file.write(' 
s_y_Lfoot = sin(in_out.theta_Lfoot[1]);\\n') out_file.write(' s_z_Rfoot = sin(in_out.theta_Rfoot[2]);\\n') out_file.write(' s_z_Lfoot", "'elem' in the file def count_elem(in_file, elem): count = 0;", "len(cut_line[0].strip()) + 2 if count >= nb_max_char: out_write.write(';\\n') count =", "j, cur_jac)) out_file.write(' }\\n\\n') out_file.write(' // wrists absolute orientation\\n') for", "out_file.write('\\n') out_file.write(' in_out.r_Lfoot_der[{}][{}] = {};\\n'.format(get_string_enum(joint_id_names[i]), j, cur_jac)) out_file.write(' }\\n\\n') out_file.write('", "out_file.write(' m_tot = ') for i in range(0, nb_bodies): out_file.write('{}'.format(M[i]))", "write_symb_vector(out_file, vector, start_name, end_name): new_vector = sp.zeros(3, 1) flag_print =", "joint_id_names[i])) out_file.write('\\n // joint sines\\n') for i in range(1, nb_bodies):", "4, 5, # right leg 0, 7, 8, 9, 10,", "in range(0,3): om[0][i] = sp.Symbol('omega_{}'.format(i+1)) om[0] = write_symb_vector(out_file, om[0], 'om1_',", "x_r[i])) out_file.write('\\n') out_file.write(' // right wrist absolute velocity\\n') for i", "'LeftHipPitch_id' , 'LeftHipRoll_id' , 'LeftHipYaw_id' , 'LeftKneePitch_id' , 'LeftFootRoll_id' ,", "out_file.write(' * // 04 10 //\\n') out_file.write(' * // 05", "derivative of xgi\\n') out_file.write(' * omi : absolute rotational vector", "elem_str = elem_str.replace('- ','-').strip() # derivative axis der_q = int(der_var.replace('q',''))", "nb_bodies*[None] Rj_print[i] = nb_bodies*[None] xj_print[i] = nb_bodies*[None] xgj_print[i] = nb_bodies*[None]", "i in range(0,3): om[0][i] = sp.Symbol('omega_{}'.format(i+1)) om[0] = write_symb_vector(out_file, om[0],", "out_file.write('\\n') out_file.write('{}'.format(xgj_print[i][j][k])) out_file.write(' }\\n') # results out_file.write('\\n // -- Collecting", "sp.Symbol('L_3_9')]) Dg[4] = sp.Matrix([sp.Symbol('L_1_10'), sp.Symbol('L_2_10'), sp.Symbol('L_3_10')]) Dg[5] = sp.Matrix([sp.Symbol('L_1_11'), sp.Symbol('L_2_11'),", "= 
np.array(['0', # waist 'RightHipPitch_id', 'RightHipRoll_id', 'RightHipYaw_id', 'RightKneePitch_id', 'RightFootRoll_id', 'RightFootPitch_id',", "Dg, M): # temporary file in_temp = './{}_temp.cc'.format(out_file_name) file_temp =", "1 -> pitch angle in [-pi/2 ; pi/2] out_file.write(' in_out.{}[0]", "matrix\\n') out_file.write(' * Rti : transpose matrix of Ri\\n') out_file.write('", "omega_in[1], body_part, omega_in[0])) out_file.write(' in_out.{}[2] = inv_c_y_{} * s_y_{} *", "range(0,3): out_file.write(' in_out.rp_Lwrist[{}] = {};\\n'.format(i, xp_l[i])) out_file.write('\\n') out_file.write(' // left", "- rotation matrices\\n') for i in range(0, 3): for j", "get_vector_axis(rot_axis[i], 1, sp.Symbol('Om{}'.format(i+1))) om[i] = om[parent_id] + Rt[parent_id] * Om[i]", "return true if it has a shape 'x%a_%b' (indexes %a,", "24 # # 02 08 # # 03 09 #", "the derivative of an expression (for jacobian) def symbolic_jacob_der(Rj, xj,", "ground (info from IMU)\\n') out_file.write(' *\\n') out_file.write(' * Di :", "xgi # omi : absolute rotational vector of body i", "a shape 'x%a_%b' (indexes %a, %b also returned) def isVec(value):", "the relative # frame of the previous body # DGi", "[elem], [0.0]]) elif axis == 3: return np.array([[0.0], [0.0], [elem]])", "== 1: out_file.write('{}'.format(get_string_enum(joint_id_names[i]))) elif count >= 6: count = 0", "on all the joints for i in range (1, nb_bodies):", "also returned) def isRot(value): try: a = int(value.split('_')[0].split('R')[1]) b =", "out_write, elem, nb_max_line): if count_elem(in_file, '{}'.format(elem)) >= 1: count =", "der_elem(cur_term_split[0], Rj, xj, xgj, der_var) # one product elif cur_len", "*\\n') out_file.write(' * ////////////////////////\\n') out_file.write(' * // //\\n') out_file.write(' *", "// global com jacobian\\n') out_file.write(' if (flag_jacob)\\n {\\n') for i", "flag_print: flag_print = 1 out_file.write('\\n') out_file.write(' in_out.Rwrist_or_der[{}][{}] = 
{};\\n'.format(get_string_enum(joint_id_names[i]), j,", "flag_print: out_file.write('\\n') return new_vector # write symbolic matrix and replace", "nb_bodies, rot_axis, parent_body_index, joint_id_names, Dpt, Dg, M): # temporary file", "in range(0, nb_contacts): for j in range(0, 3): out_file.write(' in_out.r_Rfoot_cont[{}][{}]", "variable by its name def write_symb_matrix(out_file, matrix, start_name, end_name): new_matrix", "s_y_Lfoot = sin(in_out.theta_Lfoot[1]);\\n') out_file.write(' s_z_Rfoot = sin(in_out.theta_Rfoot[2]);\\n') out_file.write(' s_z_Lfoot =", "velocity\\n') for i in range(0,3): out_file.write(' in_out.rp_Lfoot[{}] = {};\\n'.format(i, xp_l[i]))", "sines\\n') for i in range(1, nb_bodies): out_file.write(' s{} = sin(in_out.q_mot[{}]);\\n'.format(i+1,", "= sp.zeros(3, 1) Dpt_r_foot_cont[0][0] = x_min Dpt_r_foot_cont[1][0] = x_min Dpt_r_foot_cont[2][0]", "range(0, 3): cur_jac = symbolic_jacob_der(Rj, xj, xgj, x_r_cont[i][k], 'q{}'.format(j+1)) if", "out_file.write(';\\n') else: out_file.write(', ') # variables initialization def write_intialization(out_file, nb_bodies,", "i-1), expressed in the relative\\n') out_file.write(' * frame of the", "j in range(0,9): cur_jac = symbolic_jacob_der(Rj, xj, xgj, R_l_elb[j], 'q{}'.format(i+1))", "# save the symbolic vector for print def print_save_symb_vector(vector, start_name,", "elif axis == 2: return np.array([[0.0], [-elem], [0.0]]) elif axis", "om[parent_id] + Rt[parent_id] * Om[i] om[i] = write_symb_vector(out_file, om[i], 'om{}_'.format(i+1),", "file def write_file_beginning(out_file, joint_id_names): out_file.write('/*! 
\\n') out_file.write(' * \\\\author <NAME>\\n')", "0 pos_str = elem_split[0] elif cur_len == 2: # negative", "sp.Matrix([sp.Symbol('L_1_15'), sp.Symbol('L_2_15'), sp.Symbol('L_3_15')]) Dg[10] = sp.Matrix([sp.Symbol('L_1_16'), sp.Symbol('L_2_16'), sp.Symbol('L_3_16')]) Dg[11] =", "j in range(0, 3): out_file.write(' in_out.r_COM_der[{}][{}] = '.format(get_string_enum(joint_id_names[i]), j)) flag_first", "flag_print = 0 for k in range(0, 3): if xgj_print[i][j][k]", "= int(value.split('_')[1]) return True, a, b except: return False, -1,", "get_tilde(om_l_elb) x_r = x_r_elb + R_r_elb.T * Dpt_r_wrist x_l =", "1 elif not flag_print: flag_print = 1 out_file.write('\\n') out_file.write('{}'.format(xj_print[i][j][k])) out_file.write('", "out_file.write('enum {') count = 0 for i in range(1, len(joint_id_names)):", "jacobian) def der_elem(elem_str, Rj, xj, xgj, der_var): # element to", "file out_file = open('./{}.cc'.format(out_file_name), 'w') with open(in_temp, 'r') as f:", "1, 3, 2 # left arm ]) # parent index", "start_name, end_name): new_matrix = sp.zeros(3, 3) flag_print = 0 for", "in range (1, nb_bodies): new_vector = sp.zeros(3, 1) # loop", "compute the time derivatives of 'yaw_pitch_roll_angles' def theta_dot_compute(out_file, omega_in, omega_out,", "# epsilon = 1 -> pitch angle in [-pi/2 ;", "'om1_', '') om_tilde[0] = get_tilde(om[0]) for i in range(1, nb_bodies):", "15) # generate the symbolic output file def gen_symbolic_out(out_file_name, nb_bodies,", "(from origin, expressed in the inertial frame) # of the", "shape 'R%a_%b%c' (indexes %a, %b, %c also returned) def isRot(value):", "or matrix[i,j] == 1: new_matrix[i,j] = matrix[i,j] else: flag_print =", ": jacobian of 'xi' # xgji : jacobian of 'xgi'", "on all the vector elements for j in range(0, 3):", "wrist absolute position\\n') for i in range(0,3): out_file.write(' in_out.r_Rwrist[{}] =", "sp.Symbol('DPT_3_48')]) Dpt[23] = sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_50')]) ## COM positions Dg", "detect products 
cur_term_split = cur_term.split('*') cur_len = len(cur_term_split) # no", "position vector of the COM G_i of body i #", "= symbolic_jacob_der(Rj, xj, xgj, x_l_cont[i][k], 'q{}'.format(j+1)) if cur_jac != 0:", "in range(0,3): out_file.write(' in_out.r_Rwrist[{}] = {};\\n'.format(i, x_r[i])) out_file.write('\\n') out_file.write(' //", "compute the derivative of an element (for jacobian) def der_elem(elem_str,", "write symbolic vector and replace symbolic variable by its name", "= 1 pos_str = elem_split[1] else: print('Error: {} instead of", "om_tilde_l_foot * (R_l_foot.T * Dpt_l_foot) for i in range(0, nb_contacts):", "of the COM G_i of body i # xpi :", "= get_tilde(om[i]) # x & xp out_file.write('\\n // anchor point", "negative neg_flag = 1 pos_str = elem_split[1] else: print('Error: {}", "1: if count == 0: out_write.write(' double {}'.format(cut_line_2[0].strip())) else: out_write.write(',", "import numpy as np import sympy as sp import re", "of \\'xgi\\'\\n') out_file.write(' * Rji : jacobian of \\'Ri\\'\\n') out_file.write('", "contact points absolute position\\n') for i in range(0, nb_contacts): for", "the joints for i in range (1, nb_bodies): new_vector =", "+= der_elem(cur_term_split[0], Rj, xj, xgj, der_var) # one product elif", "R, om, waist_id, torso_id): out_file.write(' // waist orientation matrix as", "y_min Dpt_r_foot_cont[1][1] = y_max Dpt_r_foot_cont[2][1] = y_min Dpt_r_foot_cont[3][1] = y_max", "out_file.write('\\n // IMU - angles velocity\\n') for i in range(0,", "= {};\\n'.format(get_string_enum(joint_id_names[i]), j, cur_jac)) out_file.write(' }\\n\\n') out_file.write(' // left wrist", "# write symbolic vector and replace symbolic variable by its", "write_symb_vector(out_file, om[0], 'om1_', '') om_tilde[0] = get_tilde(om[0]) for i in", ", 'TorsoPitch_id' , 'TorsoYaw_id' , # trunk 'RightShPitch_id' , 'RightShRoll_id'", "= symbolic_jacob_der(Rj, xj, xgj, x_vector[j], 'q{}'.format(i+1)) [xj[index-1][i], xj_print[index-1][i]] = 
print_save_symb_vector(new_vector,", "out_file.write(' }\\n\\n') out_file.write(' // left foot absolute position\\n') for i", "has a shape 'R%a_%b%c' (indexes %a, %b, %c also returned)", "xji : jacobian of 'xi' # xgji : jacobian of", "+= der_elem(cur_term_split[0], Rj, xj, xgj, der_var)*sp.Symbol(cur_term_split[1].strip()) result += der_elem(cur_term_split[1], Rj,", "print('Error: {} instead of 1 or 2 in negative detection", "out_file.write(' + {}*{}'.format(M[j], xg[j][i])) else: flag_first = 1 out_file.write('({}*xg{}_{}'.format(M[j], j+1,", "angles velocity\\n') for i in range(0, 3): out_file.write(' omega_{} =", "0.0], [0.0, 0.0, 1.0]]) else: return np.array([]) # get vector", "an expression (for jacobian) def symbolic_jacob_der(Rj, xj, xgj, symb_var, der_var):", "Rt[0] = R[0].T for i in range(1, nb_bodies): Rd[i] =", "symbolic_jacob_der(Rj, xj, xgj, R_matrix[j], 'q{}'.format(i+1)) [Rj[index-1][i], Rj_print[index-1][i]] = print_save_symb_matrix(new_matrix, 'R{}_'.format(index),", "Dpt_r_foot_cont[0][0] = x_min Dpt_r_foot_cont[1][0] = x_min Dpt_r_foot_cont[2][0] = x_max Dpt_r_foot_cont[3][0]", "out_file.write(' * // //\\n') out_file.write(' * // 17 16 21", "xgp[i], 'xgp{}_'.format(i+1), '') # jacobian xg out_file.write('\\n // jacobian com", "out_file.write(' double ') for i in range(0,3): for j in", "flag_first: out_file.write(')/m_tot;\\n') else: out_file.write('0.0;\\n') if i != nb_bodies-1: out_file.write('\\n') else:", "pitch angle in [-pi/2 ; pi/2] out_file.write(' in_out.{}[0] = atan2({},", "###################### # # # 17 16 21 # # 18", "'yaw_pitch_roll_angles' def theta_dot_compute(out_file, omega_in, omega_out, body_part): out_file.write(' in_out.{}[0] = inv_c_y_{}", "l_foot_id, x_min, x_max, y_min, y_max): # symbolic variables declarations nb_contacts", "9): out_file.write(' in_out.Rfoot_or[{}] = {};\\n'.format(i, R_r_foot[i])) out_file.write('\\n') for i in", "10 # # 05 11 # # 06 12 #", "xp[r_elb_id] xp_l_elb = xp[l_elb_id] om_r_elb = om[r_elb_id] 
om_l_elb = om[l_elb_id]", "Dpt_r_foot_cont = nb_contacts * [None] Dpt_l_foot_cont = nb_contacts * [None]", "xj, xgj and xgj (jacobian) Rj = nb_bodies*[None] xj =", "COM positions Dg = nb_bodies*[None] # waist Dg[0] = sp.Matrix([sp.Symbol('L_1_6'),", "+= 1 return count # print the declaration of an", "out_file.write(' * \\\\author <NAME>\\n') out_file.write(' * \\\\file forward_kinematics.cc\\n') out_file.write(' *", "= y_min Dpt_r_foot_cont[1][1] = y_max Dpt_r_foot_cont[2][1] = y_min Dpt_r_foot_cont[3][1] =", "an element def print_declaration_elem(in_file, out_write, elem, nb_max_line): if count_elem(in_file, '{}'.format(elem))", "cosine, sine], [0.0, -sine, cosine]]) elif axis == 2: return", "cos(in_out.theta_torso[1]);\\n') out_file.write(' c_z_waist = cos(in_out.theta_waist[2]);\\n') out_file.write(' c_z_torso = cos(in_out.theta_torso[2]);\\n\\n') out_file.write('", "0, 7, 8, 9, 10, 11, # left leg 0,", "in the relative frame of the current body i\\n') out_file.write('", ", 'LeftKneePitch_id' , 'LeftFootRoll_id' , 'LeftFootPitch_id' , # left leg", "cos(in_out.q_mot[{}]);\\n'.format(i+1, joint_id_names[i])) out_file.write('\\n // joint sines\\n') for i in range(1,", "3*pi/2] out_file.write(' in_out.{}[0] = atan2(-{}, -{});\\n'.format(angle_name, R_matrix[5], R_matrix[8])) out_file.write(' in_out.{}[1]", "return true if it is a float def isInt(value): try:", "= x[r_foot_id] x_l_foot = x[l_foot_id] xp_r_foot = xp[r_foot_id] xp_l_foot =", "start_name, end_name): new_vector = sp.zeros(3, 1) save_vector = 3 *", "+ R_l_foot.T * Dpt_l_foot xp_r = xp_r_foot + om_tilde_r_foot *", "if len(cut_line_1) == 2 and len(cut_line_2) == 2: if len(cut_line_2[0].split('['))", "x[0] = Rt[0] * Dpt[0] xp[0] = om_tilde[0] * (Rt[0]", "# from an orientation matrix, compute the roll, pitch, yaw", "range(0, 3): out_file.write(' in_out.r_Rfoot_cont[{}][{}] = {};\\n'.format(i, j, x_r_cont[i][j])) out_file.write('\\n') out_file.write('", "cur_jac = symbolic_jacob_der(Rj, xj, xgj, x_r_cont[i][k], 
'q{}'.format(j+1)) if cur_jac !=", "Dpt_r_wrist x_l = x_l_elb + R_l_elb.T * Dpt_l_wrist xp_r =", "Dg[5] = sp.Matrix([sp.Symbol('L_1_11'), sp.Symbol('L_2_11'), sp.Symbol('L_3_11')]) Dg[6] = sp.Matrix([sp.Symbol('L_1_12'), 0.0 ,", "sp.Symbol('L_3_11')]) Dg[6] = sp.Matrix([sp.Symbol('L_1_12'), 0.0 , sp.Symbol('L_3_12')]) # left leg", "1: return np.array([[elem], [0.0], [0.0]]) elif axis == 2: return", "xg[i], 'xg{}_'.format(i+1), '') xgp[i] = write_symb_vector(out_file, xgp[i], 'xgp{}_'.format(i+1), '') #", "in range(0, 3): new_vector[j] = symbolic_jacob_der(Rj, xj, xgj, x_vector[j], 'q{}'.format(i+1))", "= get_rotation_matrix(rot_axis[i], 1, sp.Symbol('c{}'.format(i+1)), sp.Symbol('s{}'.format(i+1))) R[i] = Rd[i] * R[parent_body_index[i]]", "right foot orientation angle derivatives [rad/s]\\n') theta_dot_compute(out_file, om[r_foot_id], 'omega_Rfoot', 'Rfoot')", "for i in range(0,3): out_file.write(' in_out.r_Rfoot[{}] = {};\\n'.format(i, x_r[i])) out_file.write('\\n')", "string for the enumeration of joints def get_string_enum(cur_string): cur_split =", "sp.Matrix([sp.Symbol('DPT_1_36'), sp.Symbol('DPT_2_36'), sp.Symbol('DPT_3_36')]) Dpt[17] = sp.Matrix([0.0, sp.Symbol('DPT_2_39'), 0.0]) Dpt[18] =", "in range(0,9): cur_jac = symbolic_jacob_der(Rj, xj, xgj, R_r_foot[j], 'q{}'.format(i+1)) if", "j, cur_jac)) out_file.write(' }\\n\\n') out_file.write(' // right foot orientation matrix", "xgj and xgj (jacobian) Rj = nb_bodies*[None] xj = nb_bodies*[None]", "y_max): # symbolic variables declarations nb_contacts = 4 x_r_foot =", "omega_out, body_part): out_file.write(' in_out.{}[0] = inv_c_y_{} * (c_z_{}*{} + s_z_{}*{});\\n'.format(omega_out,", "elements for j in range(0, 9): new_matrix[j] = symbolic_jacob_der(Rj, xj,", "= 0 # cosine if pos_str == 'c{}'.format(der_q): result +=", "len(line.split('// -- variables initialization -- //')) != 1: out_file.write(' //", "<filename>symbolicR/python/forward_kin.py import numpy as np import sympy as sp import", "j in range(0, 3): 
out_file.write(' in_out.r_Rfoot_cont[{}][{}] = {};\\n'.format(i, j, x_r_cont[i][j]))", "parent_body_index, Dpt, Dg, M) # end of the file write_file_end(file_temp)", "range(0,3): out_file.write(' in_out.r_Rfoot[{}] = {};\\n'.format(i, x_r[i])) out_file.write('\\n') out_file.write(' // right", "IMU) # # Di : position vector from the anchor", "point positions\\n') out_file.write(' if (flag_jacob)\\n {\\n') flag_first = 0 for", "= filter(None, re.split(\"([A-Z][^A-Z]*)\", new_string)) new_string = cur_split[0].upper() for i in", "name def write_symb_matrix(out_file, matrix, start_name, end_name): new_matrix = sp.zeros(3, 3)", "# Omi : rotational vector from the previous body to", "i+1)) else: flag_first = 1 out_file.write('({}*xgp{}_{}'.format(M[j], j+1, i+1)) if j", "= sp.zeros(3, 3) for i in range(0, 3): for j", "in range(0, nb_bodies-1): Rj[i][j] = sp.zeros(3, 3) xj[i][j] = sp.zeros(3,", "= inv_c_y_{} * s_y_{} * (s_z_{}*{} + c_z_{}*{}) + {};\\n'.format(omega_out,", "# # 20 01 24 # # 02 08 #", "om_tilde_l_elb * (R_l_elb.T * Dpt_l_wrist) # writing outputs out_file.write(' //", "sp.Symbol('L_3_29')]) # masses M = np.array([ 'M_6', # waist 'M_7'", "'LeftFootPitch_id' , # left leg 'TorsoRoll_id' , 'TorsoPitch_id' , 'TorsoYaw_id'", "3:z) rot_axis = np.array([0, # waist 2, 1, 3, 2,", "i in range(0, nb_contacts): Dpt_r_foot_cont[i] = sp.zeros(3, 1) Dpt_l_foot_cont[i] =", "j in range(0, 9): new_matrix[j] = symbolic_jacob_der(Rj, xj, xgj, R_matrix[j],", "xj, xgj, x_r[j], 'q{}'.format(i+1)) if cur_jac != 0: if not", "04 10 # # 05 11 # # 06 12", "}\\n\\n') out_file.write(' // right foot orientation matrix as angles [rad]\\n')", "for line in f: # declaration if len(line.split('// -- variables", "-1 # return true if it has a shape 'x%a_%b'", "derivatives [rad/s]\\n') theta_dot_compute(out_file, om[waist_id], 'omega_waist', 'waist') out_file.write('\\n') out_file.write(' // torso", "remove temporary file os.remove(in_temp) # main script # rotation axis", "'RightHipYaw_id', 
'RightKneePitch_id', 'RightFootRoll_id', 'RightFootPitch_id', # right leg 'LeftHipPitch_id' , 'LeftHipRoll_id'", "nb_bodies*[None] xgj[i] = nb_bodies*[None] Rj_print[i] = nb_bodies*[None] xj_print[i] = nb_bodies*[None]", "(center of mass) G_i,\\n') out_file.write(' * expressed in the relative", "= {};\\n'.format(i, j, x_r_cont[i][j])) out_file.write('\\n') out_file.write(' // right foot contact", "out_file.write('\\n') out_file.write(' in_out.r_Rfoot_der[{}][{}] = {};\\n'.format(get_string_enum(joint_id_names[i]), j, cur_jac)) out_file.write(' }\\n\\n') out_file.write('", "+= -sp.Symbol('s{}'.format(der_q)) # sine elif pos_str == 's{}'.format(der_q): result +=", "return np.array([[-elem], [0.0], [0.0]]) elif axis == 2: return np.array([[0.0],", "out_file.write(' * (previous body is not always body i-1), expressed", "in_out.r_Rfoot_cont[{}][{}] = {};\\n'.format(i, j, x_r_cont[i][j])) out_file.write('\\n') out_file.write(' // right foot", "len(cut_line) == 2: count += 1 return count # print", "15 22 //\\n') out_file.write(' * // 19 14 23 //\\n')", "= write_symb_vector(out_file, om[0], 'om1_', '') om_tilde[0] = get_tilde(om[0]) for i", "= parent_body_index[i] x[i] = x[parent_id] + Rt[parent_id] * Dpt[i] xp[i]", "* (c_z_{}*{} + s_z_{}*{});\\n'.format(omega_out, body_part, body_part, omega_in[0], body_part, omega_in[1])) out_file.write('", "i in range(0, 9): out_file.write(' in_out.Rwrist_or[{}] = {};\\n'.format(i, R_r_elb[i])) out_file.write('\\n')", "of body i\\n') out_file.write(' * Ri : absolute rotational matrix\\n')", "for i in range(1, nb_bodies): parent_id = parent_body_index[i] Om[i] =", "18, # right arm 15, 20, 21, 22 # left", "M = np.array([ 'M_6', # waist 'M_7' , 'M_8' ,", "out_file.write(' in_out.rp_Rfoot[{}] = {};\\n'.format(i, xp_r[i])) out_file.write('\\n') out_file.write(' // right foot", "1 elif not flag_print: flag_print = 1 out_file.write('\\n') out_file.write(' in_out.Rwrist_or_der[{}][{}]", "13, 14, # trunk 15, 16, 17, 18, # right", "if not 
flag_first: flag_first = 1 flag_print = 1 elif", "out_file.write('\\n // rotation matrices\\n') R = nb_bodies*[None] Rt = nb_bodies*[None]", "cosine, sine): if direct: if axis == 1: return np.array([[1.0,", "sin(in_out.theta_waist[2]);\\n') out_file.write(' s_z_torso = sin(in_out.theta_torso[2]);\\n\\n') out_file.write(' if ((!c_y_waist) || (!c_y_torso))\\n", "1 out_file.write('\\n') out_file.write(' in_out.Rfoot_or_der[{}][{}] = {};\\n'.format(get_string_enum(joint_id_names[i]), j, cur_jac)) out_file.write(' }\\n\\n')", "/ c_y_Lfoot;\\n\\n') out_file.write(' // right foot orientation angle derivatives [rad/s]\\n')", "print matrix components declaration def write_matrix_declaration(out_file, prefix): out_file.write(' double ')", "body i to its COM (center of mass) G_i, #", "Dpt_r_foot[2] = sp.Symbol('DPT_3_16') Dpt_l_foot[2] = sp.Symbol('DPT_3_29') Dpt_r_foot_cont = nb_contacts *", "16 21 //\\n') out_file.write(' * // 18 15 22 //\\n')", "not flag_print: flag_print = 1 out_file.write('\\n') out_file.write(' in_out.Rwrist_or_der[{}][{}] = {};\\n'.format(get_string_enum(joint_id_names[i]),", "in_out.{}[2] = inv_c_y_{} * s_y_{} * (s_z_{}*{} + c_z_{}*{}) +", "j in range(0, 3): cur_jac = symbolic_jacob_der(Rj, xj, xgj, x_l[j],", "xgp = nb_bodies*[None] for i in range(0, nb_bodies): xg[i] =", ": jacobian of \\'xgi\\'\\n') out_file.write(' * Rji : jacobian of", "{};\\n'.format(i, j, x_r_cont[i][j])) out_file.write('\\n') out_file.write(' // right foot contact points", "out_file.write(' // left wrist absolute orientation jacobian\\n') out_file.write(' if (flag_jacob)\\n", "copy temporary file out_file.write(line) out_file.close() # remove temporary file os.remove(in_temp)", "def write_variables_declaration(out_file, prefix, min, max): out_file.write(' double ') for i", "of the previous body # DGi : position vector from", "# positive neg_flag = 0 pos_str = elem_split[0] elif cur_len", "= 0 for i in range(1, len(joint_id_names)): count += 1", "sp.Matrix([0.0, 0.0, 0.0]) # 
right leg Dpt[1] = sp.Matrix([0.0, sp.Symbol('DPT_2_2'),", "x_r_cont[i] = x_r_foot + R_r_foot.T * Dpt_r_foot_cont[i] x_l_cont[i] = x_l_foot", "= sp.Matrix([sp.Symbol('L_1_11'), sp.Symbol('L_2_11'), sp.Symbol('L_3_11')]) Dg[6] = sp.Matrix([sp.Symbol('L_1_12'), 0.0 , sp.Symbol('L_3_12')])", "absolute position\\n') for i in range(0,3): out_file.write(' in_out.r_Lwrist[{}] = {};\\n'.format(i,", "k in range(0, 9): if Rj_print[i][j][k] != None: if not", "1) Dpt_l_wrist = sp.zeros(3, 1) Dpt_r_wrist[0] = r_wrist_x Dpt_r_wrist[1] =", "{\\n') out_file.write(' return;\\n }\\n\\n') out_file.write(' inv_c_y_waist = 1.0 / c_y_waist;\\n')", "in range(0, 3): out_file.write(' omega_{} = in_out.IMU_Angular_Rate[{}];\\n'.format(i+1, i)) out_file.write('\\n //", "= ' {} = {};\\n'.format(elem_name, matrix[i,j]).replace('1.0*','') new_matrix[i,j] = sp.Symbol(elem_name) return", "xj_print, x_vector, index): # loop on all the joints for", "Dpt_l_foot_cont[i] = sp.zeros(3, 1) Dpt_r_foot_cont[0][0] = x_min Dpt_r_foot_cont[1][0] = x_min", "print variables declaration def write_variables_declaration(out_file, prefix, min, max): out_file.write(' double", "Rti : transpose matrix of Ri\\n') out_file.write(' * xji :", "in the relative\\n') out_file.write(' * frame of the previous body\\n')", "= sp.Matrix([0.0, 0.0, 0.0]) Dpt[15] = sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_32')]) #", "* // 03 09 //\\n') out_file.write(' * // 04 10", "//\\n') out_file.write(' * // 04 10 //\\n') out_file.write(' * //", "arm Dpt[20] = sp.Matrix([sp.Symbol('DPT_1_37'), sp.Symbol('DPT_2_37'), sp.Symbol('DPT_3_37')]) Dpt[21] = sp.Matrix([0.0, sp.Symbol('DPT_2_46'),", "transpose matrix of Ri\\n') out_file.write(' * xji : jacobian of", "[0.0, cosine, -sine], [0.0, sine, cosine]]) elif axis == 2:", "c_y_Lfoot = cos(in_out.theta_Lfoot[1]);\\n') out_file.write(' c_z_Rfoot = cos(in_out.theta_Rfoot[2]);\\n') out_file.write(' c_z_Lfoot =", "symbolic vector and replace symbolic variable by its name def", "// left foot contact points 
absolute position\\n') for i in", "angles [rad]\\n') yaw_pitch_roll_angles(out_file, 'theta_Rfoot', R[r_foot_id], 1) out_file.write('\\n') out_file.write(' // left", "flag_print = 1 out_file.write('\\n') out_file.write(' in_out.r_Rwrist_der[{}][{}] = {};\\n'.format(get_string_enum(joint_id_names[i]), j, cur_jac))", "symbolic matrix and replace symbolic variable by its name def", "nb_bodies, joint_id_names, rot_axis, parent_body_index, Dpt, Dg, M): out_file.write('\\n\\n // --", "# left leg 'M_19', 'M_20', 'M_21', # trunk 'M_22', 'M_23',", "0, 1, 2, 3, 4, 5, # right leg 0,", "position\\n') for i in range(0, 3): out_file.write(' in_out.r_COM[{}] = '.format(i))", "matrices\\n') out_file.write(' if (flag_jacob)\\n {\\n') flag_first = 0 for i", "nb_bodies*[None] # waist Dg[0] = sp.Matrix([sp.Symbol('L_1_6'), sp.Symbol('L_2_6'), sp.Symbol('L_3_6')]) # right", "R[l_foot_id], 1) out_file.write('\\n') out_file.write(' c_y_Rfoot = cos(in_out.theta_Rfoot[1]);\\n') out_file.write(' c_y_Lfoot =", "nb_contacts * [None] for i in range(0, nb_contacts): Dpt_r_foot_cont[i] =", "-- variables initialization -- //\\n') out_file.write('\\n // IMU - rotation", "save_vector # save the symbolic matrix for print def print_save_symb_matrix(matrix,", "xgj[i] = nb_bodies*[None] Rj_print[i] = nb_bodies*[None] xj_print[i] = nb_bodies*[None] xgj_print[i]", "right leg Dg[1] = sp.Matrix([sp.Symbol('L_1_7') , sp.Symbol('L_2_7') , sp.Symbol('L_3_7')]) Dg[2]", "True, a, b, c except: return False, -1, -1, -1", "[None] x_l_cont = nb_contacts * [None] # computation om_tilde_r_foot =", "1 out_file.write('\\n') out_file.write(' in_out.Lwrist_or_der[{}][{}] = {};\\n'.format(get_string_enum(joint_id_names[i]), j, cur_jac)) out_file.write(' }\\n\\n')", "flag_print: flag_print = 1 out_file.write('\\n') out_file.write('{}'.format(Rj_print[i][j][k])) out_file.write(' }\\n') # omega", "'M_18', # left leg 'M_19', 'M_20', 'M_21', # trunk 'M_22',", "symbolic_jacob_der(Rj, xj, xgj, x_r[j], 'q{}'.format(i+1)) if 
cur_jac != 0: if", "angles and derivatives\\n') out_file.write(' *\\n') out_file.write(' * ////////////////////////\\n') out_file.write(' *", "r_foot_id, l_foot_id, x_min, x_max, y_min, y_max): # symbolic variables declarations", "return np.array([[elem], [0.0], [0.0]]) elif axis == 2: return np.array([[0.0],", "i in range(1, nb_bodies): out_file.write(' c{} = cos(in_out.q_mot[{}]);\\n'.format(i+1, joint_id_names[i])) out_file.write('\\n", "3): for j in range(0, 3): R[0][i,j] = sp.Symbol('IMU{}{}'.format(i+1, j+1))", "rotation matrices\\n') out_file.write(' if (flag_jacob)\\n {\\n') flag_first = 0 for", "x_l = x_l_foot + R_l_foot.T * Dpt_l_foot xp_r = xp_r_foot", "out_file.write(' * inertial frame: located at the origin (waist), but", "flag_print = 1 out_file.write('\\n') out_file.write(' in_out.r_Lfoot_der[{}][{}] = {};\\n'.format(get_string_enum(joint_id_names[i]), j, cur_jac))", "anchor point of the previous body to the current body", "return np.array([[0.0], [elem], [0.0]]) elif axis == 3: return np.array([[0.0],", "# detect products cur_term_split = cur_term.split('*') cur_len = len(cur_term_split) #", "term_list = str(symb_var).replace('- ','-').replace('-','+-').split('+') if term_list[0] == '': term_list.pop(0) result", "out_file.write('({}*xg{}_{}'.format(M[j], j+1, i+1)) if j == nb_bodies-1: if flag_first: out_file.write(')/m_tot;\\n')", "flag_print = 0 for j in range(0, 3): cur_jac =", "for i in range(0,3): for j in range(0,3): if matrix[i,j]", "Rd[i] = get_rotation_matrix(rot_axis[i], 1, sp.Symbol('c{}'.format(i+1)), sp.Symbol('s{}'.format(i+1))) R[i] = Rd[i] *", "= cos(in_out.theta_Rfoot[2]);\\n') out_file.write(' c_z_Lfoot = cos(in_out.theta_Lfoot[2]);\\n\\n') out_file.write(' s_y_Rfoot = sin(in_out.theta_Rfoot[1]);\\n')", "%a, %b, %c also returned) def isRot(value): try: a =", "in_out.rp_Rfoot[{}] = {};\\n'.format(i, xp_r[i])) out_file.write('\\n') out_file.write(' // right foot jacobian\\n')", "out_file, 100) out_file.write('\\n\\n') # copy 
temporary file out_file.write(line) out_file.close() #", "else: out_file.write(', ') # print variables declaration def write_variables_declaration(out_file, prefix,", "as f: # loop on all the lines for line", "cosine]]) elif axis == 3: return np.array([[cosine, -sine, 0.0], [sine,", "angle in [-pi/2 ; pi/2] out_file.write(' in_out.{}[0] = atan2({}, {});\\n'.format(angle_name,", "symbolic jacobian of a com point def write_symb_xgj(nb_bodies, Rj, xj,", "x out_file.write('\\n // jacobian anchor point positions\\n') out_file.write(' if (flag_jacob)\\n", "+= xj[d-1][der_q-1][e-1] # apply negative if neg_flag: result = -result", "if axis == 1: return np.array([[1.0, 0.0, 0.0], [0.0, cosine,", "in range(0, 3): cur_jac = symbolic_jacob_der(Rj, xj, xgj, x_l[j], 'q{}'.format(i+1))", "sp.Matrix([sp.Symbol('L_1_23'), sp.Symbol('L_2_23'), sp.Symbol('L_3_23')]) Dg[18] = sp.Matrix([sp.Symbol('L_1_24'), sp.Symbol('L_2_24'), sp.Symbol('L_3_24')]) Dg[19] =", "yaw_pitch_roll_angles(out_file, 'theta_Lfoot', R[l_foot_id], 1) out_file.write('\\n') out_file.write(' c_y_Rfoot = cos(in_out.theta_Rfoot[1]);\\n') out_file.write('", "for k in range(0, nb_bodies): if xgj[k][i][j] != 0: if", "epsilon > 0: # epsilon = 1 -> pitch angle", "0.0, sp.Symbol('DPT_3_22')]) Dpt[11] = sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_24')]) Dpt[12] = sp.Matrix([0.0,", "for i in range(1, len(joint_id_names)): count += 1 if i", "its name def write_symb_matrix(out_file, matrix, start_name, end_name): new_matrix = sp.zeros(3,", "= len(elem_split) if cur_len == 1: # positive neg_flag =", "= 'forward_kinematics' gen_symbolic_out(out_file_name, nb_bodies, rot_axis, parent_body_index, joint_id_names, Dpt, Dg, M)", "write_symb_matrix(out_file, R[i], 'R{}_'.format(i+1), '') Rt[i] = R[i].T # jacobian rotation", "in f: cut_line = line.split(' = ') if len(cut_line) ==", "in range(0, nb_bodies): if xgj[k][i][j] != 0: if flag_first: out_file.write('", "out_file.write(' s_z_Lfoot = sin(in_out.theta_Lfoot[2]);\\n\\n') 
out_file.write(' if ((!c_y_Rfoot) || (!c_y_Lfoot))\\n {\\n')", "for i in range(1, nb_bodies): out_file.write(' c{} = cos(in_out.q_mot[{}]);\\n'.format(i+1, joint_id_names[i]))", "out_file.write(' * Omi : rotational vector from the previous body", "# rotation matrices out_file.write('\\n // rotation matrices\\n') R = nb_bodies*[None]", "i in range (1, nb_bodies): new_vector = sp.zeros(3, 1) #", "joint_id_names, Dpt, Dg, M): # temporary file in_temp = './{}_temp.cc'.format(out_file_name)", "nb_bodies*[None] for i in range(0, nb_bodies): Rj[i] = nb_bodies*[None] xj[i]", "Dpt_l_foot = sp.zeros(3, 1) Dpt_r_foot[2] = sp.Symbol('DPT_3_16') Dpt_l_foot[2] = sp.Symbol('DPT_3_29')", "script # rotation axis for each joint before body i", "out_file.write('{}{}{}'.format(prefix, i+1, j+1)) if i == 2 and j ==", "* (R_r_foot.T * Dpt_r_foot) xp_l = xp_l_foot + om_tilde_l_foot *", "joint sines\\n') for i in range(1, nb_bodies): out_file.write(' s{} =", "/ c_y_waist;\\n') out_file.write(' inv_c_y_torso = 1.0 / c_y_torso;\\n\\n') out_file.write(' //", "x_r_elb = x[r_elb_id] x_l_elb = x[l_elb_id] xp_r_elb = xp[r_elb_id] xp_l_elb", "# left arm ]) nb_bodies = len(parent_body_index) ## anchor point", "axis == 3: return np.array([[0.0], [0.0], [elem]]) else: return np.array([])", "# frame of the previous body # DGi : position", "xg[i], i+1) xg[i] = write_symb_vector(out_file, xg[i], 'xg{}_'.format(i+1), '') xgp[i] =", "// 02 08 //\\n') out_file.write(' * // 03 09 //\\n')", "in_out.rp_COM[{}] = '.format(i)) flag_first = 0 for j in range(0,", "in range(0, nb_bodies): out_file.write('{}'.format(M[i])) if i == nb_bodies-1: out_file.write(';\\n\\n') else:", "+ R_l_foot.T * Dpt_l_foot_cont[i] # writing outputs out_file.write(' // right", "(flag_jacob)\\n {\\n') flag_first = 0 for i in range (1,", "== '': term_list.pop(0) result = 0 # loop on all", "# get a string for the enumeration of joints def", "* origin: in the waist, middle point between the two", "in range(min, max+1): 
out_file.write('{}{}'.format(prefix, i)) if i == max: out_file.write(';\\n')", "k, cur_jac)) out_file.write(' }\\n\\n') out_file.write(' // feet absolute orientation\\n') for", "sin(in_out.theta_Rfoot[2]);\\n') out_file.write(' s_z_Lfoot = sin(in_out.theta_Lfoot[2]);\\n\\n') out_file.write(' if ((!c_y_Rfoot) || (!c_y_Lfoot))\\n", "+ Rt[parent_id] * Om[i] om[i] = write_symb_vector(out_file, om[i], 'om{}_'.format(i+1), '')", "open(in_temp, 'w') # beginning of the file write_file_beginning(file_temp, joint_id_names) #", "# right leg 2, 1, 3, 2, 1, 2, #", "k in range(0, 3): cur_jac = symbolic_jacob_der(Rj, xj, xgj, x_l_cont[i][k],", "xj, xgj, der_var)*sp.Symbol(cur_term_split[1].strip()) result += der_elem(cur_term_split[1], Rj, xj, xgj, der_var)*sp.Symbol(cur_term_split[0].strip())", "= 1 out_file.write('\\n') out_file.write(' in_out.Rfoot_or_der[{}][{}] = {};\\n'.format(get_string_enum(joint_id_names[i]), j, cur_jac)) out_file.write('", "temporary file out_file.write(line) out_file.close() # remove temporary file os.remove(in_temp) #", "= sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_41')]) Dpt[19] = sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_43')]) #", "elif axis == 2: return np.array([[0.0], [elem], [0.0]]) elif axis", "wrists absolute orientation\\n') for i in range(0, 9): out_file.write(' in_out.Rwrist_or[{}]", "02 08 //\\n') out_file.write(' * // 03 09 //\\n') out_file.write('", "nb_bodies*[None] Om[0] = sp.zeros(3, 1) om[0] = sp.zeros(3, 1) for", "out_file.write(' * computation of:\\n') out_file.write(' * COM (center of mass)", "1: count = 0 with open(in_file,'r') as f: # loop", "symbolic_jacob_der(Rj, xj, xgj, symb_var, der_var): # list of all terms", "arm Dpt[16] = sp.Matrix([sp.Symbol('DPT_1_36'), sp.Symbol('DPT_2_36'), sp.Symbol('DPT_3_36')]) Dpt[17] = sp.Matrix([0.0, sp.Symbol('DPT_2_39'),", "for i in range(0, 9): out_file.write(' in_out.Rfoot_or[{}] = {};\\n'.format(i, R_r_foot[i]))", "+ s_z_{}*{});\\n'.format(omega_out, body_part, body_part, omega_in[0], body_part, 
omega_in[1])) out_file.write(' in_out.{}[1] =", "sp.zeros(3, 3) save_matrix = 9 * [None] for i in", "return np.array([]) else: if axis == 1: return np.array([[-elem], [0.0],", "right foot absolute orientation jacobian\\n') out_file.write(' if (flag_jacob)\\n {\\n') flag_first", "'x{}_'.format(i+1), '') xp[i] = write_symb_vector(out_file, xp[i], 'xp{}_'.format(i+1), '') # jacobian", "return;\\n }\\n\\n') out_file.write(' inv_c_y_Rfoot = 1.0 / c_y_Rfoot;\\n') out_file.write(' inv_c_y_Lfoot", "flag_print = 0 for i in range(0,3): if vector[i] ==", "20 01 24 # # 02 08 # # 03", "* Dpt_r_foot x_l = x_l_foot + R_l_foot.T * Dpt_l_foot xp_r", "= nb_bodies*[None] Rt = nb_bodies*[None] Rd = nb_bodies*[None] Rd[0] =", "//\\n') out_file.write(' * // 06 12 //\\n') out_file.write(' * //", "!= 1: out_file.write(' // -- variables declaration -- //\\n\\n') print_all_declaration(in_temp,", "## anchor point positions Dpt = nb_bodies*[None] # waist Dpt[0]", "y_max Dpt_r_foot_cont[2][1] = y_min Dpt_r_foot_cont[3][1] = y_max for i in", "xj_print[index-1][i]] = print_save_symb_vector(new_vector, 'x{}_'.format(index), '_d{}'.format(i+1)) # write symbolic jacobian of", "write_symb_vector(out_file, xp[0], 'xp1_', '') for i in range(1, nb_bodies): parent_id", "0: out_write.write(' double {}'.format(cut_line[0].strip())) else: out_write.write(', {}'.format(cut_line[0].strip())) count += len(cut_line[0].strip())", "// 03 09 //\\n') out_file.write(' * // 04 10 //\\n')", "Dpt_r_wrist[0] = r_wrist_x Dpt_r_wrist[1] = r_wrist_y Dpt_r_wrist[2] = r_wrist_z Dpt_l_wrist[0]", "joints for i in range (1, nb_bodies): new_matrix = sp.zeros(3,", "4 x_r_foot = x[r_foot_id] x_l_foot = x[l_foot_id] xp_r_foot = xp[r_foot_id]", "= 1 out_file.write('\\n') out_file.write(' in_out.r_Lwrist_der[{}][{}] = {};\\n'.format(get_string_enum(joint_id_names[i]), j, cur_jac)) out_file.write('", "& xgp out_file.write('\\n // com absolute positions and velocities\\n') xg", "= sp.Matrix([sp.Symbol('L_1_9') , sp.Symbol('L_2_9') , 
sp.Symbol('L_3_9')]) Dg[4] = sp.Matrix([sp.Symbol('L_1_10'), sp.Symbol('L_2_10'),", "rotational vector from the previous body to the current body", "else: # epsilon = -1 -> pitch angle in [pi/2", "[-sine, 0.0, cosine]]) elif axis == 3: return np.array([[cosine, -sine,", "* // 20 01 24 //\\n') out_file.write(' * // 02", "[0.0, 1.0, 0.0], [-sine, 0.0, cosine]]) elif axis == 3:", "count += 1 if i == 1: out_file.write('{}'.format(get_string_enum(joint_id_names[i]))) elif count", "y_max for i in range(0, nb_contacts): Dpt_r_foot_cont[i][2] = sp.Symbol('DPT_3_16') for", "def write_file_end(out_file): out_file.write('}\\n') # print matrix components declaration def write_matrix_declaration(out_file,", "sp.Symbol('L_3_15')]) Dg[10] = sp.Matrix([sp.Symbol('L_1_16'), sp.Symbol('L_2_16'), sp.Symbol('L_3_16')]) Dg[11] = sp.Matrix([sp.Symbol('L_1_17'), sp.Symbol('L_2_17'),", "i in range(0, 3): out_file.write(' omega_{} = in_out.IMU_Angular_Rate[{}];\\n'.format(i+1, i)) out_file.write('\\n", "write_symb_xgj(nb_bodies, Rj, xj, xgj, xgj_print, xg[i], i+1) xg[i] = write_symb_vector(out_file,", "= x[l_foot_id] xp_r_foot = xp[r_foot_id] xp_l_foot = xp[l_foot_id] om_r_foot =", "sp.Symbol('L_2_23'), sp.Symbol('L_3_23')]) Dg[18] = sp.Matrix([sp.Symbol('L_1_24'), sp.Symbol('L_2_24'), sp.Symbol('L_3_24')]) Dg[19] = sp.Matrix([sp.Symbol('L_1_25'),", "xp out_file.write('\\n // anchor point absolute positions and velocities\\n') x", "'') # jacobian x out_file.write('\\n // jacobian anchor point positions\\n')", "relative angle before body i # # xi : absolute", "= 0 if count != 0: out_write.write(';\\n') # get tilde", "range(0,3): out_file.write(' in_out.r_Lwrist[{}] = {};\\n'.format(i, x_l[i])) out_file.write('\\n') out_file.write(' // left", "out_file.write(' in_out.Lfoot_or[{}] = {};\\n'.format(i, R_l_foot[i])) out_file.write('\\n') out_file.write(' // right foot", "the anchor point of body i\\n') out_file.write(' * xgi :", "in f: cut_line = line.split(elem) if len(cut_line) == 2: count", 
"flag_first: flag_first = 1 flag_print = 1 elif not flag_print:", "elif axis == 3: return np.array([[0.0], [0.0], [elem]]) else: return", "= symbolic_jacob_der(Rj, xj, xgj, R_r_foot[j], 'q{}'.format(i+1)) if cur_jac != 0:", "cur_jac)) out_file.write(' }\\n\\n') out_file.write(' // left foot absolute orientation jacobian\\n')", "out_file.write(' in_out.Lfoot_or_der[{}][{}] = {};\\n'.format(get_string_enum(joint_id_names[i]), j, cur_jac)) out_file.write(' }\\n\\n') out_file.write(' //", "19 14 23 //\\n') out_file.write(' * // 20 01 24", "between body i and its predecessor # si : sine", "out_file.write('\\n') out_file.write(' // torso orientation matrix as angles [rad]\\n') yaw_pitch_roll_angles(out_file,", "out_file.write(' // wrists absolute orientation\\n') for i in range(0, 9):", "= {};\\n'.format(i, x_l[i])) out_file.write('\\n') out_file.write(' // left foot absolute velocity\\n')", "all the vector elements for j in range(0, 3): new_vector[j]", "r_wrist_z): # symbolic variables declarations x_r_elb = x[r_elb_id] x_l_elb =", "= r_wrist_y Dpt_r_wrist[2] = r_wrist_z Dpt_l_wrist[0] = r_wrist_x Dpt_l_wrist[1] =", "the two pitch hip rotations # inertial frame: located at", "R_matrix[0], R_matrix[0], R_matrix[1], R_matrix[1])) out_file.write(' in_out.{}[2] = atan2({}, {});\\n'.format(angle_name, R_matrix[1],", "k in range(0, 3): cur_jac = symbolic_jacob_der(Rj, xj, xgj, x_r_cont[i][k],", "inv_c_y_Lfoot = 1.0 / c_y_Lfoot;\\n\\n') out_file.write(' // right foot orientation", "= nb_bodies*[None] xgj_print = nb_bodies*[None] for i in range(0, nb_bodies):", "* Dpt_r_wrist x_l = x_l_elb + R_l_elb.T * Dpt_l_wrist xp_r", "left foot absolute orientation jacobian\\n') out_file.write(' if (flag_jacob)\\n {\\n') flag_first", "def count_elem(in_file, elem): count = 0; with open(in_file, 'r') as", "vector from the previous body to the current body i", "* [None] Dpt_l_foot_cont = nb_contacts * [None] for i in", "R, x, xp, om, Rj, xj, xgj, 19, 23, -0.02,", "kinematics computation\\n') 
out_file.write(' *\\n') out_file.write(' * \\\\param[in,out] in_out inputs and", "# list of all terms term_list = str(symb_var).replace('- ','-').replace('-','+-').split('+') if", "// 19 14 23 //\\n') out_file.write(' * // 20 01", "range(0, 3): R[0][i,j] = sp.Symbol('IMU{}{}'.format(i+1, j+1)) write_symb_Rj(nb_bodies, Rj, xj, xgj,", "flag_print = 1 out_file.write('\\n') out_file.write(' in_out.Lfoot_or_der[{}][{}] = {};\\n'.format(get_string_enum(joint_id_names[i]), j, cur_jac))", "i # ci : cosine of the relative angle before", "Dg[10] = sp.Matrix([sp.Symbol('L_1_16'), sp.Symbol('L_2_16'), sp.Symbol('L_3_16')]) Dg[11] = sp.Matrix([sp.Symbol('L_1_17'), sp.Symbol('L_2_17'), sp.Symbol('L_3_17')])", "nb_bodies*[None] xj[i] = nb_bodies*[None] xgj[i] = nb_bodies*[None] Rj_print[i] = nb_bodies*[None]", "xj_print[i][j] = 3 * [None] xgj_print[i][j] = 3 * [None]", "cur_split[i]) else: new_string = cur_string cur_split = filter(None, re.split(\"([A-Z][^A-Z]*)\", new_string))", "out_file.write('\\n') out_file.write(' in_out.Rfoot_or_der[{}][{}] = {};\\n'.format(get_string_enum(joint_id_names[i]), j, cur_jac)) out_file.write(' }\\n\\n') out_file.write('", "v[1]], [v[2], 0.0, -v[0]], [-v[1], v[0], 0.0]]) # get rotation", "the current body i \\n') out_file.write(' * (previous body is", "body_part, omega_in[1])) out_file.write(' in_out.{}[1] = c_z_{}*{} - s_z_{}*{};\\n'.format(omega_out, body_part, omega_in[1],", "end of the file write_file_end(file_temp) file_temp.close() # output file out_file", "if count != 0: out_write.write(';\\n') # print all declarations def", "nb_bodies): if xgj[k][i][j] != 0: if flag_first: out_file.write(' + {}*{}'.format(M[k],", "joint_id_names): out_file.write(' // -- variables initialization -- //\\n') out_file.write('\\n //", "sp.zeros(3, 1) Rj_print[i][j] = 9 * [None] xj_print[i][j] = 3", "flag_first = 1 out_file.write('({}*xgp{}_{}'.format(M[j], j+1, i+1)) if j == nb_bodies-1:", "symbolic_jacob_der(Rj, xj, xgj, x_r_cont[i][k], 'q{}'.format(j+1)) if 
cur_jac != 0: if", "Om[i] om[i] = write_symb_vector(out_file, om[i], 'om{}_'.format(i+1), '') om_tilde[i] = get_tilde(om[i])", "out_file.write(' *\\n') out_file.write(' * Di : position vector from the", "sp.Matrix([0.0, sp.Symbol('DPT_2_2'), 0.0]) Dpt[2] = sp.Matrix([0.0, sp.Symbol('DPT_2_6'), 0.0]) Dpt[3] =", "xj, xgj, xgj_print, x_vector, index): # loop on all the", "'theta_waist', R[waist_id], 1) out_file.write('\\n') out_file.write(' // torso orientation matrix as", "Rj_print, R[i], i+1) R[i] = write_symb_matrix(out_file, R[i], 'R{}_'.format(i+1), '') Rt[i]", "+ om_tilde[i] * (Rt[i] * Dg[i]) write_symb_xgj(nb_bodies, Rj, xj, xgj,", "-sp.Symbol('s{}'.format(der_q)) # sine elif pos_str == 's{}'.format(der_q): result += sp.Symbol('c{}'.format(der_q))", "xj, xgj, symb_var, der_var): # list of all terms term_list", "1 elem_name = '{}{}{}'.format(start_name, i+1, end_name) out_file.write(' {} = {};\\n'.format(elem_name,", "right arm 'M_26', 'M_27', 'M_28', 'M_29' # left arm ])", "(info from IMU) # # Di : position vector from", "atan2(-{}, -{});\\n'.format(angle_name, R_matrix[5], R_matrix[8])) out_file.write(' in_out.{}[1] = atan2(-{}, -sqrt({}*{} +", "R[torso_id], 1) out_file.write('\\n') out_file.write(' c_y_waist = cos(in_out.theta_waist[1]);\\n') out_file.write(' c_y_torso =", "i\\n') out_file.write(' * xpi : derivative of xi\\n') out_file.write(' *", "foot orientation matrix as angles [rad]\\n') yaw_pitch_roll_angles(out_file, 'theta_Rfoot', R[r_foot_id], 1)", "lines for line in f: cut_line = line.split(elem) if len(cut_line)", "out_file.write(' c_y_Rfoot = cos(in_out.theta_Rfoot[1]);\\n') out_file.write(' c_y_Lfoot = cos(in_out.theta_Lfoot[1]);\\n') out_file.write(' c_z_Rfoot", "return False # return true if it has a shape", "(Rt[i] * Dg[i]) write_symb_xgj(nb_bodies, Rj, xj, xgj, xgj_print, xg[i], i+1)", "before body i\\n') out_file.write(' *\\n') out_file.write(' * xi : absolute", "c_z_waist = cos(in_out.theta_waist[2]);\\n') out_file.write(' c_z_torso = 
cos(in_out.theta_torso[2]);\\n\\n') out_file.write(' s_y_waist =", "absolute position vector (from origin, expressed in the inertial frame)\\n')", "out_file.write(' * // 18 15 22 //\\n') out_file.write(' * //", "the lines for line in f: # declaration if len(line.split('//", "COM G_i of body i # xpi : derivative of", "= 1.0 / c_y_torso;\\n\\n') out_file.write(' // waist orientation angle derivatives", "elem_name = '{}{}{}{}'.format(start_name, i+1, j+1, end_name) out_file.write(' {} = {};\\n'.format(elem_name,", "1) Dpt_r_foot[2] = sp.Symbol('DPT_3_16') Dpt_l_foot[2] = sp.Symbol('DPT_3_29') Dpt_r_foot_cont = nb_contacts", "// left wrist absolute position\\n') for i in range(0,3): out_file.write('", "os.remove(in_temp) # main script # rotation axis for each joint", "range(0,9): cur_jac = symbolic_jacob_der(Rj, xj, xgj, R_r_foot[j], 'q{}'.format(i+1)) if cur_jac", "angle before body i\\n') out_file.write(' *\\n') out_file.write(' * xi :", "if flag_first: out_file.write(')/m_tot;\\n') else: out_file.write('0.0;\\n') if i != nb_bodies-1: out_file.write('\\n')", "== max: out_file.write(';\\n') else: out_file.write(', ') # variables initialization def", "write_symb_xgj(nb_bodies, Rj, xj, xgj, xgj_print, x_vector, index): # loop on", "1, 3, 2, # right arm 2, 1, 3, 2", "= int(value.split('_')[0].split('x')[1]) b = int(value.split('_')[1]) return True, a, b except:", "* \\\\brief forward kinematics computation for the COMAN model\\n') out_file.write('", "epsilon = -1 -> pitch angle in [pi/2 ; 3*pi/2]", "[0.0]]) elif axis == 2: return np.array([[0.0], [-elem], [0.0]]) elif", "* frame of the previous body\\n') out_file.write(' * DGi :", "* ci : cosine of the relative angle before body", "right foot contact points jacobian\\n') out_file.write(' if (flag_jacob)\\n {\\n') flag_first", "elif cur_len == 2: result += der_elem(cur_term_split[0], Rj, xj, xgj,", "//\\n') out_file.write(' * // 03 09 //\\n') out_file.write(' * //", "flag_print = 0 for k in range(0, 9): if 
Rj_print[i][j][k]", "write_symb_vector(out_file, x[i], 'x{}_'.format(i+1), '') xp[i] = write_symb_vector(out_file, xp[i], 'xp{}_'.format(i+1), '')", "= x[i] + Rt[i] * Dg[i] xgp[i] = xp[i] +", "'') xp[i] = write_symb_vector(out_file, xp[i], 'xp{}_'.format(i+1), '') # jacobian x", "0.0]) Dpt[22] = sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_48')]) Dpt[23] = sp.Matrix([0.0, 0.0,", "'M_9' , 'M_10', 'M_11', 'M_12', # right leg 'M_13', 'M_14',", "if neg_flag: result = -result return result # compute the", "out_file.write(' }\\n\\n') out_file.write(' // left foot absolute orientation jacobian\\n') out_file.write('", "= {};\\n'.format(elem_name, vector[i]).replace('1.0*','')) new_vector[i] = sp.Symbol(elem_name) if flag_print: out_file.write('\\n') return", "global com jacobian\\n') out_file.write(' if (flag_jacob)\\n {\\n') for i in", "in the relative # frame of the previous body #", "Dpt_l_wrist xp_r = xp_r_elb + om_tilde_r_elb * (R_r_elb.T * Dpt_r_wrist)", "# trunk 2, 1, 3, 2, # right arm 2,", "08 # # 03 09 # # 04 10 #", "cos(in_out.theta_waist[2]);\\n') out_file.write(' c_z_torso = cos(in_out.theta_torso[2]);\\n\\n') out_file.write(' s_y_waist = sin(in_out.theta_waist[1]);\\n') out_file.write('", "range(0, nb_bodies): if xgj[k][i][j] != 0: if flag_first: out_file.write(' +", ">= 1: count = 0 with open(in_file,'r') as f: #", ">= 6: count = 0 out_file.write(',\\n {}'.format(get_string_enum(joint_id_names[i]))) else: out_file.write(', {}'.format(get_string_enum(joint_id_names[i])))", "i)) if i == max: out_file.write(';\\n') else: out_file.write(', ') #", "# loop on all the joints for i in range", "si : sine of the relative angle before body i\\n')", "print_save_symb_matrix(new_matrix, 'R{}_'.format(index), '_d{}'.format(i+1)) # write symbolic jacobian of an anchor", "true if it has a shape 'x%a_%b' (indexes %a, %b", "# # ###################### # # origin: in the waist, middle", "0.0], [0.0, cosine, sine], [0.0, -sine, cosine]]) elif axis ==", "= 1 out_file.write('\\n') 
out_file.write(' in_out.r_Rfoot_der[{}][{}] = {};\\n'.format(get_string_enum(joint_id_names[i]), j, cur_jac)) out_file.write('", "cur_jac = symbolic_jacob_der(Rj, xj, xgj, R_r_foot[j], 'q{}'.format(i+1)) if cur_jac !=", "return new_vector, save_vector # save the symbolic matrix for print", "3, 4, 5, # right leg 0, 7, 8, 9,", "sp.Matrix([sp.Symbol('L_1_29'), sp.Symbol('L_2_29'), sp.Symbol('L_3_29')]) # masses M = np.array([ 'M_6', #", "= sp.Matrix([sp.Symbol('L_1_28'), sp.Symbol('L_2_28'), sp.Symbol('L_3_28')]) Dg[23] = sp.Matrix([sp.Symbol('L_1_29'), sp.Symbol('L_2_29'), sp.Symbol('L_3_29')]) #", "02 08 # # 03 09 # # 04 10", "out_file.write(' // torso orientation matrix as angles [rad]\\n') yaw_pitch_roll_angles(out_file, 'theta_torso',", "out_file.write(' // right foot absolute position\\n') for i in range(0,3):", "cur_split[0].upper() for i in range(1, len(cur_split)): new_string = '{}_{}'.format(new_string, cur_split[i].upper())", "'Ri' # return true if it is a float def", "nb_max_char: out_write.write(';\\n') count = 0 if count != 0: out_write.write(';\\n')", "matrix def get_rotation_matrix(axis, direct, cosine, sine): if direct: if axis", "for i in range(0, nb_bodies): Rj[i] = nb_bodies*[None] xj[i] =", "* [None] # computation om_tilde_r_foot = get_tilde(om_r_foot) om_tilde_l_foot = get_tilde(om_l_foot)", "{} = {};\\n'.format(elem_name, matrix[i,j]).replace('1.0*','') new_matrix[i,j] = sp.Symbol(elem_name) return new_matrix, save_matrix", "xgji : jacobian of 'xgi' # Rji : jacobian of", "i+1, end_name) out_file.write(' {} = {};\\n'.format(elem_name, vector[i]).replace('1.0*','')) new_vector[i] = sp.Symbol(elem_name)", "1.0]]) else: return np.array([]) else: if axis == 1: return", "{}'.format(get_string_enum(joint_id_names[i]))) out_file.write('};\\n\\n') out_file.write('/*! 
\\\\brief main kinematics computation\\n') out_file.write(' *\\n') out_file.write('", "in range(0,3): out_file.write(' in_out.rp_Rwrist[{}] = {};\\n'.format(i, xp_r[i])) out_file.write('\\n') out_file.write(' //", "out_file.write('{}'.format(M[i])) if i == nb_bodies-1: out_file.write(';\\n\\n') else: out_file.write(' + ')", "body i-1), expressed in the relative # frame of the", "Dg[12] = sp.Matrix([sp.Symbol('L_1_18'), 0.0 , sp.Symbol('L_3_18')]) # trunk Dg[13] =", "* Dpt_r_wrist) xp_l = xp_l_elb + om_tilde_l_elb * (R_l_elb.T *", "out_file.write(' }\\n') # omega out_file.write('\\n // joint absolute velocities\\n') Om", "the file def write_file_beginning(out_file, joint_id_names): out_file.write('/*! \\n') out_file.write(' * \\\\author", "in the relative frame of the current body i #", "# left leg Dpt[7] = sp.Matrix([0.0, sp.Symbol('DPT_2_3'), 0.0]) Dpt[8] =", "# frame of the previous body # Rdi : rotational", "or matrix[i,j] == 1: new_matrix[i,j] = matrix[i,j] save_matrix[3*i+j] = None", "sine], [0.0, 1.0, 0.0], [-sine, 0.0, cosine]]) elif axis ==", "derivatives [rad/s]\\n') theta_dot_compute(out_file, om[r_foot_id], 'omega_Rfoot', 'Rfoot') out_file.write('\\n') out_file.write(' // left", "# 02 08 # # 03 09 # # 04", "'M_14', 'M_15', 'M_16', 'M_17', 'M_18', # left leg 'M_19', 'M_20',", "# return true if it has a shape 'x%a_%b' (indexes", "# angles (position and derivative) of the waist and the", "for i in range(1, nb_bodies): for j in range(0, 3):", "origin (waist), but aligned with the ground (info from IMU)\\n')", "yaw_pitch_roll_angles(out_file, angle_name, R_matrix, epsilon): if epsilon > 0: # epsilon", "out_file.write(' * xgi : absolute position vector of the COM", "0: if flag_first: out_file.write(' + {}*{}'.format(M[k], str(xgj[k][i][j]))) else: flag_first =", "and replace symbolic variable by its name def write_symb_vector(out_file, vector,", "of body i # xgi : absolute position vector of", "mass) G_i,\\n') out_file.write(' * expressed in the relative frame 
of", "nb_bodies = len(parent_body_index) ## anchor point positions Dpt = nb_bodies*[None]", "arm ]) nb_bodies = len(parent_body_index) ## anchor point positions Dpt", "// 04 10 //\\n') out_file.write(' * // 05 11 //\\n')", "x_r[i])) out_file.write('\\n') out_file.write(' // right foot absolute velocity\\n') for i", "0.0]) Dpt[9] = sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_20')]) Dpt[10] = sp.Matrix([0.0, 0.0,", "= sp.Matrix([0.0, 0.0, 0.0]) # trunk Dpt[13] = sp.Matrix([sp.Symbol('DPT_1_4'), 0.0,", "= nb_bodies*[None] for i in range(0, nb_bodies): Rj[i] = nb_bodies*[None]", "{});\\n'.format(angle_name, R_matrix[5], R_matrix[8])) out_file.write(' in_out.{}[1] = atan2(-{}, sqrt({}*{} + {}*{}));\\n'.format(angle_name,", "range(0, 3): new_vector[j] = symbolic_jacob_der(Rj, xj, xgj, x_vector[j], 'q{}'.format(i+1)) [xj[index-1][i],", "flag_print = 1 out_file.write('\\n') out_file.write(' in_out.r_Lfoot_cont_der[{}][{}][{}] = {};\\n'.format(i, get_string_enum(joint_id_names[j]), k,", "of Ri\\n') out_file.write(' * xji : jacobian of \\'xi\\'\\n') out_file.write('", "out_file.write(' *\\n') out_file.write(' * computation of:\\n') out_file.write(' * COM (center", "time derivatives of 'yaw_pitch_roll_angles' def theta_dot_compute(out_file, omega_in, omega_out, body_part): out_file.write('", "sp.zeros(3, 1) Dpt_l_wrist = sp.zeros(3, 1) Dpt_r_wrist[0] = r_wrist_x Dpt_r_wrist[1]", "= 1 out_file.write('\\n') out_file.write(' in_out.Rwrist_or_der[{}][{}] = {};\\n'.format(get_string_enum(joint_id_names[i]), j, cur_jac)) out_file.write('", "np.array([[-elem], [0.0], [0.0]]) elif axis == 2: return np.array([[0.0], [-elem],", "range(0,3): for j in range(0,3): out_file.write('{}{}{}'.format(prefix, i+1, j+1)) if i", "sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_32')]) # right arm Dpt[16] = sp.Matrix([sp.Symbol('DPT_1_36'), sp.Symbol('DPT_2_36'),", "= sp.Symbol('omega_{}'.format(i+1)) om[0] = write_symb_vector(out_file, om[0], 'om1_', '') om_tilde[0] =", "left foot absolute velocity\\n') for i in 
range(0,3): out_file.write(' in_out.rp_Lfoot[{}]", "r_elb_id, l_elb_id, r_wrist_x, r_wrist_y, r_wrist_z): # symbolic variables declarations x_r_elb", "the relative angle before body i\\n') out_file.write(' * ci :", "2, # right arm 2, 1, 3, 2 # left", "if cur_jac != 0: if not flag_first: flag_first = 1", "IMU)\\n') out_file.write(' *\\n') out_file.write(' * Di : position vector from", "17, 18, # right arm 15, 20, 21, 22 #", "sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_24')]) Dpt[12] = sp.Matrix([0.0, 0.0, 0.0]) # trunk", "rot_flag: result += Rj[a-1][der_q-1][(b-1)*3+(c-1)] # vector elif vec_flag: result +=", "point between the two pitch hip rotations\\n') out_file.write(' * inertial", "and xgj (jacobian) Rj = nb_bodies*[None] xj = nb_bodies*[None] xgj", "as sp import re import os ###################### # # #", "*/\\n') out_file.write('void ForwardKinematics::main_kinematics(KinematicsInOut &in_out)\\n{\\n') # compute the center of mass", "01 24 //\\n') out_file.write(' * // 02 08 //\\n') out_file.write('", "// IMU - angles velocity\\n') for i in range(0, 3):", "nb_max_line: out_write.write(';\\n') count = 0 if count != 0: out_write.write(';\\n')", "out_file.write(' * // 19 14 23 //\\n') out_file.write(' * //", "1) Dpt_l_foot = sp.zeros(3, 1) Dpt_r_foot[2] = sp.Symbol('DPT_3_16') Dpt_l_foot[2] =", "rotation matrices\\n') for i in range(0, 3): for j in", "for i in range(1, nb_bodies): Rd[i] = get_rotation_matrix(rot_axis[i], 1, sp.Symbol('c{}'.format(i+1)),", "xgji : jacobian of \\'xgi\\'\\n') out_file.write(' * Rji : jacobian", "xgj[k][i][j] != 0: if flag_first: out_file.write(' + {}*{}'.format(M[k], str(xgj[k][i][j]))) else:", "Dpt, Dg, M) # end of the file write_file_end(file_temp) file_temp.close()", "'M_28', 'M_29' # left arm ]) # joint names joint_id_names", "in range(0, nb_bodies): for j in range(1, nb_bodies): flag_print =", "other else: [rot_flag, a, b, c] = isRot(pos_str) [vec_flag, d,", "10 //\\n') out_file.write(' * // 05 11 //\\n') out_file.write(' *", 
"range(0, nb_bodies): out_file.write('{}'.format(M[i])) if i == nb_bodies-1: out_file.write(';\\n\\n') else: out_file.write('", "x_l = x_l_elb + R_l_elb.T * Dpt_l_wrist xp_r = xp_r_elb", "# other else: [rot_flag, a, b, c] = isRot(pos_str) [vec_flag,", "xj, xgj, x_vector[j], 'q{}'.format(i+1)) [xgj[index-1][i], xgj_print[index-1][i]] = print_save_symb_vector(new_vector, 'xg{}_'.format(index), '_d{}'.format(i+1))", "flag_print = 0 for k in range(0, 3): if xj_print[i][j][k]", "sp.Symbol('DPT_3_32')]) # right arm Dpt[16] = sp.Matrix([sp.Symbol('DPT_1_36'), sp.Symbol('DPT_2_36'), sp.Symbol('DPT_3_36')]) Dpt[17]", "23, -0.02, -0.005, -0.225) torso_waist_angles(out_file, R, om, 0, 15) #", "]) out_file_name = 'forward_kinematics' gen_symbolic_out(out_file_name, nb_bodies, rot_axis, parent_body_index, joint_id_names, Dpt,", "ci : cosine of the relative angle before body i\\n')", "xi\\n') out_file.write(' * xgpi : derivative of xgi\\n') out_file.write(' *", "0 for i in range (1, nb_bodies): flag_print = 0", "position\\n') for i in range(0,3): out_file.write(' in_out.r_Lfoot[{}] = {};\\n'.format(i, x_l[i]))", "jacobian of 'xgi' # Rji : jacobian of 'Ri' #", "out_file.write(' * Rdi : rotational matrix between body i and", "out_file.write(' *\\n') out_file.write(' * ////////////////////////\\n') out_file.write(' * // //\\n') out_file.write('", "variable by its name def write_symb_vector(out_file, vector, start_name, end_name): new_vector", "b = int(value.split('_')[1][0]) c = int(value.split('_')[1][1]) return True, a, b,", "out_file.write('\\n') out_file.write(' // right foot contact points jacobian\\n') out_file.write(' if", "range(0, nb_contacts): for j in range (1, nb_bodies): flag_print =", "angles [rad]\\n') yaw_pitch_roll_angles(out_file, 'theta_waist', R[waist_id], 1) out_file.write('\\n') out_file.write(' // torso", "variables initialization -- //\\n') out_file.write('\\n // IMU - rotation matrices\\n')", "end_name): new_matrix = sp.zeros(3, 3) flag_print = 0 for i", 
"in range(0,9): cur_jac = symbolic_jacob_der(Rj, xj, xgj, R_l_elb[j], 'q{}'.format(i+1)) if", "= nb_bodies*[None] xj_print[i] = nb_bodies*[None] xgj_print[i] = nb_bodies*[None] for j", "joint absolute velocities\\n') Om = nb_bodies*[None] om = nb_bodies*[None] om_tilde", "= sp.Symbol('DPT_3_29') Dpt_r_foot_cont = nb_contacts * [None] Dpt_l_foot_cont = nb_contacts", "in range(0,3): for j in range(0,3): if matrix[i,j] == 0", "= x_r_foot + R_r_foot.T * Dpt_r_foot x_l = x_l_foot +", "joint cosines\\n') for i in range(1, nb_bodies): out_file.write(' c{} =", "x_l_cont = nb_contacts * [None] # computation om_tilde_r_foot = get_tilde(om_r_foot)", "joint_id_names, M, xg, xgp, xgj): out_file.write(' m_tot = ') for", "for i in range(0, 3): for j in range(0, 3):", "in range(0, 9): out_file.write(' in_out.Lfoot_or[{}] = {};\\n'.format(i, R_l_foot[i])) out_file.write('\\n') out_file.write('", "sp.zeros(3, 1) for i in range(0,3): om[0][i] = sp.Symbol('omega_{}'.format(i+1)) om[0]", "Rt = nb_bodies*[None] Rd = nb_bodies*[None] Rd[0] = sp.zeros(3, 3)", "sp.Symbol('L_2_21'), sp.Symbol('L_3_21')]) # right arm Dg[16] = sp.Matrix([sp.Symbol('L_1_22'), sp.Symbol('L_2_22'), sp.Symbol('L_3_22')])", "= cur_string cur_split = filter(None, re.split(\"([A-Z][^A-Z]*)\", new_string)) new_string = cur_split[0].upper()", "in range(0, 9): out_file.write(' in_out.Rfoot_or[{}] = {};\\n'.format(i, R_r_foot[i])) out_file.write('\\n') for", "if xgj_print[i][j][k] != None: if not flag_first: flag_first = 1", "{};\\n'.format(elem_name, vector[i]).replace('1.0*','')) new_vector[i] = sp.Symbol(elem_name) if flag_print: out_file.write('\\n') return new_vector", "nb_bodies-1: out_file.write(';\\n\\n') else: out_file.write(' + ') out_file.write(' // global com", "flag_print = 1 out_file.write('\\n') out_file.write(' in_out.Rfoot_or_der[{}][{}] = {};\\n'.format(get_string_enum(joint_id_names[i]), j, cur_jac))", "in the inertial frame) # of the anchor point of", "for i in range (1, nb_bodies): new_matrix = 
sp.zeros(3, 3)", "//\\n') out_file.write(' * // 19 14 23 //\\n') out_file.write(' *", "absolute position\\n') for i in range(0, 3): out_file.write(' in_out.r_COM[{}] =", "x, xp, om, Rj, xj, xgj, r_foot_id, l_foot_id, x_min, x_max,", "right foot absolute velocity\\n') for i in range(0,3): out_file.write(' in_out.rp_Rfoot[{}]", "c{} = cos(in_out.q_mot[{}]);\\n'.format(i+1, joint_id_names[i])) out_file.write('\\n // joint sines\\n') for i", "out_file.write(' // left foot contact points absolute position\\n') for i", "R_r_elb[i])) out_file.write('\\n') for i in range(0, 9): out_file.write(' in_out.Lwrist_or[{}] =", "xj = nb_bodies*[None] xgj = nb_bodies*[None] Rj_print = nb_bodies*[None] xj_print", "# xji : jacobian of 'xi' # xgji : jacobian", "2: return np.array([[cosine, 0.0, sine], [0.0, 1.0, 0.0], [-sine, 0.0,", "24 //\\n') out_file.write(' * // 02 08 //\\n') out_file.write(' *", "+= 1 if i == 1: out_file.write('{}'.format(get_string_enum(joint_id_names[i]))) elif count >=", "!= 0: if not flag_first: flag_first = 1 flag_print =", "range(0,9): cur_jac = symbolic_jacob_der(Rj, xj, xgj, R_l_foot[j], 'q{}'.format(i+1)) if cur_jac", "= sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_48')]) Dpt[23] = sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_50')]) ##", "* // 18 15 22 //\\n') out_file.write(' * // 19", "= R[r_foot_id] R_l_foot = R[l_foot_id] Dpt_r_foot = sp.zeros(3, 1) Dpt_l_foot", "if it is a float def isInt(value): try: int(value) return", "flag_first = 1 flag_print = 1 elif not flag_print: flag_print", "\\\\param[in,out] in_out inputs and outputs class\\n') out_file.write(' *\\n') out_file.write(' *", "for the enumeration of joints def get_string_enum(cur_string): cur_split = cur_string.split('_')", "kinematics computation for the COMAN model\\n') out_file.write(' */\\n\\n') out_file.write('// joints", "out_file.write(' c_z_waist = cos(in_out.theta_waist[2]);\\n') out_file.write(' c_z_torso = cos(in_out.theta_torso[2]);\\n\\n') out_file.write(' s_y_waist", "+ 
{}*xgp{}_{}'.format(M[j], j+1, i+1)) else: flag_first = 1 out_file.write('({}*xgp{}_{}'.format(M[j], j+1,", "joint_id_names) # variables initialization write_intialization(file_temp, nb_bodies, joint_id_names) # symbolic computation", "epsilon): if epsilon > 0: # epsilon = 1 ->", "sp.zeros(3, 1) Dpt_r_foot_cont[0][0] = x_min Dpt_r_foot_cont[1][0] = x_min Dpt_r_foot_cont[2][0] =", "axis for each joint before body i (1:x, 2:y, 3:z)", "*\\n') out_file.write(' * computation of:\\n') out_file.write(' * COM (center of", "min, max): out_file.write(' double ') for i in range(min, max+1):", "theta_dot_compute(out_file, om[r_foot_id], 'omega_Rfoot', 'Rfoot') out_file.write('\\n') out_file.write(' // left foot orientation", "out_file.write('{}{}'.format(prefix, i)) if i == max: out_file.write(';\\n') else: out_file.write(', ')", "c_y_waist = cos(in_out.theta_waist[1]);\\n') out_file.write(' c_y_torso = cos(in_out.theta_torso[1]);\\n') out_file.write(' c_z_waist =", "else: out_file.write(' + ') out_file.write(' // global com absolute position\\n')", "j in range(0,9): cur_jac = symbolic_jacob_der(Rj, xj, xgj, R_r_foot[j], 'q{}'.format(i+1))", "= nb_bodies*[None] om_tilde = nb_bodies*[None] Om[0] = sp.zeros(3, 1) om[0]", "3, 2, 1, 2, # right leg 2, 1, 3,", "of a com point def write_symb_xgj(nb_bodies, Rj, xj, xgj, xgj_print,", "foot contact points jacobian\\n') out_file.write(' if (flag_jacob)\\n {\\n') flag_first =", "cur_term_split = cur_term.split('*') cur_len = len(cur_term_split) # no product if", "R[0], 1) R[0] = write_symb_matrix(out_file, R[0], 'R1_', '') Rt[0] =", "np.array([[elem], [0.0], [0.0]]) elif axis == 2: return np.array([[0.0], [elem],", "14 23 # # 20 01 24 # # 02", "== 1: result += der_elem(cur_term_split[0], Rj, xj, xgj, der_var) #", "// //\\n') out_file.write(' * ////////////////////////\\n') out_file.write(' *\\n') out_file.write(' * origin:", "re import os ###################### # # # 17 16 21", "Dg[13] = sp.Matrix([sp.Symbol('L_1_19'), 
sp.Symbol('L_2_19'), sp.Symbol('L_3_19')]) Dg[14] = sp.Matrix([sp.Symbol('L_1_20'), sp.Symbol('L_2_20'), sp.Symbol('L_3_20')])", "out_file.write(' * Rji : jacobian of \\'Ri\\'\\n') out_file.write(' */\\n') out_file.write('void", "body_part, body_part, body_part, omega_in[1], body_part, omega_in[0], omega_in[2])) # angles (position", "count = 0; with open(in_file, 'r') as f: # loop", "the current body i # Omi : rotational vector from", "if term_list[0] == '': term_list.pop(0) result = 0 # loop", "= sp.zeros(3, 1) xgj[i][j] = sp.zeros(3, 1) Rj_print[i][j] = 9", "to its COM (center of mass) G_i, # expressed in", "all the lines for line in f: # declaration if", "for k in range(0, 3): cur_jac = symbolic_jacob_der(Rj, xj, xgj,", "out_write.write(' double {}'.format(cut_line[0].strip())) else: out_write.write(', {}'.format(cut_line[0].strip())) count += len(cut_line[0].strip()) +", "torso orientaion angles and derivatives\\n') out_file.write(' *\\n') out_file.write(' * ////////////////////////\\n')", "frame) # of the anchor point of body i #", "\\\\file forward_kinematics.cc\\n') out_file.write(' * \\\\brief forward kinematics computation for the", "else: flag_print = 1 elem_name = '{}{}{}{}'.format(start_name, i+1, j+1, end_name)", "= sp.Symbol(elem_name) if flag_print: out_file.write('\\n') return new_matrix # save the", "the COMAN model\\n') out_file.write(' */\\n\\n') out_file.write('// joints enumeration\\n') out_file.write('enum {')", "* [None] xgj_print[i][j] = 3 * [None] # rotation matrices", "* Dpt[0] xp[0] = om_tilde[0] * (Rt[0] * Dpt[0]) write_symb_xj(nb_bodies,", "M, xg, xgp, xgj) feet_compute(out_file, joint_id_names, R, x, xp, om,", "M, xg, xgp, xgj): out_file.write(' m_tot = ') for i", "j+1, i+1)) if j == nb_bodies-1: if flag_first: out_file.write(')/m_tot;\\n') else:", "6, 12, -0.06, 0.08, -0.045, 0.045) wrists_compute(out_file, joint_id_names, R, x,", "declarations def print_all_declaration(in_file, out_write, nb_max_char): count = 0 with 
open(in_file,'r')", "5, # right leg 0, 7, 8, 9, 10, 11,", "out_file.write(' // left foot absolute orientation jacobian\\n') out_file.write(' if (flag_jacob)\\n", "vector[i] == 1: new_vector[i] = vector[i] else: flag_print = 1", "write_symb_matrix(out_file, matrix, start_name, end_name): new_matrix = sp.zeros(3, 3) flag_print =", "'_d{}'.format(i+1)) # symbolic computation def symbolic_computation(out_file, nb_bodies, joint_id_names, rot_axis, parent_body_index,", "out_file.write(' + {}*xgp{}_{}'.format(M[j], j+1, i+1)) else: flag_first = 1 out_file.write('({}*xgp{}_{}'.format(M[j],", "in range(0,9): cur_jac = symbolic_jacob_der(Rj, xj, xgj, R_l_foot[j], 'q{}'.format(i+1)) if", "= sin(in_out.q_mot[{}]);\\n'.format(i+1, joint_id_names[i])) out_file.write('\\n // joint relative velocities\\n') for i", "file out_file.write(line) out_file.close() # remove temporary file os.remove(in_temp) # main", "vector[i] else: flag_print = 1 elem_name = '{}{}{}'.format(start_name, i+1, end_name)", "Om{} = in_out.qd_mot[{}];\\n'.format(i+1, joint_id_names[i])) # write symbolic vector and replace", "0 or matrix[i,j] == 1: new_matrix[i,j] = matrix[i,j] save_matrix[3*i+j] =", "// torso orientation angle derivatives [rad/s]\\n') theta_dot_compute(out_file, om[torso_id], 'omega_torso', 'torso')", "the end of the file def write_file_end(out_file): out_file.write('}\\n') # print", "def der_elem(elem_str, Rj, xj, xgj, der_var): # element to derive", "omega_in[1])) out_file.write(' in_out.{}[1] = c_z_{}*{} - s_z_{}*{};\\n'.format(omega_out, body_part, omega_in[1], body_part,", "s_z_waist = sin(in_out.theta_waist[2]);\\n') out_file.write(' s_z_torso = sin(in_out.theta_torso[2]);\\n\\n') out_file.write(' if ((!c_y_waist)", "in_out.Lfoot_or_der[{}][{}] = {};\\n'.format(get_string_enum(joint_id_names[i]), j, cur_jac)) out_file.write(' }\\n\\n') out_file.write(' // right", "xgj, xgj_print, xg[i], i+1) xg[i] = write_symb_vector(out_file, xg[i], 'xg{}_'.format(i+1), '')", "= nb_contacts * [None] 
x_l_cont = nb_contacts * [None] #", "# main script # rotation axis for each joint before", "from the anchor point of the previous body to the", "pos_str = elem_split[0] elif cur_len == 2: # negative neg_flag", "the two pitch hip rotations\\n') out_file.write(' * inertial frame: located", "out_file.write(' in_out.rp_Rwrist[{}] = {};\\n'.format(i, xp_r[i])) out_file.write('\\n') out_file.write(' // right wrist", "velocities\\n') for i in range(1, nb_bodies): out_file.write(' Om{} = in_out.qd_mot[{}];\\n'.format(i+1,", "also returned) def isVec(value): try: a = int(value.split('_')[0].split('x')[1]) b =", "out_file.write(' // left wrist absolute velocity\\n') for i in range(0,3):", "positions\\n') out_file.write(' if (flag_jacob)\\n {\\n') flag_first = 0 for i", "1, 3, 2, 1, 2, # left leg 1, 2,", "or 1 !'.format(cur_len-1)) exit() return result # write the beginning", "i in range(0, 9): out_file.write(' in_out.Rfoot_or[{}] = {};\\n'.format(i, R_r_foot[i])) out_file.write('\\n')", "}\\n\\n') out_file.write(' // left wrist absolute orientation jacobian\\n') out_file.write(' if", "of the previous body to the current body i #", "out_file.write('\\n') out_file.write(' // right foot absolute orientation jacobian\\n') out_file.write(' if", "if axis == 1: return np.array([[-elem], [0.0], [0.0]]) elif axis", "x, xp, om, Rj, xj, xgj, r_elb_id, l_elb_id, r_wrist_x, r_wrist_y,", "= int(value.split('_')[0].split('R')[1]) b = int(value.split('_')[1][0]) c = int(value.split('_')[1][1]) return True,", "= xp_l_elb + om_tilde_l_elb * (R_l_elb.T * Dpt_l_wrist) # writing", "x_l[i])) out_file.write('\\n') out_file.write(' // left wrist absolute velocity\\n') for i", "vector and replace symbolic variable by its name def write_symb_vector(out_file,", "IMU - angles velocity\\n') for i in range(0, 3): out_file.write('", "R_matrix[0])) else: # epsilon = -1 -> pitch angle in", "out_file.write(' double ') for i in range(min, max+1): out_file.write('{}{}'.format(prefix, i))", "np.array([[0.0, 
-v[2], v[1]], [v[2], 0.0, -v[0]], [-v[1], v[0], 0.0]]) #", "(previous body is not always body i-1), expressed in the", "3: return np.array([[cosine, -sine, 0.0], [sine, cosine, 0.0], [0.0, 0.0,", "position vector of the COM G_i of body i\\n') out_file.write('", "[None] for i in range(0,3): for j in range(0,3): if", "for i in range(0, nb_bodies): xg[i] = x[i] + Rt[i]", "file in_temp = './{}_temp.cc'.format(out_file_name) file_temp = open(in_temp, 'w') # beginning", "range(1, nb_bodies): flag_print = 0 for k in range(0, 9):", "i (1:x, 2:y, 3:z) rot_axis = np.array([0, # waist 2,", "i # # xi : absolute position vector (from origin,", "{};\\n'.format(i, R_l_foot[i])) out_file.write('\\n') out_file.write(' // right foot absolute orientation jacobian\\n')", "\\'xi\\'\\n') out_file.write(' * xgji : jacobian of \\'xgi\\'\\n') out_file.write(' *", "x_l_foot = x[l_foot_id] xp_r_foot = xp[r_foot_id] xp_l_foot = xp[l_foot_id] om_r_foot", "not flag_first: flag_first = 1 flag_print = 1 elif not", "out_file = open('./{}.cc'.format(out_file_name), 'w') with open(in_temp, 'r') as f: #", "{};\\n'.format(elem_name, matrix[i,j]).replace('1.0*','') new_matrix[i,j] = sp.Symbol(elem_name) return new_matrix, save_matrix # write", "out_file.write(' s_y_waist = sin(in_out.theta_waist[1]);\\n') out_file.write(' s_y_torso = sin(in_out.theta_torso[1]);\\n') out_file.write(' s_z_waist", "Rj_print[i][j][k] != None: if not flag_first: flag_first = 1 flag_print", "out_file.write(' * Ri : absolute rotational matrix\\n') out_file.write(' * Rti", "the relative angle before body i # # xi :", "range (1, nb_bodies): flag_print = 0 for k in range(0,", ", # left leg 'TorsoRoll_id' , 'TorsoPitch_id' , 'TorsoYaw_id' ,", ": rotational matrix between body i and its predecessor\\n') out_file.write('", "x = nb_bodies*[None] xp = nb_bodies*[None] x[0] = Rt[0] *", "//\\n') out_file.write(' * // 17 16 21 //\\n') out_file.write(' *", "x_l_cont[i][k], 'q{}'.format(j+1)) if cur_jac != 0: if not flag_first: 
flag_first", "') # print variables declaration def write_variables_declaration(out_file, prefix, min, max):", "yaw_pitch_roll_angles(out_file, 'theta_Rfoot', R[r_foot_id], 1) out_file.write('\\n') out_file.write(' // left foot orientation", "sp.Matrix([0.0, sp.Symbol('DPT_2_3'), 0.0]) Dpt[8] = sp.Matrix([0.0, sp.Symbol('DPT_2_18'), 0.0]) Dpt[9] =", "get_tilde(v): return np.array([[0.0, -v[2], v[1]], [v[2], 0.0, -v[0]], [-v[1], v[0],", "= 1 out_file.write('\\n') out_file.write(' in_out.r_Lfoot_cont_der[{}][{}][{}] = {};\\n'.format(i, get_string_enum(joint_id_names[j]), k, cur_jac))", "rotation matrix if rot_flag: result += Rj[a-1][der_q-1][(b-1)*3+(c-1)] # vector elif", "cur_jac)) out_file.write(' }\\n\\n') out_file.write(' // wrists absolute orientation\\n') for i", "in_out.Rwrist_or_der[{}][{}] = {};\\n'.format(get_string_enum(joint_id_names[i]), j, cur_jac)) out_file.write(' }\\n\\n') out_file.write(' // left", "1) # loop on all the vector elements for j", "sp.Symbol('L_2_14'), sp.Symbol('L_3_14')]) Dg[9] = sp.Matrix([sp.Symbol('L_1_15'), sp.Symbol('L_2_15'), sp.Symbol('L_3_15')]) Dg[10] = sp.Matrix([sp.Symbol('L_1_16'),", "previous body to the current body i \\n') out_file.write(' *", "nb_contacts): for j in range(0, 3): Dpt_l_foot_cont[i][j] = Dpt_r_foot_cont[i][j] x_r_cont", "'Rfoot') out_file.write('\\n') out_file.write(' // left foot orientation angle derivatives [rad/s]\\n')", "name def write_symb_vector(out_file, vector, start_name, end_name): new_vector = sp.zeros(3, 1)", "sp.Symbol('L_3_18')]) # trunk Dg[13] = sp.Matrix([sp.Symbol('L_1_19'), sp.Symbol('L_2_19'), sp.Symbol('L_3_19')]) Dg[14] =", "waist, middle point between the two pitch hip rotations #", "-1, # waist 0, 1, 2, 3, 4, 5, #", "sp.Symbol('L_3_13')]) Dg[8] = sp.Matrix([sp.Symbol('L_1_14'), sp.Symbol('L_2_14'), sp.Symbol('L_3_14')]) Dg[9] = sp.Matrix([sp.Symbol('L_1_15'), sp.Symbol('L_2_15'),", "out_file.write(' // right foot absolute orientation jacobian\\n') out_file.write(' if 
(flag_jacob)\\n", "of the anchor point of body i # xgi :", "positive/negative elem_split = elem_str.split('-') cur_len = len(elem_split) if cur_len ==", "of 1 or 2 in negative detection !'.format(cur_len)) exit() #", "// right foot orientation angle derivatives [rad/s]\\n') theta_dot_compute(out_file, om[r_foot_id], 'omega_Rfoot',", "= in_out.IMU_Angular_Rate[{}];\\n'.format(i+1, i)) out_file.write('\\n // joint cosines\\n') for i in", "= 0 for k in range(0, 9): if Rj_print[i][j][k] !=", "gen_symbolic_out(out_file_name, nb_bodies, rot_axis, parent_body_index, joint_id_names, Dpt, Dg, M): # temporary", "np.array([]) else: if axis == 1: return np.array([[-elem], [0.0], [0.0]])", "yaw_pitch_roll_angles(out_file, 'theta_torso', R[torso_id], 1) out_file.write('\\n') out_file.write(' c_y_waist = cos(in_out.theta_waist[1]);\\n') out_file.write('", "# loop on all terms for cur_term in term_list: #", "0.045) wrists_compute(out_file, joint_id_names, R, x, xp, om, Rj, xj, xgj,", "out_file.write(' in_out.r_Rfoot[{}] = {};\\n'.format(i, x_r[i])) out_file.write('\\n') out_file.write(' // right foot", "Dpt_r_foot_cont[i][j] x_r_cont = nb_contacts * [None] x_l_cont = nb_contacts *", "Dg[15] = sp.Matrix([sp.Symbol('L_1_21'), sp.Symbol('L_2_21'), sp.Symbol('L_3_21')]) # right arm Dg[16] =", ", 'LeftHipRoll_id' , 'LeftHipYaw_id' , 'LeftKneePitch_id' , 'LeftFootRoll_id' , 'LeftFootPitch_id'", "if direct: if axis == 1: return np.array([[1.0, 0.0, 0.0],", "sp.Symbol(elem_name) return new_matrix, save_matrix # write symbolic jacobian of a", "term_list: # detect products cur_term_split = cur_term.split('*') cur_len = len(cur_term_split)", "0: out_write.write(';\\n') # print all declarations def print_all_declaration(in_file, out_write, nb_max_char):", "elif axis == 3: return np.array([[cosine, sine, 0.0], [-sine, cosine,", "# rotation axis for each joint before body i (1:x,", "c_z_Lfoot = cos(in_out.theta_Lfoot[2]);\\n\\n') out_file.write(' s_y_Rfoot = sin(in_out.theta_Rfoot[1]);\\n') 
out_file.write(' s_y_Lfoot =", "R_l_elb[j], 'q{}'.format(i+1)) if cur_jac != 0: if not flag_first: flag_first", "sp.Symbol('DPT_3_4')]) Dpt[14] = sp.Matrix([0.0, 0.0, 0.0]) Dpt[15] = sp.Matrix([0.0, 0.0,", "xp_r_elb + om_tilde_r_elb * (R_r_elb.T * Dpt_r_wrist) xp_l = xp_l_elb", "*\\n') out_file.write(' * \\\\param[in,out] in_out inputs and outputs class\\n') out_file.write('", "out_file.write(' // left foot absolute velocity\\n') for i in range(0,3):", "# write symbolic jacobian of a rotation matrix def write_symb_Rj(nb_bodies,", "trunk Dg[13] = sp.Matrix([sp.Symbol('L_1_19'), sp.Symbol('L_2_19'), sp.Symbol('L_3_19')]) Dg[14] = sp.Matrix([sp.Symbol('L_1_20'), sp.Symbol('L_2_20'),", "Rj_print, R_matrix, index): # loop on all the joints for", "3: return np.array([[0.0], [0.0], [-elem]]) else: return np.array([]) # compute", "x_r_foot + R_r_foot.T * Dpt_r_foot_cont[i] x_l_cont[i] = x_l_foot + R_l_foot.T", "the joints for i in range (1, nb_bodies): new_matrix =", "in range(0, 3): cur_jac = symbolic_jacob_der(Rj, xj, xgj, x_r[j], 'q{}'.format(i+1))", "flag_print = 1 out_file.write('\\n') out_file.write(' in_out.r_Rfoot_der[{}][{}] = {};\\n'.format(get_string_enum(joint_id_names[i]), j, cur_jac))", "= 1 out_file.write('\\n') out_file.write('{}'.format(Rj_print[i][j][k])) out_file.write(' }\\n') # omega out_file.write('\\n //", "1 flag_print = 1 elif not flag_print: flag_print = 1", "number of 'elem' in the file def count_elem(in_file, elem): count", "right leg 2, 1, 3, 2, 1, 2, # left", ", sp.Symbol('L_3_7')]) Dg[2] = sp.Matrix([sp.Symbol('L_1_8') , sp.Symbol('L_2_8') , sp.Symbol('L_3_8')]) Dg[3]", "sp.Symbol('L_3_17')]) Dg[12] = sp.Matrix([sp.Symbol('L_1_18'), 0.0 , sp.Symbol('L_3_18')]) # trunk Dg[13]", "* xpi : derivative of xi\\n') out_file.write(' * xgpi :", "relative angle before body i # ci : cosine of", "{') count = 0 for i in range(1, len(joint_id_names)): count", "[v[2], 0.0, -v[0]], [-v[1], v[0], 0.0]]) # get rotation matrix", "elif axis == 2: return 
np.array([[cosine, 0.0, -sine], [0.0, 1.0,", "leg 'M_19', 'M_20', 'M_21', # trunk 'M_22', 'M_23', 'M_24', 'M_25',", "'r') as f: # loop on all the lines for", "j in range(0,3): out_file.write('{}{}{}'.format(prefix, i+1, j+1)) if i == 2", "out_file.write('void ForwardKinematics::main_kinematics(KinematicsInOut &in_out)\\n{\\n') # compute the center of mass position", "for j in range(0, 3): out_file.write(' IMU{}{} = in_out.IMU_Orientation[{}];\\n'.format(i+1, j+1,", "sine of the relative angle before body i # ci", "16, 17, 18, # right arm 15, 20, 21, 22", "R, x, xp, om, Rj, xj, xgj, r_foot_id, l_foot_id, x_min,", "j in range(0, nb_bodies): if flag_first: out_file.write(' + {}*xgp{}_{}'.format(M[j], j+1,", "= symbolic_jacob_der(Rj, xj, xgj, R_matrix[j], 'q{}'.format(i+1)) [Rj[index-1][i], Rj_print[index-1][i]] = print_save_symb_matrix(new_matrix,", "3): new_vector[j] = symbolic_jacob_der(Rj, xj, xgj, x_vector[j], 'q{}'.format(i+1)) [xgj[index-1][i], xgj_print[index-1][i]]", "* inertial frame: located at the origin (waist), but aligned", "'q{}'.format(i+1)) if cur_jac != 0: if not flag_first: flag_first =", "= cos(in_out.theta_Lfoot[1]);\\n') out_file.write(' c_z_Rfoot = cos(in_out.theta_Rfoot[2]);\\n') out_file.write(' c_z_Lfoot = cos(in_out.theta_Lfoot[2]);\\n\\n')", "out_file.write(' in_out.Rfoot_or[{}] = {};\\n'.format(i, R_r_foot[i])) out_file.write('\\n') for i in range(0,", "= None else: elem_name = '{}{}{}{}'.format(start_name, i+1, j+1, end_name) save_matrix[3*i+j]", "out_file.write(' in_out.r_Lfoot_der[{}][{}] = {};\\n'.format(get_string_enum(joint_id_names[i]), j, cur_jac)) out_file.write(' }\\n\\n') out_file.write(' //", "'q{}'.format(j+1)) if cur_jac != 0: if not flag_first: flag_first =", "orientation\\n') for i in range(0, 9): out_file.write(' in_out.Rfoot_or[{}] = {};\\n'.format(i,", "////////////////////////\\n') out_file.write(' * // //\\n') out_file.write(' * // 17 16", "xp_r = xp_r_elb + om_tilde_r_elb * (R_r_elb.T * Dpt_r_wrist) xp_l", "replace 
symbolic variable by its name def write_symb_matrix(out_file, matrix, start_name,", "os ###################### # # # 17 16 21 # #", "# Di : position vector from the anchor point of", "sp.zeros(3, 1) xgj[i][j] = sp.zeros(3, 1) Rj_print[i][j] = 9 *", "* Dpt[i]) write_symb_xj(nb_bodies, Rj, xj, xgj, xj_print, x[i], i+1) x[i]", "xgj, der_var)*sp.Symbol(cur_term_split[1].strip()) result += der_elem(cur_term_split[1], Rj, xj, xgj, der_var)*sp.Symbol(cur_term_split[0].strip()) #", "'R{}_'.format(index), '_d{}'.format(i+1)) # write symbolic jacobian of an anchor point", "rotation matrix def write_symb_Rj(nb_bodies, Rj, xj, xgj, Rj_print, R_matrix, index):", "axis == 3: return np.array([[cosine, -sine, 0.0], [sine, cosine, 0.0],", ": absolute rotational vector of body i\\n') out_file.write(' * Ri", "yaw_pitch_roll_angles(out_file, 'theta_waist', R[waist_id], 1) out_file.write('\\n') out_file.write(' // torso orientation matrix", "[rad/s]\\n') theta_dot_compute(out_file, om[torso_id], 'omega_torso', 'torso') # compute the feet position,", "vector[i] == 1: new_vector[i] = vector[i] save_vector[i] = None else:", "Dg[11] = sp.Matrix([sp.Symbol('L_1_17'), sp.Symbol('L_2_17'), sp.Symbol('L_3_17')]) Dg[12] = sp.Matrix([sp.Symbol('L_1_18'), 0.0 ,", "results out_file.write('\\n // -- Collecting results -- //\\n\\n') com_compute(out_file, nb_bodies,", "if k == nb_bodies-1: if flag_first: out_file.write(')/m_tot;\\n') else: out_file.write('0.0;\\n') if", "for i in range(0,3): out_file.write(' in_out.r_Lwrist[{}] = {};\\n'.format(i, x_l[i])) out_file.write('\\n')", "2: if len(cut_line[0].split('[')) == 1: if count == 0: out_write.write('", "= int(value.split('_')[1][1]) return True, a, b, c except: return False,", "derivative of an expression (for jacobian) def symbolic_jacob_der(Rj, xj, xgj,", "compute the roll, pitch, yaw angles (and derivative) def yaw_pitch_roll_angles(out_file,", "position\\n') for i in range(0,3): out_file.write(' in_out.r_Lwrist[{}] = {};\\n'.format(i, 
x_l[i]))", "with the ground (info from IMU)\\n') out_file.write(' *\\n') out_file.write(' *", "'M_7' , 'M_8' , 'M_9' , 'M_10', 'M_11', 'M_12', #", "torso_id): out_file.write(' // waist orientation matrix as angles [rad]\\n') yaw_pitch_roll_angles(out_file,", "= sin(in_out.theta_torso[2]);\\n\\n') out_file.write(' if ((!c_y_waist) || (!c_y_torso))\\n {\\n') out_file.write(' return;\\n", "as angles [rad]\\n') yaw_pitch_roll_angles(out_file, 'theta_torso', R[torso_id], 1) out_file.write('\\n') out_file.write(' c_y_waist", "of body i # Ri : absolute rotational matrix #", "= sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_50')]) ## COM positions Dg = nb_bodies*[None]", "sp.zeros(3, 3) R[0] = sp.zeros(3, 3) for i in range(0,", "# right leg Dpt[1] = sp.Matrix([0.0, sp.Symbol('DPT_2_2'), 0.0]) Dpt[2] =", "vector axis def get_vector_axis(axis, direct, elem): if direct: if axis", "in range(0,9): cur_jac = symbolic_jacob_der(Rj, xj, xgj, R_r_elb[j], 'q{}'.format(i+1)) if", "(Rt[parent_id] * Dpt[i]) write_symb_xj(nb_bodies, Rj, xj, xgj, xj_print, x[i], i+1)", "der_q = int(der_var.replace('q','')) # detect positive/negative elem_split = elem_str.split('-') cur_len", "'RightShPitch_id' , 'RightShRoll_id' , 'RightShYaw_id' , 'RightElbPitch_id', # right arm", "c_z_Rfoot = cos(in_out.theta_Rfoot[2]);\\n') out_file.write(' c_z_Lfoot = cos(in_out.theta_Lfoot[2]);\\n\\n') out_file.write(' s_y_Rfoot =", "rotational vector of body i\\n') out_file.write(' * Ri : absolute", "# print all declarations def print_all_declaration(in_file, out_write, nb_max_char): count =", "}\\n\\n') out_file.write(' // wrists absolute orientation\\n') for i in range(0,", "out_file.close() # remove temporary file os.remove(in_temp) # main script #", "= write_symb_vector(out_file, xp[0], 'xp1_', '') for i in range(1, nb_bodies):", "for i in range(0, nb_contacts): Dpt_r_foot_cont[i][2] = sp.Symbol('DPT_3_16') for i", "1) Dpt_l_foot_cont[i] = sp.zeros(3, 1) Dpt_r_foot_cont[0][0] = x_min Dpt_r_foot_cont[1][0] =", "1 
out_file.write('\\n') out_file.write(' in_out.r_Rwrist_der[{}][{}] = {};\\n'.format(get_string_enum(joint_id_names[i]), j, cur_jac)) out_file.write(' }\\n\\n')", "neg_flag = 1 pos_str = elem_split[1] else: print('Error: {} instead", "xp[0] = om_tilde[0] * (Rt[0] * Dpt[0]) write_symb_xj(nb_bodies, Rj, xj,", "end_name) out_file.write(' {} = {};\\n'.format(elem_name, vector[i]).replace('1.0*','')) new_vector[i] = sp.Symbol(elem_name) if", "joints for i in range (1, nb_bodies): new_vector = sp.zeros(3,", "== 3: return np.array([[cosine, -sine, 0.0], [sine, cosine, 0.0], [0.0,", "mass) G_i, # expressed in the relative frame of the", "if xgj[k][i][j] != 0: if flag_first: out_file.write(' + {}*{}'.format(M[k], str(xgj[k][i][j])))", "2, 1, 3, 2, # right arm 2, 1, 3,", "= x[r_elb_id] x_l_elb = x[l_elb_id] xp_r_elb = xp[r_elb_id] xp_l_elb =", "out_file.write(' s_z_Rfoot = sin(in_out.theta_Rfoot[2]);\\n') out_file.write(' s_z_Lfoot = sin(in_out.theta_Lfoot[2]);\\n\\n') out_file.write(' if", "new_vector[j] = symbolic_jacob_der(Rj, xj, xgj, x_vector[j], 'q{}'.format(i+1)) [xgj[index-1][i], xgj_print[index-1][i]] =", "xgj, der_var): # element to derive (string) elem_str = elem_str.replace('-", "i == 1: out_file.write('{}'.format(get_string_enum(joint_id_names[i]))) elif count >= 6: count =", "# 03 09 # # 04 10 # # 05", "torso_waist_angles(out_file, R, om, waist_id, torso_id): out_file.write(' // waist orientation matrix", "= sp.zeros(3, 1) for i in range(0,3): om[0][i] = sp.Symbol('omega_{}'.format(i+1))", "range(1, nb_bodies): flag_print = 0 for k in range(0, 3):", "i+1, j+1)) if i == 2 and j == 2:", ", 'RightShRoll_id' , 'RightShYaw_id' , 'RightElbPitch_id', # right arm 'LeftShPitch_id'", "angle derivatives [rad/s]\\n') theta_dot_compute(out_file, om[torso_id], 'omega_torso', 'torso') # compute the", "Rj, xj, xgj, r_elb_id, l_elb_id, r_wrist_x, r_wrist_y, r_wrist_z): # symbolic", "= r_wrist_x Dpt_l_wrist[1] = -r_wrist_y Dpt_l_wrist[2] = r_wrist_z # computation", 
"sp.Symbol('DPT_3_50')]) ## COM positions Dg = nb_bodies*[None] # waist Dg[0]", "xp[r_foot_id] xp_l_foot = xp[l_foot_id] om_r_foot = om[r_foot_id] om_l_foot = om[l_foot_id]", "out_file.write(' in_out.r_Rwrist[{}] = {};\\n'.format(i, x_r[i])) out_file.write('\\n') out_file.write(' // right wrist", "Dpt_l_wrist[1] = -r_wrist_y Dpt_l_wrist[2] = r_wrist_z # computation om_tilde_r_elb =", "joint_id_names = np.array(['0', # waist 'RightHipPitch_id', 'RightHipRoll_id', 'RightHipYaw_id', 'RightKneePitch_id', 'RightFootRoll_id',", "(center of mass) G_i, # expressed in the relative frame", "a, b, c except: return False, -1, -1, -1 #", "jacobian of a rotation matrix def write_symb_Rj(nb_bodies, Rj, xj, xgj,", "{}*{}));\\n'.format(angle_name, R_matrix[2], R_matrix[0], R_matrix[0], R_matrix[1], R_matrix[1])) out_file.write(' in_out.{}[2] = atan2(-{},", "parent_id = parent_body_index[i] Om[i] = get_vector_axis(rot_axis[i], 1, sp.Symbol('Om{}'.format(i+1))) om[i] =", "0 for j in range(0,9): cur_jac = symbolic_jacob_der(Rj, xj, xgj,", "sp.Symbol('DPT_2_2'), 0.0]) Dpt[2] = sp.Matrix([0.0, sp.Symbol('DPT_2_6'), 0.0]) Dpt[3] = sp.Matrix([0.0,", "atan2({}, {});\\n'.format(angle_name, R_matrix[5], R_matrix[8])) out_file.write(' in_out.{}[1] = atan2(-{}, sqrt({}*{} +", "# ci : cosine of the relative angle before body", "else: out_file.write(', ') # variables initialization def write_intialization(out_file, nb_bodies, joint_id_names):", "left leg 1, 2, 3, # trunk 2, 1, 3,", "not always body i-1), expressed in the relative # frame", "torso orientation angle derivatives [rad/s]\\n') theta_dot_compute(out_file, om[torso_id], 'omega_torso', 'torso') #", "sp.Symbol('DPT_3_16') Dpt_l_foot[2] = sp.Symbol('DPT_3_29') Dpt_r_foot_cont = nb_contacts * [None] Dpt_l_foot_cont", "count != 0: out_write.write(';\\n') # print all declarations def print_all_declaration(in_file,", "xj[i][j] = sp.zeros(3, 1) xgj[i][j] = sp.zeros(3, 1) Rj_print[i][j] =", "of a rotation matrix def write_symb_Rj(nb_bodies, Rj, 
xj, xgj, Rj_print,", "from the anchor point of body i to its COM", "// joint cosines\\n') for i in range(1, nb_bodies): out_file.write(' c{}", "new_matrix = sp.zeros(3, 3) # loop on all the matrix", "loop on all the matrix elements for j in range(0,", "for line in f: cut_line = line.split(elem) if len(cut_line) ==", "nb_bodies*[None] xj_print = nb_bodies*[None] xgj_print = nb_bodies*[None] for i in", "body to the current body i # (previous body is", "c_y_Rfoot = cos(in_out.theta_Rfoot[1]);\\n') out_file.write(' c_y_Lfoot = cos(in_out.theta_Lfoot[1]);\\n') out_file.write(' c_z_Rfoot =", "for j in range(0, nb_bodies-1): Rj[i][j] = sp.zeros(3, 3) xj[i][j]", "f: # declaration if len(line.split('// -- variables initialization -- //'))", "matrix of Ri # xji : jacobian of 'xi' #", "frame of the previous body # Rdi : rotational matrix", "(!c_y_torso))\\n {\\n') out_file.write(' return;\\n }\\n\\n') out_file.write(' inv_c_y_waist = 1.0 /", "xj, xgj, Rj_print, R[0], 1) R[0] = write_symb_matrix(out_file, R[0], 'R1_',", "# of the anchor point of body i # xgi", "-0.045, 0.045) wrists_compute(out_file, joint_id_names, R, x, xp, om, Rj, xj,", "if it has a shape 'R%a_%b%c' (indexes %a, %b, %c", "Dpt_r_foot_cont[1][1] = y_max Dpt_r_foot_cont[2][1] = y_min Dpt_r_foot_cont[3][1] = y_max for", "}\\n\\n') # from an orientation matrix, compute the roll, pitch,", "Dpt[6] = sp.Matrix([0.0, 0.0, 0.0]) # left leg Dpt[7] =", "return np.array([]) # compute the derivative of an element (for", "get rotation matrix def get_rotation_matrix(axis, direct, cosine, sine): if direct:", "elif axis == 2: return np.array([[cosine, 0.0, sine], [0.0, 1.0,", "= {};\\n'.format(get_string_enum(joint_id_names[i]), j, cur_jac)) out_file.write(' }\\n\\n') # get a string", "Dpt_l_foot_cont = nb_contacts * [None] for i in range(0, nb_contacts):", "from IMU)\\n') out_file.write(' *\\n') out_file.write(' * Di : position vector", "element def print_declaration_elem(in_file, out_write, elem, nb_max_line): if 
count_elem(in_file, '{}'.format(elem)) >=", "len(parent_body_index) ## anchor point positions Dpt = nb_bodies*[None] # waist", "1 elif not flag_print: flag_print = 1 out_file.write('\\n') out_file.write(' in_out.r_Lfoot_der[{}][{}]", "xgj, 19, 23, -0.02, -0.005, -0.225) torso_waist_angles(out_file, R, om, 0,", "0.0, 1.0]]) else: return np.array([]) # get vector axis def", "derive (string) elem_str = elem_str.replace('- ','-').strip() # derivative axis der_q", "else: flag_first = 1 out_file.write('({}*{}'.format(M[k], str(xgj[k][i][j]))) if k == nb_bodies-1:", ", sp.Symbol('L_2_9') , sp.Symbol('L_3_9')]) Dg[4] = sp.Matrix([sp.Symbol('L_1_10'), sp.Symbol('L_2_10'), sp.Symbol('L_3_10')]) Dg[5]", "om_tilde_r_foot = get_tilde(om_r_foot) om_tilde_l_foot = get_tilde(om_l_foot) x_r = x_r_foot +", "contact points jacobian\\n') out_file.write(' if (flag_jacob)\\n {\\n') flag_first = 0", "Dpt_r_foot_cont[i][2] = sp.Symbol('DPT_3_16') for i in range(0, nb_contacts): for j", "torso orientation matrix as angles [rad]\\n') yaw_pitch_roll_angles(out_file, 'theta_torso', R[torso_id], 1)", "angle before body i # ci : cosine of the", "= in_out.IMU_Orientation[{}];\\n'.format(i+1, j+1, 3*i+j)) out_file.write('\\n // IMU - angles velocity\\n')", "(jacobian) Rj = nb_bodies*[None] xj = nb_bodies*[None] xgj = nb_bodies*[None]", "x_r_foot = x[r_foot_id] x_l_foot = x[l_foot_id] xp_r_foot = xp[r_foot_id] xp_l_foot", "prefix): out_file.write(' double ') for i in range(0,3): for j", "// IMU - rotation matrices\\n') for i in range(0, 3):", "= nb_contacts * [None] Dpt_l_foot_cont = nb_contacts * [None] for", "of 'xgi' # Rji : jacobian of 'Ri' # return", "its predecessor\\n') out_file.write(' * si : sine of the relative", "// rotation matrices\\n') R = nb_bodies*[None] Rt = nb_bodies*[None] Rd", "out_file.write('\\n') out_file.write(' in_out.r_Lfoot_cont_der[{}][{}][{}] = {};\\n'.format(i, get_string_enum(joint_id_names[j]), k, cur_jac)) out_file.write(' }\\n\\n')", "the waist, middle point 
between the two pitch hip rotations\\n')", "xj, xgj, x_l[j], 'q{}'.format(i+1)) if cur_jac != 0: if not", "'LeftElbPitch_id' # left arm ]) out_file_name = 'forward_kinematics' gen_symbolic_out(out_file_name, nb_bodies,", "point of body i to its COM (center of mass)", "matrices out_file.write('\\n // jacobian rotation matrices\\n') out_file.write(' if (flag_jacob)\\n {\\n')", "R[i].T # jacobian rotation matrices out_file.write('\\n // jacobian rotation matrices\\n')", "in range(0, nb_contacts): Dpt_r_foot_cont[i] = sp.zeros(3, 1) Dpt_l_foot_cont[i] = sp.zeros(3,", "== nb_bodies-1: if flag_first: out_file.write(')/m_tot;\\n') else: out_file.write('0.0;\\n') out_file.write('\\n') out_file.write(' //", "foot absolute velocity\\n') for i in range(0,3): out_file.write(' in_out.rp_Lfoot[{}] =", "# # 04 10 # # 05 11 # #", "count = 0 if count != 0: out_write.write(';\\n') # print", "out_file.write(' return;\\n }\\n\\n') out_file.write(' inv_c_y_waist = 1.0 / c_y_waist;\\n') out_file.write('", "+= der_elem(cur_term_split[1], Rj, xj, xgj, der_var)*sp.Symbol(cur_term_split[0].strip()) # other else: print('Error:", "matrix of Ri\\n') out_file.write(' * xji : jacobian of \\'xi\\'\\n')", "0.0, sp.Symbol('DPT_3_48')]) Dpt[23] = sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_50')]) ## COM positions", "parent_body_index = np.array([ -1, # waist 0, 1, 2, 3,", "* computation of:\\n') out_file.write(' * COM (center of mass) position", "i in range(0, 3): out_file.write(' in_out.r_COM[{}] = '.format(i)) flag_first =", "'omega_torso', 'torso') # compute the feet position, velocity and orientation", "rot_axis, parent_body_index, Dpt, Dg, M) # end of the file", "// right wrist jacobian\\n') out_file.write(' if (flag_jacob)\\n {\\n') flag_first =", "out_file.write(' in_out.Rwrist_or_der[{}][{}] = {};\\n'.format(get_string_enum(joint_id_names[i]), j, cur_jac)) out_file.write(' }\\n\\n') out_file.write(' //", "def get_string_enum(cur_string): cur_split = cur_string.split('_') if len(cur_split) >= 2: 
new_string", "om[i] = write_symb_vector(out_file, om[i], 'om{}_'.format(i+1), '') om_tilde[i] = get_tilde(om[i]) #", "0.0]) # left leg Dpt[7] = sp.Matrix([0.0, sp.Symbol('DPT_2_3'), 0.0]) Dpt[8]", "body i # (previous body is not always body i-1),", "relative angle before body i\\n') out_file.write(' *\\n') out_file.write(' * xi", "= sp.zeros(3, 1) Dpt_l_wrist = sp.zeros(3, 1) Dpt_r_wrist[0] = r_wrist_x", "symbolic_jacob_der(Rj, xj, xgj, x_l_cont[i][k], 'q{}'.format(j+1)) if cur_jac != 0: if", "-0.06, 0.08, -0.045, 0.045) wrists_compute(out_file, joint_id_names, R, x, xp, om,", ", sp.Symbol('L_2_8') , sp.Symbol('L_3_8')]) Dg[3] = sp.Matrix([sp.Symbol('L_1_9') , sp.Symbol('L_2_9') ,", "\\n') out_file.write(' * (previous body is not always body i-1),", "{}'.format(get_string_enum(joint_id_names[i]))) else: out_file.write(', {}'.format(get_string_enum(joint_id_names[i]))) out_file.write('};\\n\\n') out_file.write('/*! \\\\brief main kinematics computation\\n')", "absolute velocity\\n') for i in range(0,3): out_file.write(' in_out.rp_Lfoot[{}] = {};\\n'.format(i,", "2, 3, 4, 5, # right leg 0, 7, 8,", "= print_save_symb_vector(new_vector, 'x{}_'.format(index), '_d{}'.format(i+1)) # write symbolic jacobian of a", "','-').strip() # derivative axis der_q = int(der_var.replace('q','')) # detect positive/negative", "2, 1, 3, 2, 1, 2, # left leg 1,", "{};\\n'.format(i, xp_l[i])) out_file.write('\\n') out_file.write(' // left foot jacobian\\n') out_file.write(' if", "body_part, omega_in[0])) out_file.write(' in_out.{}[2] = inv_c_y_{} * s_y_{} * (s_z_{}*{}", "= get_tilde(om_l_foot) x_r = x_r_foot + R_r_foot.T * Dpt_r_foot x_l", "= elem_split[1] else: print('Error: {} instead of 1 or 2", "# symbolic variables declarations nb_contacts = 4 x_r_foot = x[r_foot_id]", "out_file.write('\\n') out_file.write(' // left foot absolute velocity\\n') for i in", "if direct: if axis == 1: return np.array([[elem], [0.0], [0.0]])", "range(0, 3): if xgj_print[i][j][k] != None: if not flag_first: 
flag_first", "from the previous body to the current body i \\n')", "of xgi\\n') out_file.write(' * omi : absolute rotational vector of", "its COM (center of mass) G_i, # expressed in the", "body i to its COM (center of mass) G_i,\\n') out_file.write('", "symbolic output file def gen_symbolic_out(out_file_name, nb_bodies, rot_axis, parent_body_index, joint_id_names, Dpt,", "= sp.Matrix([sp.Symbol('L_1_12'), 0.0 , sp.Symbol('L_3_12')]) # left leg Dg[7] =", "it has a shape 'R%a_%b%c' (indexes %a, %b, %c also", "','-').replace('-','+-').split('+') if term_list[0] == '': term_list.pop(0) result = 0 #", "return new_string # write the end of the file def", "x_min, x_max, y_min, y_max): # symbolic variables declarations nb_contacts =", "Dg[20] = sp.Matrix([sp.Symbol('L_1_26'), sp.Symbol('L_2_26'), sp.Symbol('L_3_26')]) Dg[21] = sp.Matrix([sp.Symbol('L_1_27'), sp.Symbol('L_2_27'), sp.Symbol('L_3_27')])", "expressed in the inertial frame)\\n') out_file.write(' * of the anchor", "rotational matrix between body i and its predecessor\\n') out_file.write(' *", "if i == 1: out_file.write('{}'.format(get_string_enum(joint_id_names[i]))) elif count >= 6: count", "Rdi : rotational matrix between body i and its predecessor\\n')", "== nb_bodies-1: if flag_first: out_file.write(')/m_tot;\\n') else: out_file.write('0.0;\\n') if i !=", "om_r_foot = om[r_foot_id] om_l_foot = om[l_foot_id] R_r_foot = R[r_foot_id] R_l_foot", "wrists_compute(out_file, joint_id_names, R, x, xp, om, Rj, xj, xgj, 19,", "else: return np.array([]) # compute the derivative of an element", "2, # right leg 2, 1, 3, 2, 1, 2,", "i in range (1, nb_bodies): flag_print = 0 for j", "cut_line_2 = line.split(' = ') if len(cut_line_1) == 2 and", "nb_bodies): out_file.write(' Om{} = in_out.qd_mot[{}];\\n'.format(i+1, joint_id_names[i])) # write symbolic vector", "len(elem_split) if cur_len == 1: # positive neg_flag = 0", "'R%a_%b%c' (indexes %a, %b, %c also returned) def isRot(value): try:", "3 * [None] # rotation matrices 
out_file.write('\\n // rotation matrices\\n')", "of xi # xgpi : derivative of xgi # omi", "nb_bodies): if flag_first: out_file.write(' + {}*{}'.format(M[j], xg[j][i])) else: flag_first =", "float def isInt(value): try: int(value) return True except: return False", ": jacobian of 'Ri' # return true if it is", "cur_jac)) out_file.write(' }\\n\\n') out_file.write(' // right foot contact points absolute", "foot contact points absolute position\\n') for i in range(0, nb_contacts):", "the derivative of an element (for jacobian) def der_elem(elem_str, Rj,", "cur_jac)) out_file.write(' }\\n\\n') out_file.write(' // feet absolute orientation\\n') for i", "s{} = sin(in_out.q_mot[{}]);\\n'.format(i+1, joint_id_names[i])) out_file.write('\\n // joint relative velocities\\n') for", "wrist jacobian\\n') out_file.write(' if (flag_jacob)\\n {\\n') flag_first = 0 for", "sp.Symbol('c{}'.format(i+1)), sp.Symbol('s{}'.format(i+1))) R[i] = Rd[i] * R[parent_body_index[i]] write_symb_Rj(nb_bodies, Rj, xj,", "out_file.write(' in_out.r_Rfoot_der[{}][{}] = {};\\n'.format(get_string_enum(joint_id_names[i]), j, cur_jac)) out_file.write(' }\\n\\n') out_file.write(' //", "flag_first = 1 out_file.write('({}*{}'.format(M[k], str(xgj[k][i][j]))) if k == nb_bodies-1: if", "R_r_foot.T * Dpt_r_foot x_l = x_l_foot + R_l_foot.T * Dpt_l_foot", "outputs out_file.write(' // right wrist absolute position\\n') for i in", "out_file.write(' // left foot contact points jacobian\\n') out_file.write(' if (flag_jacob)\\n", "i # Omi : rotational vector from the previous body", "2:y, 3:z) rot_axis = np.array([0, # waist 2, 1, 3,", "Dpt[5] = sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_12')]) Dpt[6] = sp.Matrix([0.0, 0.0, 0.0])", "always body i-1), expressed in the relative # frame of", "nb_bodies, joint_id_names, M, xg, xgp, xgj) feet_compute(out_file, joint_id_names, R, x,", "double {}'.format(cut_line[0].strip())) else: out_write.write(', {}'.format(cut_line[0].strip())) count += len(cut_line[0].strip()) + 2", "xp[i] = 
write_symb_vector(out_file, xp[i], 'xp{}_'.format(i+1), '') # jacobian x out_file.write('\\n", "1: out_file.write('{}'.format(get_string_enum(joint_id_names[i]))) elif count >= 6: count = 0 out_file.write(',\\n", "out_file.write(';\\n') else: out_file.write(', ') # print variables declaration def write_variables_declaration(out_file,", "= len(cur_term_split) # no product if cur_len == 1: result", "Rt[parent_id] * Om[i] om[i] = write_symb_vector(out_file, om[i], 'om{}_'.format(i+1), '') om_tilde[i]", "in range(1, nb_bodies): parent_id = parent_body_index[i] Om[i] = get_vector_axis(rot_axis[i], 1,", "shape 'x%a_%b' (indexes %a, %b also returned) def isVec(value): try:", "Dpt_l_foot[2] = sp.Symbol('DPT_3_29') Dpt_r_foot_cont = nb_contacts * [None] Dpt_l_foot_cont =", "# 19 14 23 # # 20 01 24 #", "= {};\\n'.format(i, xp_l[i])) out_file.write('\\n') out_file.write(' // left foot jacobian\\n') out_file.write('", "symbolic variable by its name def write_symb_vector(out_file, vector, start_name, end_name):", "== 2 and len(cut_line_2) == 2: if len(cut_line_2[0].split('[')) == 1:", "range(0, 3): if xj_print[i][j][k] != None: if not flag_first: flag_first", "a, b, c] = isRot(pos_str) [vec_flag, d, e] = isVec(pos_str)", "// left wrist jacobian\\n') out_file.write(' if (flag_jacob)\\n {\\n') flag_first =", ", 'M_10', 'M_11', 'M_12', # right leg 'M_13', 'M_14', 'M_15',", "left arm Dg[20] = sp.Matrix([sp.Symbol('L_1_26'), sp.Symbol('L_2_26'), sp.Symbol('L_3_26')]) Dg[21] = sp.Matrix([sp.Symbol('L_1_27'),", "torso_waist_angles(out_file, R, om, 0, 15) # generate the symbolic output", "len(cut_line_2[0].split('[')) == 1: if count == 0: out_write.write(' double {}'.format(cut_line_2[0].strip()))", "R_matrix[0], R_matrix[1], R_matrix[1])) out_file.write(' in_out.{}[2] = atan2(-{}, -{});\\n'.format(angle_name, R_matrix[1], R_matrix[0]))", "if len(cut_line[0].split('[')) == 1: if count == 0: out_write.write(' double", "nb_contacts * [None] x_l_cont = nb_contacts * [None] # computation", 
"sp.Symbol('L_2_10'), sp.Symbol('L_3_10')]) Dg[5] = sp.Matrix([sp.Symbol('L_1_11'), sp.Symbol('L_2_11'), sp.Symbol('L_3_11')]) Dg[6] = sp.Matrix([sp.Symbol('L_1_12'),", "!= 0: out_write.write(';\\n') # get tilde matrix def get_tilde(v): return", "xgj_print, x_vector, index): # loop on all the joints for", "sp.zeros(3, 1) Dpt_l_foot_cont[i] = sp.zeros(3, 1) Dpt_r_foot_cont[0][0] = x_min Dpt_r_foot_cont[1][0]", "(flag_jacob)\\n {\\n') flag_first = 0 for i in range(0, nb_bodies):", "x_r[j], 'q{}'.format(i+1)) if cur_jac != 0: if not flag_first: flag_first", "\\\\author <NAME>\\n') out_file.write(' * \\\\file forward_kinematics.cc\\n') out_file.write(' * \\\\brief forward", "frame of the previous body\\n') out_file.write(' * Rdi : rotational", "sp.Symbol('c{}'.format(der_q)) # other else: [rot_flag, a, b, c] = isRot(pos_str)", "wrist absolute velocity\\n') for i in range(0,3): out_file.write(' in_out.rp_Rwrist[{}] =", "flag_print = 1 out_file.write('\\n') out_file.write(' in_out.r_Lwrist_der[{}][{}] = {};\\n'.format(get_string_enum(joint_id_names[i]), j, cur_jac))", "symbolic computation def symbolic_computation(out_file, nb_bodies, joint_id_names, rot_axis, parent_body_index, Dpt, Dg,", "[pi/2 ; 3*pi/2] out_file.write(' in_out.{}[0] = atan2(-{}, -{});\\n'.format(angle_name, R_matrix[5], R_matrix[8]))", "# x & xp out_file.write('\\n // anchor point absolute positions", "}\\n') # xg & xgp out_file.write('\\n // com absolute positions", "trunk 2, 1, 3, 2, # right arm 2, 1,", "Dpt, Dg, M): out_file.write('\\n\\n // -- symbolic computation -- //\\n')", "symbolic_jacob_der(Rj, xj, xgj, R_l_elb[j], 'q{}'.format(i+1)) if cur_jac != 0: if", "return result # compute the derivative of an expression (for", "body i and its predecessor\\n') out_file.write(' * si : sine", "out_file.write(' // feet absolute orientation\\n') for i in range(0, 9):", "* frame of the previous body\\n') out_file.write(' * Rdi :", "absolute orientation\\n') for i in range(0, 9): out_file.write(' 
in_out.Rfoot_or[{}] =", "i in range(0,3): out_file.write(' in_out.rp_Rwrist[{}] = {};\\n'.format(i, xp_r[i])) out_file.write('\\n') out_file.write('", "sp.Symbol('L_2_26'), sp.Symbol('L_3_26')]) Dg[21] = sp.Matrix([sp.Symbol('L_1_27'), sp.Symbol('L_2_27'), sp.Symbol('L_3_27')]) Dg[22] = sp.Matrix([sp.Symbol('L_1_28'),", "absolute position vector of the COM G_i of body i", "i in range(1, len(joint_id_names)): count += 1 if i ==", "R[i], 'R{}_'.format(i+1), '') Rt[i] = R[i].T # jacobian rotation matrices", "loop on all the joints for i in range (1,", "DGi : position vector from the anchor point of body", "flag_first = 0 for k in range(0, nb_bodies): if xgj[k][i][j]", "-sqrt({}*{} + {}*{}));\\n'.format(angle_name, R_matrix[2], R_matrix[0], R_matrix[0], R_matrix[1], R_matrix[1])) out_file.write(' in_out.{}[2]", "= cos(in_out.theta_waist[1]);\\n') out_file.write(' c_y_torso = cos(in_out.theta_torso[1]);\\n') out_file.write(' c_z_waist = cos(in_out.theta_waist[2]);\\n')", "3): cur_jac = symbolic_jacob_der(Rj, xj, xgj, x_l_cont[i][k], 'q{}'.format(j+1)) if cur_jac", "out_file.write('\\n') out_file.write(' // torso orientation angle derivatives [rad/s]\\n') theta_dot_compute(out_file, om[torso_id],", "outputs out_file.write(' // right foot absolute position\\n') for i in", "atan2(-{}, -{});\\n'.format(angle_name, R_matrix[1], R_matrix[0])) # compute the time derivatives of", "not flag_print: flag_print = 1 out_file.write('\\n') out_file.write(' in_out.Rfoot_or_der[{}][{}] = {};\\n'.format(get_string_enum(joint_id_names[i]),", "xp, om, Rj, xj, xgj, 6, 12, -0.06, 0.08, -0.045,", "np.array([[0.0], [0.0], [elem]]) else: return np.array([]) else: if axis ==", "-- //\\n\\n') print_all_declaration(in_temp, out_file, 100) out_file.write('\\n\\n') # copy temporary file", "(flag_jacob)\\n {\\n') for i in range(1, nb_bodies): for j in", "waist orientation angle derivatives [rad/s]\\n') theta_dot_compute(out_file, om[waist_id], 'omega_waist', 'waist') out_file.write('\\n')", "for j in 
range(0, 9): new_matrix[j] = symbolic_jacob_der(Rj, xj, xgj,", "= sp.Matrix([0.0, 0.0, 0.0]) # right leg Dpt[1] = sp.Matrix([0.0,", "out_file.write(' * // 06 12 //\\n') out_file.write(' * // 07", "3): out_file.write(' omega_{} = in_out.IMU_Angular_Rate[{}];\\n'.format(i+1, i)) out_file.write('\\n // joint cosines\\n')", "# right arm 2, 1, 3, 2 # left arm", "origin, expressed in the inertial frame)\\n') out_file.write(' * of the", "& xp out_file.write('\\n // anchor point absolute positions and velocities\\n')", "+= sp.Symbol('c{}'.format(der_q)) # other else: [rot_flag, a, b, c] =", "22 # left arm ]) nb_bodies = len(parent_body_index) ## anchor", "= r_wrist_x Dpt_r_wrist[1] = r_wrist_y Dpt_r_wrist[2] = r_wrist_z Dpt_l_wrist[0] =", "derivative axis der_q = int(der_var.replace('q','')) # detect positive/negative elem_split =", "= 1 out_file.write('({}*xg{}_{}'.format(M[j], j+1, i+1)) if j == nb_bodies-1: if", "# write symbolic matrix and replace symbolic variable by its", "//\\n') out_file.write(' * // 05 11 //\\n') out_file.write(' * //", "= Rd[i] * R[parent_body_index[i]] write_symb_Rj(nb_bodies, Rj, xj, xgj, Rj_print, R[i],", "Dpt[18] = sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_41')]) Dpt[19] = sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_43')])", "the roll, pitch, yaw angles (and derivative) def yaw_pitch_roll_angles(out_file, angle_name,", "{};\\n'.format(i, xp_r[i])) out_file.write('\\n') out_file.write(' // right wrist jacobian\\n') out_file.write(' if", "= sp.Matrix([0.0, sp.Symbol('DPT_2_3'), 0.0]) Dpt[8] = sp.Matrix([0.0, sp.Symbol('DPT_2_18'), 0.0]) Dpt[9]", "* Dpt_l_foot_cont[i] # writing outputs out_file.write(' // right foot absolute", "inertial frame: located at the origin (waist), but aligned with", "Dg[1] = sp.Matrix([sp.Symbol('L_1_7') , sp.Symbol('L_2_7') , sp.Symbol('L_3_7')]) Dg[2] = sp.Matrix([sp.Symbol('L_1_8')", "sp.Symbol('omega_{}'.format(i+1)) om[0] = write_symb_vector(out_file, om[0], 'om1_', '') om_tilde[0] = get_tilde(om[0])", "waist Dpt[0] = 
sp.Matrix([0.0, 0.0, 0.0]) # right leg Dpt[1]", "23 # # 20 01 24 # # 02 08", "ci : cosine of the relative angle before body i", "05 11 //\\n') out_file.write(' * // 06 12 //\\n') out_file.write('", "out_file.write('\\n') out_file.write(' // left foot orientation matrix as angles [rad]\\n')", "= -result return result # compute the derivative of an", "else: out_file.write(' }\\n\\n') # from an orientation matrix, compute the", "def feet_compute(out_file, joint_id_names, R, x, xp, om, Rj, xj, xgj,", "* Rji : jacobian of \\'Ri\\'\\n') out_file.write(' */\\n') out_file.write('void ForwardKinematics::main_kinematics(KinematicsInOut", "# right leg 0, 7, 8, 9, 10, 11, #", "out_file.write('\\n\\n // -- symbolic computation -- //\\n') # Rj, xj,", "velocity\\n') for i in range(0, 3): out_file.write(' omega_{} = in_out.IMU_Angular_Rate[{}];\\n'.format(i+1,", "product if cur_len == 1: result += der_elem(cur_term_split[0], Rj, xj,", "* xgi : absolute position vector of the COM G_i", "[-sine, cosine, 0.0], [0.0, 0.0, 1.0]]) else: return np.array([]) else:", "{\\n') for i in range(1, nb_bodies): for j in range(0,", "len(cut_line) == 2: if len(cut_line[0].split('[')) == 1: if count ==", "0.0], [-sine, cosine, 0.0], [0.0, 0.0, 1.0]]) else: return np.array([])", "-r_wrist_y Dpt_l_wrist[2] = r_wrist_z # computation om_tilde_r_elb = get_tilde(om_r_elb) om_tilde_l_elb", "sp.Symbol('L_3_14')]) Dg[9] = sp.Matrix([sp.Symbol('L_1_15'), sp.Symbol('L_2_15'), sp.Symbol('L_3_15')]) Dg[10] = sp.Matrix([sp.Symbol('L_1_16'), sp.Symbol('L_2_16'),", "and len(cut_line_2) == 2: if len(cut_line_2[0].split('[')) == 1: if count", "print def print_save_symb_matrix(matrix, start_name, end_name): new_matrix = sp.zeros(3, 3) save_matrix", "'s{}'.format(der_q): result += sp.Symbol('c{}'.format(der_q)) # other else: [rot_flag, a, b,", "Dg[2] = sp.Matrix([sp.Symbol('L_1_8') , sp.Symbol('L_2_8') , sp.Symbol('L_3_8')]) Dg[3] = sp.Matrix([sp.Symbol('L_1_9')", "xgj, der_var) # one product elif cur_len == 2: 
result", "= om[l_foot_id] R_r_foot = R[r_foot_id] R_l_foot = R[l_foot_id] Dpt_r_foot =", "cos(in_out.theta_Rfoot[1]);\\n') out_file.write(' c_y_Lfoot = cos(in_out.theta_Lfoot[1]);\\n') out_file.write(' c_z_Rfoot = cos(in_out.theta_Rfoot[2]);\\n') out_file.write('", "i\\n') out_file.write(' * xgi : absolute position vector of the", "out_file.write('\\n // com absolute positions and velocities\\n') xg = nb_bodies*[None]", "end_name) out_file.write(' {} = {};\\n'.format(elem_name, matrix[i,j]).replace('1.0*','')) new_matrix[i,j] = sp.Symbol(elem_name) if", "true if it has a shape 'R%a_%b%c' (indexes %a, %b,", "variables initialization -- //')) != 1: out_file.write(' // -- variables", "leg 0, 13, 14, # trunk 15, 16, 17, 18,", "try: int(value) return True except: return False # return true", "if count_elem(in_file, '{}'.format(elem)) >= 1: count = 0 with open(in_file,'r')", "out_file.write('\\n') out_file.write(' in_out.r_Lwrist_der[{}][{}] = {};\\n'.format(get_string_enum(joint_id_names[i]), j, cur_jac)) out_file.write(' }\\n\\n') out_file.write('", "xp[l_elb_id] om_r_elb = om[r_elb_id] om_l_elb = om[l_elb_id] R_r_elb = R[r_elb_id]", "COMAN model\\n') out_file.write(' */\\n\\n') out_file.write('// joints enumeration\\n') out_file.write('enum {') count", "with the ground (info from IMU) # # Di :", "// left foot orientation angle derivatives [rad/s]\\n') theta_dot_compute(out_file, om[l_foot_id], 'omega_Lfoot',", "xp_r[i])) out_file.write('\\n') out_file.write(' // right wrist jacobian\\n') out_file.write(' if (flag_jacob)\\n", "s_z_{}*{};\\n'.format(omega_out, body_part, omega_in[1], body_part, omega_in[0])) out_file.write(' in_out.{}[2] = inv_c_y_{} *", "numpy as np import sympy as sp import re import", "xp_r_foot + om_tilde_r_foot * (R_r_foot.T * Dpt_r_foot) xp_l = xp_l_foot", "for i in range(1, nb_bodies): out_file.write(' Om{} = in_out.qd_mot[{}];\\n'.format(i+1, joint_id_names[i]))", "3, 2, 1, 2, # left leg 1, 2, 3,", "np.array([[0.0], [-elem], [0.0]]) elif axis == 
3: return np.array([[0.0], [0.0],", "return np.array([[cosine, 0.0, sine], [0.0, 1.0, 0.0], [-sine, 0.0, cosine]])", "100) out_file.write('\\n\\n') # copy temporary file out_file.write(line) out_file.close() # remove", "the file def write_file_end(out_file): out_file.write('}\\n') # print matrix components declaration", "products cur_term_split = cur_term.split('*') cur_len = len(cur_term_split) # no product", "c_y_torso;\\n\\n') out_file.write(' // waist orientation angle derivatives [rad/s]\\n') theta_dot_compute(out_file, om[waist_id],", "result += xj[d-1][der_q-1][e-1] # apply negative if neg_flag: result =", "= int(value.split('_')[1][0]) c = int(value.split('_')[1][1]) return True, a, b, c", "0 for i in range(0, nb_contacts): for j in range", "cur_string cur_split = filter(None, re.split(\"([A-Z][^A-Z]*)\", new_string)) new_string = cur_split[0].upper() for", "computation def symbolic_computation(out_file, nb_bodies, joint_id_names, rot_axis, parent_body_index, Dpt, Dg, M):", "xgp[i] = write_symb_vector(out_file, xgp[i], 'xgp{}_'.format(i+1), '') # jacobian xg out_file.write('\\n", "= atan2({}, {});\\n'.format(angle_name, R_matrix[5], R_matrix[8])) out_file.write(' in_out.{}[1] = atan2(-{}, sqrt({}*{}", "'M_16', 'M_17', 'M_18', # left leg 'M_19', 'M_20', 'M_21', #", "Dpt[16] = sp.Matrix([sp.Symbol('DPT_1_36'), sp.Symbol('DPT_2_36'), sp.Symbol('DPT_3_36')]) Dpt[17] = sp.Matrix([0.0, sp.Symbol('DPT_2_39'), 0.0])", "else: print('Error: {} * counted , only implemented for 0", "rot_axis = np.array([0, # waist 2, 1, 3, 2, 1,", "Rj, xj, xgj, der_var)*sp.Symbol(cur_term_split[0].strip()) # other else: print('Error: {} *", "and replace symbolic variable by its name def write_symb_matrix(out_file, matrix,", "* (Rt[0] * Dpt[0]) write_symb_xj(nb_bodies, Rj, xj, xgj, xj_print, x[0],", "out_file.write(' in_out.{}[1] = atan2(-{}, -sqrt({}*{} + {}*{}));\\n'.format(angle_name, R_matrix[2], R_matrix[0], R_matrix[0],", "if axis == 1: return np.array([[elem], [0.0], [0.0]]) elif 
axis", "wrists position, velocity and orientation def wrists_compute(out_file, joint_id_names, R, x,", "j in range(0, 3): new_vector[j] = symbolic_jacob_der(Rj, xj, xgj, x_vector[j],", "in_out.Rwrist_or[{}] = {};\\n'.format(i, R_r_elb[i])) out_file.write('\\n') for i in range(0, 9):", "leg Dg[1] = sp.Matrix([sp.Symbol('L_1_7') , sp.Symbol('L_2_7') , sp.Symbol('L_3_7')]) Dg[2] =", "\\'Ri\\'\\n') out_file.write(' */\\n') out_file.write('void ForwardKinematics::main_kinematics(KinematicsInOut &in_out)\\n{\\n') # compute the center", "com_compute(out_file, nb_bodies, joint_id_names, M, xg, xgp, xgj): out_file.write(' m_tot =", "forward kinematics computation for the COMAN model\\n') out_file.write(' */\\n\\n') out_file.write('//", "atan2(-{}, -sqrt({}*{} + {}*{}));\\n'.format(angle_name, R_matrix[2], R_matrix[0], R_matrix[0], R_matrix[1], R_matrix[1])) out_file.write('", "x_max Dpt_r_foot_cont[0][1] = y_min Dpt_r_foot_cont[1][1] = y_max Dpt_r_foot_cont[2][1] = y_min", "declaration def write_matrix_declaration(out_file, prefix): out_file.write(' double ') for i in", "11, # left leg 0, 13, 14, # trunk 15,", "body is not always body i-1), expressed in the relative\\n')", "cur_len = len(elem_split) if cur_len == 1: # positive neg_flag", "returned) def isVec(value): try: a = int(value.split('_')[0].split('x')[1]) b = int(value.split('_')[1])", "0 or 1 !'.format(cur_len-1)) exit() return result # write the", "08 //\\n') out_file.write(' * // 03 09 //\\n') out_file.write(' *", "in range(0, 3): out_file.write(' in_out.r_COM[{}] = '.format(i)) flag_first = 0", "the lines for line in f: cut_line = line.split(' =", "omega_in, omega_out, body_part): out_file.write(' in_out.{}[0] = inv_c_y_{} * (c_z_{}*{} +", "with open(in_temp, 'r') as f: # loop on all the", "2, 1, 2, # left leg 1, 2, 3, #", "9 * [None] for i in range(0,3): for j in", "'q{}'.format(i+1)) [xgj[index-1][i], xgj_print[index-1][i]] = print_save_symb_vector(new_vector, 'xg{}_'.format(index), '_d{}'.format(i+1)) # 
symbolic computation", "inv_c_y_torso = 1.0 / c_y_torso;\\n\\n') out_file.write(' // waist orientation angle", "der_var)*sp.Symbol(cur_term_split[0].strip()) # other else: print('Error: {} * counted , only", "'TorsoRoll_id' , 'TorsoPitch_id' , 'TorsoYaw_id' , # trunk 'RightShPitch_id' ,", "= Rt[0] * Dpt[0] xp[0] = om_tilde[0] * (Rt[0] *", "rotation axis for each joint before body i (1:x, 2:y,", "variables declarations x_r_elb = x[r_elb_id] x_l_elb = x[l_elb_id] xp_r_elb =", "om_tilde[0] = get_tilde(om[0]) for i in range(1, nb_bodies): parent_id =", "'{}_{}'.format(new_string, cur_split[i].upper()) return new_string # write the end of the", "xgi : absolute position vector of the COM G_i of", "sp.Symbol('L_3_12')]) # left leg Dg[7] = sp.Matrix([sp.Symbol('L_1_13'), sp.Symbol('L_2_13'), sp.Symbol('L_3_13')]) Dg[8]", "out_file.write(' *\\n') out_file.write(' * xi : absolute position vector (from", "on all the lines for line in f: cut_line_1 =", "== 2: if len(cut_line[0].split('[')) == 1: if count == 0:", "-1, -1 # count the number of 'elem' in the", "R_matrix, epsilon): if epsilon > 0: # epsilon = 1", "the COM G_i of body i\\n') out_file.write(' * xpi :", "= cos(in_out.theta_torso[2]);\\n\\n') out_file.write(' s_y_waist = sin(in_out.theta_waist[1]);\\n') out_file.write(' s_y_torso = sin(in_out.theta_torso[1]);\\n')", "= cos(in_out.theta_Lfoot[2]);\\n\\n') out_file.write(' s_y_Rfoot = sin(in_out.theta_Rfoot[1]);\\n') out_file.write(' s_y_Lfoot = sin(in_out.theta_Lfoot[1]);\\n')", "left wrist jacobian\\n') out_file.write(' if (flag_jacob)\\n {\\n') flag_first = 0", "0 for i in range(0, nb_bodies): for j in range(1,", "sp.Symbol('L_3_25')]) # left arm Dg[20] = sp.Matrix([sp.Symbol('L_1_26'), sp.Symbol('L_2_26'), sp.Symbol('L_3_26')]) Dg[21]", "xgj_print, xg[i], i+1) xg[i] = write_symb_vector(out_file, xg[i], 'xg{}_'.format(i+1), '') xgp[i]", "out_file.write(',\\n {}'.format(get_string_enum(joint_id_names[i]))) else: out_file.write(', 
{}'.format(get_string_enum(joint_id_names[i]))) out_file.write('};\\n\\n') out_file.write('/*! \\\\brief main kinematics", "' {} = {};\\n'.format(elem_name, matrix[i,j]).replace('1.0*','') new_matrix[i,j] = sp.Symbol(elem_name) return new_matrix,", "x_r = x_r_elb + R_r_elb.T * Dpt_r_wrist x_l = x_l_elb", "for j in range(0,9): cur_jac = symbolic_jacob_der(Rj, xj, xgj, R_l_elb[j],", "out_file.write(' * origin: in the waist, middle point between the", "# xgpi : derivative of xgi # omi : absolute", "1 elif not flag_print: flag_print = 1 out_file.write('\\n') out_file.write(' in_out.r_Lfoot_cont_der[{}][{}][{}]", "right wrist absolute velocity\\n') for i in range(0,3): out_file.write(' in_out.rp_Rwrist[{}]", "f: # loop on all the lines for line in", "9, 10, 11, # left leg 0, 13, 14, #", "{};\\n'.format(elem_name, vector[i]).replace('1.0*','') new_vector[i] = sp.Symbol(elem_name) return new_vector, save_vector # save", "i in range(0, 9): out_file.write(' in_out.Lfoot_or[{}] = {};\\n'.format(i, R_l_foot[i])) out_file.write('\\n')", "# waist 2, 1, 3, 2, 1, 2, # right", "= symbolic_jacob_der(Rj, xj, xgj, x_vector[j], 'q{}'.format(i+1)) [xgj[index-1][i], xgj_print[index-1][i]] = print_save_symb_vector(new_vector,", "# Ri : absolute rotational matrix # Rti : transpose", "'M_29' # left arm ]) # joint names joint_id_names =", "get_string_enum(cur_string): cur_split = cur_string.split('_') if len(cur_split) >= 2: new_string =", "{};\\n'.format(get_string_enum(joint_id_names[i]), j, cur_jac)) out_file.write(' }\\n\\n') # get a string for", "'M_23', 'M_24', 'M_25', # right arm 'M_26', 'M_27', 'M_28', 'M_29'", "true if it is a float def isInt(value): try: int(value)", "anchor point def write_symb_xj(nb_bodies, Rj, xj, xgj, xj_print, x_vector, index):", "0 for k in range(0, 3): if xj_print[i][j][k] != None:", "False # return true if it has a shape 'R%a_%b%c'", "= write_symb_vector(out_file, xgp[i], 'xgp{}_'.format(i+1), '') # jacobian xg out_file.write('\\n //", "15, 16, 17, 18, # 
right arm 15, 20, 21,", ": absolute rotational matrix\\n') out_file.write(' * Rti : transpose matrix", "// jacobian anchor point positions\\n') out_file.write(' if (flag_jacob)\\n {\\n') flag_first", "out_file.write('\\n') for i in range(0, 9): out_file.write(' in_out.Lfoot_or[{}] = {};\\n'.format(i,", "if len(line.split('// -- variables initialization -- //')) != 1: out_file.write('", "sp.Matrix([sp.Symbol('L_1_11'), sp.Symbol('L_2_11'), sp.Symbol('L_3_11')]) Dg[6] = sp.Matrix([sp.Symbol('L_1_12'), 0.0 , sp.Symbol('L_3_12')]) #", "'RightElbPitch_id', # right arm 'LeftShPitch_id' , 'LeftShRoll_id' , 'LeftShYaw_id' ,", "# left leg 1, 2, 3, # trunk 2, 1,", "# return true if it is a float def isInt(value):", "* Di : position vector from the anchor point of", "sp.Symbol('L_3_16')]) Dg[11] = sp.Matrix([sp.Symbol('L_1_17'), sp.Symbol('L_2_17'), sp.Symbol('L_3_17')]) Dg[12] = sp.Matrix([sp.Symbol('L_1_18'), 0.0", "omega out_file.write('\\n // joint absolute velocities\\n') Om = nb_bodies*[None] om", "sp.Matrix([sp.Symbol('L_1_12'), 0.0 , sp.Symbol('L_3_12')]) # left leg Dg[7] = sp.Matrix([sp.Symbol('L_1_13'),", "]) nb_bodies = len(parent_body_index) ## anchor point positions Dpt =", "'M_24', 'M_25', # right arm 'M_26', 'M_27', 'M_28', 'M_29' #", "R_matrix[1], R_matrix[1])) out_file.write(' in_out.{}[2] = atan2(-{}, -{});\\n'.format(angle_name, R_matrix[1], R_matrix[0])) #", "= x[l_elb_id] xp_r_elb = xp[r_elb_id] xp_l_elb = xp[l_elb_id] om_r_elb =", "= sp.Matrix([0.0, sp.Symbol('DPT_2_2'), 0.0]) Dpt[2] = sp.Matrix([0.0, sp.Symbol('DPT_2_6'), 0.0]) Dpt[3]", "1 elif not flag_print: flag_print = 1 out_file.write('\\n') out_file.write(' in_out.Lwrist_or_der[{}][{}]", "R_l_foot[i])) out_file.write('\\n') out_file.write(' // right foot absolute orientation jacobian\\n') out_file.write('", "== 0 or matrix[i,j] == 1: new_matrix[i,j] = matrix[i,j] else:", "xgj, Rj_print, R_matrix, index): # loop on all the joints", "relative angle before body i\\n') out_file.write(' * ci : cosine", 
"== 1: if count == 0: out_write.write(' double {}'.format(cut_line[0].strip())) else:", "out_file.write(' in_out.{}[0] = inv_c_y_{} * (c_z_{}*{} + s_z_{}*{});\\n'.format(omega_out, body_part, body_part,", "import re import os ###################### # # # 17 16", "1, sp.Symbol('Om{}'.format(i+1))) om[i] = om[parent_id] + Rt[parent_id] * Om[i] om[i]", "i in range(1, nb_bodies): Rd[i] = get_rotation_matrix(rot_axis[i], 1, sp.Symbol('c{}'.format(i+1)), sp.Symbol('s{}'.format(i+1)))", "xgj): out_file.write(' m_tot = ') for i in range(0, nb_bodies):", "= './{}_temp.cc'.format(out_file_name) file_temp = open(in_temp, 'w') # beginning of the", "out_file.write(' // right foot absolute velocity\\n') for i in range(0,3):", "out_file.write('\\n') out_file.write(' // left wrist jacobian\\n') out_file.write(' if (flag_jacob)\\n {\\n')", "left foot contact points absolute position\\n') for i in range(0,", "(center of mass) position and velocity\\n') out_file.write(' * feet position,", "lines for line in f: # declaration if len(line.split('// --", "'M_25', # right arm 'M_26', 'M_27', 'M_28', 'M_29' # left", "3): if xj_print[i][j][k] != None: if not flag_first: flag_first =", "orientation\\n') for i in range(0, 9): out_file.write(' in_out.Rwrist_or[{}] = {};\\n'.format(i,", "3): cur_jac = symbolic_jacob_der(Rj, xj, xgj, x_l[j], 'q{}'.format(i+1)) if cur_jac", "matrix as angles [rad]\\n') yaw_pitch_roll_angles(out_file, 'theta_Lfoot', R[l_foot_id], 1) out_file.write('\\n') out_file.write('", "[rad]\\n') yaw_pitch_roll_angles(out_file, 'theta_Lfoot', R[l_foot_id], 1) out_file.write('\\n') out_file.write(' c_y_Rfoot = cos(in_out.theta_Rfoot[1]);\\n')", "i in range(0, nb_contacts): for j in range (1, nb_bodies):", "symbolic_computation(out_file, nb_bodies, joint_id_names, rot_axis, parent_body_index, Dpt, Dg, M): out_file.write('\\n\\n //", "rotation matrices\\n') R = nb_bodies*[None] Rt = nb_bodies*[None] Rd =", "get_string_enum(joint_id_names[j]), k, cur_jac)) out_file.write(' 
}\\n\\n') out_file.write(' // left foot contact", "nb_bodies*[None] for i in range(0, nb_bodies): xg[i] = x[i] +", "flag_first: out_file.write(' + {}*{}'.format(M[k], str(xgj[k][i][j]))) else: flag_first = 1 out_file.write('({}*{}'.format(M[k],", "-1 -> pitch angle in [pi/2 ; 3*pi/2] out_file.write(' in_out.{}[0]", "Dpt[11] = sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_24')]) Dpt[12] = sp.Matrix([0.0, 0.0, 0.0])", "orientation angle derivatives [rad/s]\\n') theta_dot_compute(out_file, om[l_foot_id], 'omega_Lfoot', 'Lfoot') out_file.write('\\n') #", "# left arm Dg[20] = sp.Matrix([sp.Symbol('L_1_26'), sp.Symbol('L_2_26'), sp.Symbol('L_3_26')]) Dg[21] =", "!'.format(cur_len-1)) exit() return result # write the beginning of the", "out_file.write(' */\\n\\n') out_file.write('// joints enumeration\\n') out_file.write('enum {') count = 0", "vector of the COM G_i of body i\\n') out_file.write(' *", "axis == 1: return np.array([[-elem], [0.0], [0.0]]) elif axis ==", "but aligned with the ground (info from IMU)\\n') out_file.write(' *\\n')", "R_l_foot.T * Dpt_l_foot_cont[i] # writing outputs out_file.write(' // right foot", "flag_print: flag_print = 1 out_file.write('\\n') out_file.write(' in_out.r_Lfoot_cont_der[{}][{}][{}] = {};\\n'.format(i, get_string_enum(joint_id_names[j]),", "out_file.write('\\n') return new_vector # write symbolic matrix and replace symbolic", "== 1: if count == 0: out_write.write(' double {}'.format(cut_line_2[0].strip())) else:", "relative velocities\\n') for i in range(1, nb_bodies): out_file.write(' Om{} =", "= sp.Symbol('DPT_3_16') Dpt_l_foot[2] = sp.Symbol('DPT_3_29') Dpt_r_foot_cont = nb_contacts * [None]", "{}*{}));\\n'.format(angle_name, R_matrix[2], R_matrix[0], R_matrix[0], R_matrix[1], R_matrix[1])) out_file.write(' in_out.{}[2] = atan2({},", "= sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_8')]) Dpt[4] = sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_10')]) Dpt[5]", "body_part, omega_in[0], body_part, omega_in[1])) out_file.write(' in_out.{}[1] = c_z_{}*{} - 
s_z_{}*{};\\n'.format(omega_out,", "20, 21, 22 # left arm ]) nb_bodies = len(parent_body_index)", "3): out_file.write(' in_out.r_Rfoot_cont[{}][{}] = {};\\n'.format(i, j, x_r_cont[i][j])) out_file.write('\\n') out_file.write(' //", "# 07 13 # # # ###################### # # origin:", "line in f: cut_line_1 = line.split(elem) cut_line_2 = line.split(' =", "jacobian\\n') out_file.write(' if (flag_jacob)\\n {\\n') for i in range(1, nb_bodies):", "xp, om, Rj, xj, xgj, r_foot_id, l_foot_id, x_min, x_max, y_min,", "result += der_elem(cur_term_split[1], Rj, xj, xgj, der_var)*sp.Symbol(cur_term_split[0].strip()) # other else:", "in range(0, 9): new_matrix[j] = symbolic_jacob_der(Rj, xj, xgj, R_matrix[j], 'q{}'.format(i+1))", "# joint names joint_id_names = np.array(['0', # waist 'RightHipPitch_id', 'RightHipRoll_id',", "int(value.split('_')[1][1]) return True, a, b, c except: return False, -1,", "sp.Symbol('DPT_3_20')]) Dpt[10] = sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_22')]) Dpt[11] = sp.Matrix([0.0, 0.0,", "right arm Dg[16] = sp.Matrix([sp.Symbol('L_1_22'), sp.Symbol('L_2_22'), sp.Symbol('L_3_22')]) Dg[17] = sp.Matrix([sp.Symbol('L_1_23'),", "# 04 10 # # 05 11 # # 06", "{}'.format(cut_line_2[0].strip())) else: out_write.write(', {}'.format(cut_line_2[0].strip())) count += 1 if count >=", "i in range(1, nb_bodies): parent_id = parent_body_index[i] Om[i] = get_vector_axis(rot_axis[i],", "as angles [rad]\\n') yaw_pitch_roll_angles(out_file, 'theta_waist', R[waist_id], 1) out_file.write('\\n') out_file.write(' //", "absolute position\\n') for i in range(0,3): out_file.write(' in_out.r_Rfoot[{}] = {};\\n'.format(i,", "main kinematics computation\\n') out_file.write(' *\\n') out_file.write(' * \\\\param[in,out] in_out inputs", "* si : sine of the relative angle before body", "(1, nb_bodies): flag_print = 0 for k in range(0, 3):", "Rd = nb_bodies*[None] Rd[0] = sp.zeros(3, 3) R[0] = sp.zeros(3,", "angle before body i\\n') out_file.write(' * ci : cosine of", "1, 2, 3, # trunk 2, 1, 
3, 2, #", "aligned with the ground (info from IMU) # # Di", "between body i and its predecessor\\n') out_file.write(' * si :", "3): out_file.write(' in_out.rp_COM[{}] = '.format(i)) flag_first = 0 for j", "2, # left leg 1, 2, 3, # trunk 2,", "it is a float def isInt(value): try: int(value) return True", "new_string = '{}{}'.format(new_string, cur_split[i]) else: new_string = cur_string cur_split =", "[-v[1], v[0], 0.0]]) # get rotation matrix def get_rotation_matrix(axis, direct,", "1: # positive neg_flag = 0 pos_str = elem_split[0] elif", "else: out_file.write('0.0;\\n') out_file.write('\\n') out_file.write(' // global com jacobian\\n') out_file.write(' if", "a = int(value.split('_')[0].split('R')[1]) b = int(value.split('_')[1][0]) c = int(value.split('_')[1][1]) return", "sp.Matrix([0.0, sp.Symbol('DPT_2_46'), 0.0]) Dpt[22] = sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_48')]) Dpt[23] =", "(R_r_elb.T * Dpt_r_wrist) xp_l = xp_l_elb + om_tilde_l_elb * (R_l_elb.T", "for i in range(0,3): out_file.write(' in_out.rp_Rfoot[{}] = {};\\n'.format(i, xp_r[i])) out_file.write('\\n')", "ground (info from IMU) # # Di : position vector", "matrix[i,j] == 1: new_matrix[i,j] = matrix[i,j] save_matrix[3*i+j] = None else:", "matrices\\n') R = nb_bodies*[None] Rt = nb_bodies*[None] Rd = nb_bodies*[None]", "out_file.write(' }\\n\\n') out_file.write(' // right foot contact points absolute position\\n')", "cosines\\n') for i in range(1, nb_bodies): out_file.write(' c{} = cos(in_out.q_mot[{}]);\\n'.format(i+1,", "+ Rt[parent_id] * Dpt[i] xp[i] = xp[parent_id] + om_tilde[parent_id] *", "= sp.Matrix([0.0, 0.0, 0.0]) # left leg Dpt[7] = sp.Matrix([0.0,", "Dpt[13] = sp.Matrix([sp.Symbol('DPT_1_4'), 0.0, sp.Symbol('DPT_3_4')]) Dpt[14] = sp.Matrix([0.0, 0.0, 0.0])", "vector from the anchor point of body i to its", ", 'LeftShYaw_id' , 'LeftElbPitch_id' # left arm ]) out_file_name =", "[xj[index-1][i], xj_print[index-1][i]] = print_save_symb_vector(new_vector, 'x{}_'.format(index), '_d{}'.format(i+1)) 
# write symbolic jacobian", "xj_print, x[0], 1) x[0] = write_symb_vector(out_file, x[0], 'x1_', '') xp[0]", "Rj, xj, xgj, xgj_print, xg[i], i+1) xg[i] = write_symb_vector(out_file, xg[i],", "i in range(0,3): for j in range(0,3): if matrix[i,j] ==", "elem_str.replace('- ','-').strip() # derivative axis der_q = int(der_var.replace('q','')) # detect", "= om_tilde[0] * (Rt[0] * Dpt[0]) write_symb_xj(nb_bodies, Rj, xj, xgj,", "= sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_24')]) Dpt[12] = sp.Matrix([0.0, 0.0, 0.0]) #", "returned) def isRot(value): try: a = int(value.split('_')[0].split('R')[1]) b = int(value.split('_')[1][0])", "write_symb_vector(out_file, xg[i], 'xg{}_'.format(i+1), '') xgp[i] = write_symb_vector(out_file, xgp[i], 'xgp{}_'.format(i+1), '')", "j in range(0, 3): out_file.write(' IMU{}{} = in_out.IMU_Orientation[{}];\\n'.format(i+1, j+1, 3*i+j))", "rotations\\n') out_file.write(' * inertial frame: located at the origin (waist),", "the relative angle before body i # ci : cosine", "{}'.format(cut_line[0].strip())) else: out_write.write(', {}'.format(cut_line[0].strip())) count += len(cut_line[0].strip()) + 2 if", "(flag_jacob)\\n {\\n') flag_first = 0 for i in range(0, nb_contacts):", "of the anchor point of body i\\n') out_file.write(' * xgi", "one product elif cur_len == 2: result += der_elem(cur_term_split[0], Rj,", "absolute position\\n') for i in range(0, nb_contacts): for j in", "//\\n\\n') print_all_declaration(in_temp, out_file, 100) out_file.write('\\n\\n') # copy temporary file out_file.write(line)", "i in range(1, len(cur_split)-1): new_string = '{}{}'.format(new_string, cur_split[i]) else: new_string", "in range(0, 3): cur_jac = symbolic_jacob_der(Rj, xj, xgj, x_r_cont[i][k], 'q{}'.format(j+1))", "2 and len(cut_line_2) == 2: if len(cut_line_2[0].split('[')) == 1: if", "= symbolic_jacob_der(Rj, xj, xgj, x_r_cont[i][k], 'q{}'.format(j+1)) if cur_jac != 0:", "// left foot absolute orientation jacobian\\n') out_file.write(' if (flag_jacob)\\n {\\n')", 
"components declaration def write_matrix_declaration(out_file, prefix): out_file.write(' double ') for i", "Rt[i] = R[i].T # jacobian rotation matrices out_file.write('\\n // jacobian", "[0.0]]) elif axis == 3: return np.array([[0.0], [0.0], [elem]]) else:", "= atan2(-{}, -sqrt({}*{} + {}*{}));\\n'.format(angle_name, R_matrix[2], R_matrix[0], R_matrix[0], R_matrix[1], R_matrix[1]))", "# apply negative if neg_flag: result = -result return result", "0 for k in range(0, 3): cur_jac = symbolic_jacob_der(Rj, xj,", "sp.Matrix([sp.Symbol('L_1_27'), sp.Symbol('L_2_27'), sp.Symbol('L_3_27')]) Dg[22] = sp.Matrix([sp.Symbol('L_1_28'), sp.Symbol('L_2_28'), sp.Symbol('L_3_28')]) Dg[23] =", "x_vector[j], 'q{}'.format(i+1)) [xgj[index-1][i], xgj_print[index-1][i]] = print_save_symb_vector(new_vector, 'xg{}_'.format(index), '_d{}'.format(i+1)) # symbolic", "for print def print_save_symb_matrix(matrix, start_name, end_name): new_matrix = sp.zeros(3, 3)", "omega_{} = in_out.IMU_Angular_Rate[{}];\\n'.format(i+1, i)) out_file.write('\\n // joint cosines\\n') for i", "= write_symb_matrix(out_file, R[0], 'R1_', '') Rt[0] = R[0].T for i", "file def gen_symbolic_out(out_file_name, nb_bodies, rot_axis, parent_body_index, joint_id_names, Dpt, Dg, M):", "i and its predecessor\\n') out_file.write(' * si : sine of", "// left foot absolute velocity\\n') for i in range(0,3): out_file.write('", "sp.Symbol('L_2_8') , sp.Symbol('L_3_8')]) Dg[3] = sp.Matrix([sp.Symbol('L_1_9') , sp.Symbol('L_2_9') , sp.Symbol('L_3_9')])", "1.0, 0.0], [sine, 0.0, cosine]]) elif axis == 3: return", "sp.Matrix([sp.Symbol('L_1_24'), sp.Symbol('L_2_24'), sp.Symbol('L_3_24')]) Dg[19] = sp.Matrix([sp.Symbol('L_1_25'), sp.Symbol('L_2_25'), sp.Symbol('L_3_25')]) # left", "sin(in_out.theta_torso[1]);\\n') out_file.write(' s_z_waist = sin(in_out.theta_waist[2]);\\n') out_file.write(' s_z_torso = sin(in_out.theta_torso[2]);\\n\\n') out_file.write('", "xp[i] = xp[parent_id] + om_tilde[parent_id] * (Rt[parent_id] * Dpt[i]) 
write_symb_xj(nb_bodies,", "body i\\n') out_file.write(' * Ri : absolute rotational matrix\\n') out_file.write('", "class\\n') out_file.write(' *\\n') out_file.write(' * computation of:\\n') out_file.write(' * COM", "k in range(0, nb_bodies): if xgj[k][i][j] != 0: if flag_first:", "left foot jacobian\\n') out_file.write(' if (flag_jacob)\\n {\\n') flag_first = 0", "derivative result = 0 # cosine if pos_str == 'c{}'.format(der_q):", "0.0, sp.Symbol('DPT_3_24')]) Dpt[12] = sp.Matrix([0.0, 0.0, 0.0]) # trunk Dpt[13]", "!= 0: out_write.write(';\\n') # print all declarations def print_all_declaration(in_file, out_write,", "0.0, sine], [0.0, 1.0, 0.0], [-sine, 0.0, cosine]]) elif axis", "i in range(0, nb_bodies): out_file.write('{}'.format(M[i])) if i == nb_bodies-1: out_file.write(';\\n\\n')", "for i in range(0, 3): out_file.write(' in_out.r_COM[{}] = '.format(i)) flag_first", "om[waist_id], 'omega_waist', 'waist') out_file.write('\\n') out_file.write(' // torso orientation angle derivatives", "- angles velocity\\n') for i in range(0, 3): out_file.write(' omega_{}", "atan2(-{}, sqrt({}*{} + {}*{}));\\n'.format(angle_name, R_matrix[2], R_matrix[0], R_matrix[0], R_matrix[1], R_matrix[1])) out_file.write('", "out_file.write(' if (flag_jacob)\\n {\\n') for i in range(1, nb_bodies): for", "xi # xgpi : derivative of xgi # omi :", "Rj[i] = nb_bodies*[None] xj[i] = nb_bodies*[None] xgj[i] = nb_bodies*[None] Rj_print[i]", "Dg[23] = sp.Matrix([sp.Symbol('L_1_29'), sp.Symbol('L_2_29'), sp.Symbol('L_3_29')]) # masses M = np.array([", "nb_bodies): out_file.write('{}'.format(M[i])) if i == nb_bodies-1: out_file.write(';\\n\\n') else: out_file.write(' +", "out_file.write(' in_out.{}[2] = atan2({}, {});\\n'.format(angle_name, R_matrix[1], R_matrix[0])) else: # epsilon", "xp[i], 'xp{}_'.format(i+1), '') # jacobian x out_file.write('\\n // jacobian anchor", "xgj, 6, 12, -0.06, 0.08, -0.045, 0.045) wrists_compute(out_file, joint_id_names, R,", "joint before body i (1:x, 2:y, 3:z) rot_axis 
= np.array([0,", "elif axis == 3: return np.array([[cosine, -sine, 0.0], [sine, cosine,", "predecessor\\n') out_file.write(' * si : sine of the relative angle", ": transpose matrix of Ri\\n') out_file.write(' * xji : jacobian", "position and velocity def com_compute(out_file, nb_bodies, joint_id_names, M, xg, xgp,", "Rj, xj, xgj, r_foot_id, l_foot_id, x_min, x_max, y_min, y_max): #", "j+1, end_name) out_file.write(' {} = {};\\n'.format(elem_name, matrix[i,j]).replace('1.0*','')) new_matrix[i,j] = sp.Symbol(elem_name)", "lines for line in f: cut_line_1 = line.split(elem) cut_line_2 =", "out_file.write('\\n') return new_matrix # save the symbolic vector for print", "in range(0,3): out_file.write(' in_out.r_Lfoot[{}] = {};\\n'.format(i, x_l[i])) out_file.write('\\n') out_file.write(' //", "'LeftShYaw_id' , 'LeftElbPitch_id' # left arm ]) out_file_name = 'forward_kinematics'", "# compute derivative result = 0 # cosine if pos_str", "1 out_file.write('({}*{}'.format(M[k], str(xgj[k][i][j]))) if k == nb_bodies-1: if flag_first: out_file.write(')/m_tot;\\n')", "; pi/2] out_file.write(' in_out.{}[0] = atan2({}, {});\\n'.format(angle_name, R_matrix[5], R_matrix[8])) out_file.write('", "Dpt_r_wrist[1] = r_wrist_y Dpt_r_wrist[2] = r_wrist_z Dpt_l_wrist[0] = r_wrist_x Dpt_l_wrist[1]", "feet_compute(out_file, joint_id_names, R, x, xp, om, Rj, xj, xgj, r_foot_id,", "= len(parent_body_index) ## anchor point positions Dpt = nb_bodies*[None] #", "R[waist_id], 1) out_file.write('\\n') out_file.write(' // torso orientation matrix as angles", "Dg[18] = sp.Matrix([sp.Symbol('L_1_24'), sp.Symbol('L_2_24'), sp.Symbol('L_3_24')]) Dg[19] = sp.Matrix([sp.Symbol('L_1_25'), sp.Symbol('L_2_25'), sp.Symbol('L_3_25')])", "in_out.{}[0] = atan2({}, {});\\n'.format(angle_name, R_matrix[5], R_matrix[8])) out_file.write(' in_out.{}[1] = atan2(-{},", "* [None] for i in range(0,3): if vector[i] == 0", "Dg[19] = sp.Matrix([sp.Symbol('L_1_25'), sp.Symbol('L_2_25'), sp.Symbol('L_3_25')]) # left arm Dg[20] 
=", "j)) flag_first = 0 for k in range(0, nb_bodies): if", "i \\n') out_file.write(' * (previous body is not always body", "sin(in_out.theta_torso[2]);\\n\\n') out_file.write(' if ((!c_y_waist) || (!c_y_torso))\\n {\\n') out_file.write(' return;\\n }\\n\\n')", "[None] for i in range(0, nb_contacts): Dpt_r_foot_cont[i] = sp.zeros(3, 1)", "xj, xgj, xj_print, x[0], 1) x[0] = write_symb_vector(out_file, x[0], 'x1_',", "the enumeration of joints def get_string_enum(cur_string): cur_split = cur_string.split('_') if", "out_file.write(' s_z_waist = sin(in_out.theta_waist[2]);\\n') out_file.write(' s_z_torso = sin(in_out.theta_torso[2]);\\n\\n') out_file.write(' if", "Rt[0] * Dpt[0] xp[0] = om_tilde[0] * (Rt[0] * Dpt[0])", "# xg & xgp out_file.write('\\n // com absolute positions and", "0.0, sp.Symbol('DPT_3_20')]) Dpt[10] = sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_22')]) Dpt[11] = sp.Matrix([0.0,", "open(in_file, 'r') as f: # loop on all the lines", "1 out_file.write('\\n') out_file.write(' in_out.r_Lfoot_der[{}][{}] = {};\\n'.format(get_string_enum(joint_id_names[i]), j, cur_jac)) out_file.write(' }\\n\\n')", "out_file.write(' * expressed in the relative frame of the current", "Dpt[14] = sp.Matrix([0.0, 0.0, 0.0]) Dpt[15] = sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_32')])", "(s_z_{}*{} + c_z_{}*{}) + {};\\n'.format(omega_out, body_part, body_part, body_part, omega_in[1], body_part,", "j == nb_bodies-1: if flag_first: out_file.write(')/m_tot;\\n') else: out_file.write('0.0;\\n') out_file.write('\\n') out_file.write('", "of an element def print_declaration_elem(in_file, out_write, elem, nb_max_line): if count_elem(in_file,", "out_file.write(' in_out.r_COM_der[{}][{}] = '.format(get_string_enum(joint_id_names[i]), j)) flag_first = 0 for k", "(1, nb_bodies): flag_print = 0 for j in range(0, 3):", "Dg[7] = sp.Matrix([sp.Symbol('L_1_13'), sp.Symbol('L_2_13'), sp.Symbol('L_3_13')]) Dg[8] = sp.Matrix([sp.Symbol('L_1_14'), sp.Symbol('L_2_14'), sp.Symbol('L_3_14')])", "xgj_print[i] = 
nb_bodies*[None] for j in range(0, nb_bodies-1): Rj[i][j] =", "names joint_id_names = np.array(['0', # waist 'RightHipPitch_id', 'RightHipRoll_id', 'RightHipYaw_id', 'RightKneePitch_id',", "# waist Dpt[0] = sp.Matrix([0.0, 0.0, 0.0]) # right leg", "0.0], [sine, cosine, 0.0], [0.0, 0.0, 1.0]]) else: return np.array([])", "np.array([[cosine, 0.0, sine], [0.0, 1.0, 0.0], [-sine, 0.0, cosine]]) elif", "out_file.write('// joints enumeration\\n') out_file.write('enum {') count = 0 for i", "out_file.write('\\n') out_file.write(' in_out.Lfoot_or_der[{}][{}] = {};\\n'.format(get_string_enum(joint_id_names[i]), j, cur_jac)) out_file.write(' }\\n\\n') out_file.write('", "in range (1, nb_bodies): new_matrix = sp.zeros(3, 3) # loop", "for i in range (1, nb_bodies): new_vector = sp.zeros(3, 1)", "point between the two pitch hip rotations # inertial frame:", "end_name) save_vector[i] = ' {} = {};\\n'.format(elem_name, vector[i]).replace('1.0*','') new_vector[i] =", "# right leg 'LeftHipPitch_id' , 'LeftHipRoll_id' , 'LeftHipYaw_id' , 'LeftKneePitch_id'", "omi : absolute rotational vector of body i\\n') out_file.write(' *", "out_file.write(' // -- variables initialization -- //\\n') out_file.write('\\n // IMU", "i in range(0, 3): out_file.write(' in_out.rp_COM[{}] = '.format(i)) flag_first =", "{}*xgp{}_{}'.format(M[j], j+1, i+1)) else: flag_first = 1 out_file.write('({}*xgp{}_{}'.format(M[j], j+1, i+1))", "in range(0, nb_bodies): if flag_first: out_file.write(' + {}*xgp{}_{}'.format(M[j], j+1, i+1))", "0 or vector[i] == 1: new_vector[i] = vector[i] save_vector[i] =", "= 1 -> pitch angle in [-pi/2 ; pi/2] out_file.write('", "len(cur_split)-1): new_string = '{}{}'.format(new_string, cur_split[i]) else: new_string = cur_string cur_split", "om_tilde_r_elb * (R_r_elb.T * Dpt_r_wrist) xp_l = xp_l_elb + om_tilde_l_elb", "def get_tilde(v): return np.array([[0.0, -v[2], v[1]], [v[2], 0.0, -v[0]], [-v[1],", "and velocities\\n') xg = nb_bodies*[None] xgp = nb_bodies*[None] for i", "// right 
foot absolute position\\n') for i in range(0,3): out_file.write('", "inv_c_y_Rfoot = 1.0 / c_y_Rfoot;\\n') out_file.write(' inv_c_y_Lfoot = 1.0 /", "body i # # xi : absolute position vector (from", "if count != 0: out_write.write(';\\n') # get tilde matrix def", "right foot orientation matrix as angles [rad]\\n') yaw_pitch_roll_angles(out_file, 'theta_Rfoot', R[r_foot_id],", "and velocities\\n') x = nb_bodies*[None] xp = nb_bodies*[None] x[0] =", "# 06 12 # # 07 13 # # #", "1 elif not flag_print: flag_print = 1 out_file.write('\\n') out_file.write(' in_out.Lfoot_or_der[{}][{}]", "range(0, 3): cur_jac = symbolic_jacob_der(Rj, xj, xgj, x_l_cont[i][k], 'q{}'.format(j+1)) if", "3): for j in range(0, 3): out_file.write(' IMU{}{} = in_out.IMU_Orientation[{}];\\n'.format(i+1,", "0 or matrix[i,j] == 1: new_matrix[i,j] = matrix[i,j] else: flag_print", "xgj, xj_print, x_vector, index): # loop on all the joints", "orientation matrix, compute the roll, pitch, yaw angles (and derivative)", "[elem]]) else: return np.array([]) else: if axis == 1: return", "((!c_y_waist) || (!c_y_torso))\\n {\\n') out_file.write(' return;\\n }\\n\\n') out_file.write(' inv_c_y_waist =", "R_r_foot[i])) out_file.write('\\n') for i in range(0, 9): out_file.write(' in_out.Lfoot_or[{}] =", "1: return np.array([[1.0, 0.0, 0.0], [0.0, cosine, sine], [0.0, -sine,", "// 05 11 //\\n') out_file.write(' * // 06 12 //\\n')", "flag_print: flag_print = 1 out_file.write('\\n') out_file.write(' in_out.Lwrist_or_der[{}][{}] = {};\\n'.format(get_string_enum(joint_id_names[i]), j,", "xgj, x_l[j], 'q{}'.format(i+1)) if cur_jac != 0: if not flag_first:", "== 2: return np.array([[cosine, 0.0, sine], [0.0, 1.0, 0.0], [-sine,", "j, cur_jac)) out_file.write(' }\\n\\n') out_file.write(' // left wrist absolute position\\n')", "= xp[i] + om_tilde[i] * (Rt[i] * Dg[i]) write_symb_xgj(nb_bodies, Rj,", "2: if len(cut_line_2[0].split('[')) == 1: if count == 0: out_write.write('", "positions and velocities\\n') x = nb_bodies*[None] 
xp = nb_bodies*[None] x[0]", "om[i], 'om{}_'.format(i+1), '') om_tilde[i] = get_tilde(om[i]) # x & xp", "= get_tilde(om_r_foot) om_tilde_l_foot = get_tilde(om_l_foot) x_r = x_r_foot + R_r_foot.T", "def yaw_pitch_roll_angles(out_file, angle_name, R_matrix, epsilon): if epsilon > 0: #", "s_z_{}*{});\\n'.format(omega_out, body_part, body_part, omega_in[0], body_part, omega_in[1])) out_file.write(' in_out.{}[1] = c_z_{}*{}", "flag_print: out_file.write('\\n') return new_matrix # save the symbolic vector for", "R_r_elb.T * Dpt_r_wrist x_l = x_l_elb + R_l_elb.T * Dpt_l_wrist", "an anchor point def write_symb_xj(nb_bodies, Rj, xj, xgj, xj_print, x_vector,", "flag_print: flag_print = 1 out_file.write('\\n') out_file.write(' in_out.r_Rfoot_der[{}][{}] = {};\\n'.format(get_string_enum(joint_id_names[i]), j,", "get_tilde(om[i]) # x & xp out_file.write('\\n // anchor point absolute", "nb_bodies*[None] xp = nb_bodies*[None] x[0] = Rt[0] * Dpt[0] xp[0]", "= sp.Matrix([sp.Symbol('L_1_7') , sp.Symbol('L_2_7') , sp.Symbol('L_3_7')]) Dg[2] = sp.Matrix([sp.Symbol('L_1_8') ,", "# waist Dg[0] = sp.Matrix([sp.Symbol('L_1_6'), sp.Symbol('L_2_6'), sp.Symbol('L_3_6')]) # right leg", "// wrists absolute orientation\\n') for i in range(0, 9): out_file.write('", "om_tilde[0] * (Rt[0] * Dpt[0]) write_symb_xj(nb_bodies, Rj, xj, xgj, xj_print,", "-0.005, -0.225) torso_waist_angles(out_file, R, om, 0, 15) # generate the", "= '{}{}{}'.format(start_name, i+1, end_name) save_vector[i] = ' {} = {};\\n'.format(elem_name,", "xgj, x_r[j], 'q{}'.format(i+1)) if cur_jac != 0: if not flag_first:", "= symbolic_jacob_der(Rj, xj, xgj, R_l_elb[j], 'q{}'.format(i+1)) if cur_jac != 0:", "i == max: out_file.write(';\\n') else: out_file.write(', ') # variables initialization", "Dg[22] = sp.Matrix([sp.Symbol('L_1_28'), sp.Symbol('L_2_28'), sp.Symbol('L_3_28')]) Dg[23] = sp.Matrix([sp.Symbol('L_1_29'), sp.Symbol('L_2_29'), sp.Symbol('L_3_29')])", "def isVec(value): try: a = int(value.split('_')[0].split('x')[1]) b = 
int(value.split('_')[1]) return", "negative if neg_flag: result = -result return result # compute", "absolute rotational matrix\\n') out_file.write(' * Rti : transpose matrix of", "rotation matrices out_file.write('\\n // jacobian rotation matrices\\n') out_file.write(' if (flag_jacob)\\n", "parent_body_index[i] x[i] = x[parent_id] + Rt[parent_id] * Dpt[i] xp[i] =", "xj, xgj, R_l_elb[j], 'q{}'.format(i+1)) if cur_jac != 0: if not", "range(0, 9): out_file.write(' in_out.Lwrist_or[{}] = {};\\n'.format(i, R_l_elb[i])) out_file.write('\\n') out_file.write(' //", "matrix[i,j]).replace('1.0*','')) new_matrix[i,j] = sp.Symbol(elem_name) if flag_print: out_file.write('\\n') return new_matrix #", "1) Rj_print[i][j] = 9 * [None] xj_print[i][j] = 3 *", "of mass) G_i, # expressed in the relative frame of", "= nb_bodies*[None] for j in range(0, nb_bodies-1): Rj[i][j] = sp.zeros(3,", "leg 0, 7, 8, 9, 10, 11, # left leg", "= sp.Matrix([0.0, sp.Symbol('DPT_2_46'), 0.0]) Dpt[22] = sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_48')]) Dpt[23]", "cur_split = filter(None, re.split(\"([A-Z][^A-Z]*)\", new_string)) new_string = cur_split[0].upper() for i", "Dpt_r_foot_cont[1][0] = x_min Dpt_r_foot_cont[2][0] = x_max Dpt_r_foot_cont[3][0] = x_max Dpt_r_foot_cont[0][1]", "om[r_foot_id] om_l_foot = om[l_foot_id] R_r_foot = R[r_foot_id] R_l_foot = R[l_foot_id]", "waist, middle point between the two pitch hip rotations\\n') out_file.write('", "* xi : absolute position vector (from origin, expressed in", "'{}{}{}{}'.format(start_name, i+1, j+1, end_name) save_matrix[3*i+j] = ' {} = {};\\n'.format(elem_name,", "positions and velocities\\n') xg = nb_bodies*[None] xgp = nb_bodies*[None] for", "absolute rotational vector of body i # Ri : absolute", "arm ]) out_file_name = 'forward_kinematics' gen_symbolic_out(out_file_name, nb_bodies, rot_axis, parent_body_index, joint_id_names,", "else: elem_name = '{}{}{}'.format(start_name, i+1, end_name) save_vector[i] = ' {}", "sp.zeros(3, 1) save_vector = 3 * [None] 
for i in", "matrix for print def print_save_symb_matrix(matrix, start_name, end_name): new_matrix = sp.zeros(3,", "{}*{}'.format(M[j], xg[j][i])) else: flag_first = 1 out_file.write('({}*xg{}_{}'.format(M[j], j+1, i+1)) if", "(!c_y_Lfoot))\\n {\\n') out_file.write(' return;\\n }\\n\\n') out_file.write(' inv_c_y_Rfoot = 1.0 /", "= line.split(elem) cut_line_2 = line.split(' = ') if len(cut_line_1) ==", "-sine], [0.0, 1.0, 0.0], [sine, 0.0, cosine]]) elif axis ==", "0 # loop on all terms for cur_term in term_list:", "sp.Symbol('L_2_20'), sp.Symbol('L_3_20')]) Dg[15] = sp.Matrix([sp.Symbol('L_1_21'), sp.Symbol('L_2_21'), sp.Symbol('L_3_21')]) # right arm", "the matrix elements for j in range(0, 9): new_matrix[j] =", "point def write_symb_xgj(nb_bodies, Rj, xj, xgj, xgj_print, x_vector, index): #", "sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_43')]) # left arm Dpt[20] = sp.Matrix([sp.Symbol('DPT_1_37'), sp.Symbol('DPT_2_37'),", "if count >= nb_max_char: out_write.write(';\\n') count = 0 if count", "get_vector_axis(axis, direct, elem): if direct: if axis == 1: return", "axis == 1: return np.array([[1.0, 0.0, 0.0], [0.0, cosine, -sine],", "for i in range(0, nb_contacts): Dpt_r_foot_cont[i] = sp.zeros(3, 1) Dpt_l_foot_cont[i]", "0, 13, 14, # trunk 15, 16, 17, 18, #", "= x_min Dpt_r_foot_cont[1][0] = x_min Dpt_r_foot_cont[2][0] = x_max Dpt_r_foot_cont[3][0] =", "Rj[i][j] = sp.zeros(3, 3) xj[i][j] = sp.zeros(3, 1) xgj[i][j] =", "absolute orientation jacobian\\n') out_file.write(' if (flag_jacob)\\n {\\n') flag_first = 0", "0.0, 0.0]) # left leg Dpt[7] = sp.Matrix([0.0, sp.Symbol('DPT_2_3'), 0.0])", "out_file.write('\\n') out_file.write(' // right foot absolute velocity\\n') for i in", "1.0 / c_y_torso;\\n\\n') out_file.write(' // waist orientation angle derivatives [rad/s]\\n')", "# # 19 14 23 # # 20 01 24", "x_vector, index): # loop on all the joints for i", "right arm Dpt[16] = sp.Matrix([sp.Symbol('DPT_1_36'), sp.Symbol('DPT_2_36'), sp.Symbol('DPT_3_36')]) Dpt[17] = 
sp.Matrix([0.0,", "0.0, sp.Symbol('DPT_3_10')]) Dpt[5] = sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_12')]) Dpt[6] = sp.Matrix([0.0,", "xgj, xj_print, x[0], 1) x[0] = write_symb_vector(out_file, x[0], 'x1_', '')", "vector for print def print_save_symb_vector(vector, start_name, end_name): new_vector = sp.zeros(3,", "direct: if axis == 1: return np.array([[elem], [0.0], [0.0]]) elif", "the COM G_i of body i # xpi : derivative", "out_file.write(' * xi : absolute position vector (from origin, expressed", "c_z_torso = cos(in_out.theta_torso[2]);\\n\\n') out_file.write(' s_y_waist = sin(in_out.theta_waist[1]);\\n') out_file.write(' s_y_torso =", "= -1 -> pitch angle in [pi/2 ; 3*pi/2] out_file.write('", "cur_string.split('_') if len(cur_split) >= 2: new_string = cur_split[0] for i", "in range(0, 3): cur_jac = symbolic_jacob_der(Rj, xj, xgj, x_l_cont[i][k], 'q{}'.format(j+1))", "cur_split[i].upper()) return new_string # write the end of the file", "xj, xgj, x_r_cont[i][k], 'q{}'.format(j+1)) if cur_jac != 0: if not", "the previous body\\n') out_file.write(' * DGi : position vector from", "out_file.write(' Om{} = in_out.qd_mot[{}];\\n'.format(i+1, joint_id_names[i])) # write symbolic vector and", "= 0 for i in range(0,3): for j in range(0,3):", "// right foot contact points absolute position\\n') for i in", "out_file.write(' inv_c_y_Lfoot = 1.0 / c_y_Lfoot;\\n\\n') out_file.write(' // right foot", "* [None] x_l_cont = nb_contacts * [None] # computation om_tilde_r_foot", "= write_symb_vector(out_file, x[i], 'x{}_'.format(i+1), '') xp[i] = write_symb_vector(out_file, xp[i], 'xp{}_'.format(i+1),", "np.array([[0.0], [0.0], [-elem]]) else: return np.array([]) # compute the derivative", "for k in range(0, 9): if Rj_print[i][j][k] != None: if", "+= Rj[a-1][der_q-1][(b-1)*3+(c-1)] # vector elif vec_flag: result += xj[d-1][der_q-1][e-1] #", "// right wrist absolute position\\n') for i in range(0,3): out_file.write('", "R_l_elb.T * Dpt_l_wrist xp_r = xp_r_elb + om_tilde_r_elb * 
(R_r_elb.T", "Dpt_r_foot_cont[3][0] = x_max Dpt_r_foot_cont[0][1] = y_min Dpt_r_foot_cont[1][1] = y_max Dpt_r_foot_cont[2][1]", "wrist absolute position\\n') for i in range(0,3): out_file.write(' in_out.r_Lwrist[{}] =", "True, a, b except: return False, -1, -1 # count", "= 0 for k in range(0, nb_bodies): if xgj[k][i][j] !=", "R_matrix[0])) # compute the time derivatives of 'yaw_pitch_roll_angles' def theta_dot_compute(out_file,", "a shape 'R%a_%b%c' (indexes %a, %b, %c also returned) def", "Dpt_r_wrist = sp.zeros(3, 1) Dpt_l_wrist = sp.zeros(3, 1) Dpt_r_wrist[0] =", "// right foot orientation matrix as angles [rad]\\n') yaw_pitch_roll_angles(out_file, 'theta_Rfoot',", "out_file.write(', ') # print variables declaration def write_variables_declaration(out_file, prefix, min,", "xgp, xgj) feet_compute(out_file, joint_id_names, R, x, xp, om, Rj, xj,", "nb_bodies, joint_id_names) # symbolic computation symbolic_computation(file_temp, nb_bodies, joint_id_names, rot_axis, parent_body_index,", "1: return np.array([[1.0, 0.0, 0.0], [0.0, cosine, -sine], [0.0, sine,", "vector of body i\\n') out_file.write(' * Ri : absolute rotational", "write_variables_declaration(out_file, prefix, min, max): out_file.write(' double ') for i in", "k in range(0, 3): if xj_print[i][j][k] != None: if not", "om[0], 'om1_', '') om_tilde[0] = get_tilde(om[0]) for i in range(1,", "vector[i]).replace('1.0*','')) new_vector[i] = sp.Symbol(elem_name) if flag_print: out_file.write('\\n') return new_vector #", "of an expression (for jacobian) def symbolic_jacob_der(Rj, xj, xgj, symb_var,", ", sp.Symbol('L_3_18')]) # trunk Dg[13] = sp.Matrix([sp.Symbol('L_1_19'), sp.Symbol('L_2_19'), sp.Symbol('L_3_19')]) Dg[14]", "all the lines for line in f: cut_line = line.split(elem)", "def com_compute(out_file, nb_bodies, joint_id_names, M, xg, xgp, xgj): out_file.write(' m_tot", "sp.Symbol('L_3_20')]) Dg[15] = sp.Matrix([sp.Symbol('L_1_21'), sp.Symbol('L_2_21'), sp.Symbol('L_3_21')]) # right arm Dg[16]", "except: 
return False, -1, -1, -1 # return true if", "else: out_write.write(', {}'.format(cut_line_2[0].strip())) count += 1 if count >= nb_max_line:", "flag_first = 1 out_file.write('({}*xg{}_{}'.format(M[j], j+1, i+1)) if j == nb_bodies-1:", "= sp.Matrix([sp.Symbol('L_1_13'), sp.Symbol('L_2_13'), sp.Symbol('L_3_13')]) Dg[8] = sp.Matrix([sp.Symbol('L_1_14'), sp.Symbol('L_2_14'), sp.Symbol('L_3_14')]) Dg[9]", "om, Rj, xj, xgj, r_elb_id, l_elb_id, r_wrist_x, r_wrist_y, r_wrist_z): #", "1 if i == 1: out_file.write('{}'.format(get_string_enum(joint_id_names[i]))) elif count >= 6:", "nb_bodies*[None] Rt = nb_bodies*[None] Rd = nb_bodies*[None] Rd[0] = sp.zeros(3,", "[Rj[index-1][i], Rj_print[index-1][i]] = print_save_symb_matrix(new_matrix, 'R{}_'.format(index), '_d{}'.format(i+1)) # write symbolic jacobian", "# compute the center of mass position and velocity def", "nb_bodies*[None] om = nb_bodies*[None] om_tilde = nb_bodies*[None] Om[0] = sp.zeros(3,", "13 # # # ###################### # # origin: in the", "enumeration\\n') out_file.write('enum {') count = 0 for i in range(1,", "[-pi/2 ; pi/2] out_file.write(' in_out.{}[0] = atan2({}, {});\\n'.format(angle_name, R_matrix[5], R_matrix[8]))", "= open('./{}.cc'.format(out_file_name), 'w') with open(in_temp, 'r') as f: # loop", "in range(0, 3): out_file.write(' in_out.r_Lfoot_cont[{}][{}] = {};\\n'.format(i, j, x_l_cont[i][j])) out_file.write('\\n')", "out_file.write(' // global com absolute position\\n') for i in range(0,", "== 2: count += 1 return count # print the", "out_file.write(' // right wrist jacobian\\n') out_file.write(' if (flag_jacob)\\n {\\n') flag_first", "max): out_file.write(' double ') for i in range(min, max+1): out_file.write('{}{}'.format(prefix,", "xgj, x_vector[j], 'q{}'.format(i+1)) [xgj[index-1][i], xgj_print[index-1][i]] = print_save_symb_vector(new_vector, 'xg{}_'.format(index), '_d{}'.format(i+1)) #", "all the matrix elements for j in range(0, 9): new_matrix[j]", "end of the file def 
write_file_end(out_file): out_file.write('}\\n') # print matrix", "global com absolute velocity\\n') for i in range(0, 3): out_file.write('", "xgp out_file.write('\\n // com absolute positions and velocities\\n') xg =", "0.0]) # right leg Dpt[1] = sp.Matrix([0.0, sp.Symbol('DPT_2_2'), 0.0]) Dpt[2]", "(Rt[0] * Dpt[0]) write_symb_xj(nb_bodies, Rj, xj, xgj, xj_print, x[0], 1)", "= cur_string.split('_') if len(cur_split) >= 2: new_string = cur_split[0] for", "= sin(in_out.theta_waist[1]);\\n') out_file.write(' s_y_torso = sin(in_out.theta_torso[1]);\\n') out_file.write(' s_z_waist = sin(in_out.theta_waist[2]);\\n')", "of 'xi' # xgji : jacobian of 'xgi' # Rji", "9): new_matrix[j] = symbolic_jacob_der(Rj, xj, xgj, R_matrix[j], 'q{}'.format(i+1)) [Rj[index-1][i], Rj_print[index-1][i]]", "out_file.write(' // right wrist absolute orientation jacobian\\n') out_file.write(' if (flag_jacob)\\n", "def print_save_symb_vector(vector, start_name, end_name): new_vector = sp.zeros(3, 1) save_vector =", "== 2 and j == 2: out_file.write(';\\n') else: out_file.write(', ')", "xp[l_foot_id] om_r_foot = om[r_foot_id] om_l_foot = om[l_foot_id] R_r_foot = R[r_foot_id]", "sp.Symbol('DPT_2_36'), sp.Symbol('DPT_3_36')]) Dpt[17] = sp.Matrix([0.0, sp.Symbol('DPT_2_39'), 0.0]) Dpt[18] = sp.Matrix([0.0,", "M): out_file.write('\\n\\n // -- symbolic computation -- //\\n') # Rj,", "orientation matrix as angles [rad]\\n') yaw_pitch_roll_angles(out_file, 'theta_Rfoot', R[r_foot_id], 1) out_file.write('\\n')", "om_tilde[i] * (Rt[i] * Dg[i]) write_symb_xgj(nb_bodies, Rj, xj, xgj, xgj_print,", "out_file.write(' in_out.r_Lwrist[{}] = {};\\n'.format(i, x_l[i])) out_file.write('\\n') out_file.write(' // left wrist", "left wrist absolute velocity\\n') for i in range(0,3): out_file.write(' in_out.rp_Lwrist[{}]", "count == 0: out_write.write(' double {}'.format(cut_line_2[0].strip())) else: out_write.write(', {}'.format(cut_line_2[0].strip())) count", "(waist), but aligned with the ground (info from IMU)\\n') 
out_file.write('", "= 0 out_file.write(',\\n {}'.format(get_string_enum(joint_id_names[i]))) else: out_file.write(', {}'.format(get_string_enum(joint_id_names[i]))) out_file.write('};\\n\\n') out_file.write('/*! \\\\brief", "== nb_bodies-1: out_file.write(';\\n\\n') else: out_file.write(' + ') out_file.write(' // global", "# remove temporary file os.remove(in_temp) # main script # rotation", "if rot_flag: result += Rj[a-1][der_q-1][(b-1)*3+(c-1)] # vector elif vec_flag: result", "Dpt_r_foot_cont[2][1] = y_min Dpt_r_foot_cont[3][1] = y_max for i in range(0,", ", 'LeftElbPitch_id' # left arm ]) out_file_name = 'forward_kinematics' gen_symbolic_out(out_file_name,", "vector elif vec_flag: result += xj[d-1][der_q-1][e-1] # apply negative if", "j in range(0,3): if matrix[i,j] == 0 or matrix[i,j] ==", "0.0], [0.0, 0.0, 1.0]]) else: return np.array([]) else: if axis", "= {};\\n'.format(i, x_r[i])) out_file.write('\\n') out_file.write(' // right wrist absolute velocity\\n')", "def get_rotation_matrix(axis, direct, cosine, sine): if direct: if axis ==", "}\\n\\n') out_file.write(' // feet absolute orientation\\n') for i in range(0,", "absolute orientation\\n') for i in range(0, 9): out_file.write(' in_out.Rwrist_or[{}] =", "save_matrix # write symbolic jacobian of a rotation matrix def", "parent_id = parent_body_index[i] x[i] = x[parent_id] + Rt[parent_id] * Dpt[i]", ", sp.Symbol('L_3_12')]) # left leg Dg[7] = sp.Matrix([sp.Symbol('L_1_13'), sp.Symbol('L_2_13'), sp.Symbol('L_3_13')])", "write_symb_vector(out_file, xgp[i], 'xgp{}_'.format(i+1), '') # jacobian xg out_file.write('\\n // jacobian", "waist 'M_7' , 'M_8' , 'M_9' , 'M_10', 'M_11', 'M_12',", "'xgp{}_'.format(i+1), '') # jacobian xg out_file.write('\\n // jacobian com absolute", "variables declarations nb_contacts = 4 x_r_foot = x[r_foot_id] x_l_foot =", "leg 2, 1, 3, 2, 1, 2, # left leg", "out_file.write(' // right wrist absolute velocity\\n') for i in range(0,3):", "xgj, Rj_print, R[i], i+1) R[i] = 
write_symb_matrix(out_file, R[i], 'R{}_'.format(i+1), '')", "count_elem(in_file, elem): count = 0; with open(in_file, 'r') as f:", "out_file.write('\\n') out_file.write(' // left foot contact points jacobian\\n') out_file.write(' if", "matrix as angles [rad]\\n') yaw_pitch_roll_angles(out_file, 'theta_waist', R[waist_id], 1) out_file.write('\\n') out_file.write('", "1 !'.format(cur_len-1)) exit() return result # write the beginning of", "j in range(0, nb_bodies-1): Rj[i][j] = sp.zeros(3, 3) xj[i][j] =", "= nb_contacts * [None] for i in range(0, nb_contacts): Dpt_r_foot_cont[i]", "in range(0, 3): out_file.write(' IMU{}{} = in_out.IMU_Orientation[{}];\\n'.format(i+1, j+1, 3*i+j)) out_file.write('\\n", "joint_id_names, rot_axis, parent_body_index, Dpt, Dg, M) # end of the", "# write the beginning of the file def write_file_beginning(out_file, joint_id_names):", "0 if count != 0: out_write.write(';\\n') # print all declarations", "= x_l_foot + R_l_foot.T * Dpt_l_foot_cont[i] # writing outputs out_file.write('", "if it has a shape 'x%a_%b' (indexes %a, %b also", "for i in range(0,3): out_file.write(' in_out.r_Lfoot[{}] = {};\\n'.format(i, x_l[i])) out_file.write('\\n')", "point def write_symb_xj(nb_bodies, Rj, xj, xgj, xj_print, x_vector, index): #", "3) # loop on all the matrix elements for j", "index parent_body_index = np.array([ -1, # waist 0, 1, 2,", ": rotational vector from the previous body to the current", "'q{}'.format(i+1)) [xj[index-1][i], xj_print[index-1][i]] = print_save_symb_vector(new_vector, 'x{}_'.format(index), '_d{}'.format(i+1)) # write symbolic", "position vector (from origin, expressed in the inertial frame) #", "= sp.Matrix([sp.Symbol('L_1_10'), sp.Symbol('L_2_10'), sp.Symbol('L_3_10')]) Dg[5] = sp.Matrix([sp.Symbol('L_1_11'), sp.Symbol('L_2_11'), sp.Symbol('L_3_11')]) Dg[6]", "om_tilde[parent_id] * (Rt[parent_id] * Dpt[i]) write_symb_xj(nb_bodies, Rj, xj, xgj, xj_print,", "np import sympy as sp import re import os ######################", 
"nb_bodies): xg[i] = x[i] + Rt[i] * Dg[i] xgp[i] =", "0.0], [sine, 0.0, cosine]]) elif axis == 3: return np.array([[cosine,", "= x[parent_id] + Rt[parent_id] * Dpt[i] xp[i] = xp[parent_id] +", "Dpt_r_foot) xp_l = xp_l_foot + om_tilde_l_foot * (R_l_foot.T * Dpt_l_foot)", "j in range(1, nb_bodies): flag_print = 0 for k in", "'torso') # compute the feet position, velocity and orientation def", "{}*{}'.format(M[k], str(xgj[k][i][j]))) else: flag_first = 1 out_file.write('({}*{}'.format(M[k], str(xgj[k][i][j]))) if k", "vector of body i # Ri : absolute rotational matrix", "elem_split = elem_str.split('-') cur_len = len(elem_split) if cur_len == 1:", "* (s_z_{}*{} + c_z_{}*{}) + {};\\n'.format(omega_out, body_part, body_part, body_part, omega_in[1],", "# writing outputs out_file.write(' // right foot absolute position\\n') for", "[-elem]]) else: return np.array([]) # compute the derivative of an", "the ground (info from IMU) # # Di : position", "= R[i].T # jacobian rotation matrices out_file.write('\\n // jacobian rotation", "R_matrix[2], R_matrix[0], R_matrix[0], R_matrix[1], R_matrix[1])) out_file.write(' in_out.{}[2] = atan2(-{}, -{});\\n'.format(angle_name,", "R[0] = sp.zeros(3, 3) for i in range(0, 3): for", "of:\\n') out_file.write(' * COM (center of mass) position and velocity\\n')", "middle point between the two pitch hip rotations # inertial", "symbolic_jacob_der(Rj, xj, xgj, R_l_foot[j], 'q{}'.format(i+1)) if cur_jac != 0: if", "r_wrist_x Dpt_r_wrist[1] = r_wrist_y Dpt_r_wrist[2] = r_wrist_z Dpt_l_wrist[0] = r_wrist_x", "# count the number of 'elem' in the file def", "write_symb_vector(out_file, om[i], 'om{}_'.format(i+1), '') om_tilde[i] = get_tilde(om[i]) # x &", "om, Rj, xj, xgj, r_foot_id, l_foot_id, x_min, x_max, y_min, y_max):", "3) xj[i][j] = sp.zeros(3, 1) xgj[i][j] = sp.zeros(3, 1) Rj_print[i][j]", "angle in [pi/2 ; 3*pi/2] out_file.write(' in_out.{}[0] = atan2(-{}, -{});\\n'.format(angle_name,", "output file out_file = 
open('./{}.cc'.format(out_file_name), 'w') with open(in_temp, 'r') as", "R, x, xp, om, Rj, xj, xgj, r_elb_id, l_elb_id, r_wrist_x,", "c_y_waist;\\n') out_file.write(' inv_c_y_torso = 1.0 / c_y_torso;\\n\\n') out_file.write(' // waist", "sp.Symbol('L_2_24'), sp.Symbol('L_3_24')]) Dg[19] = sp.Matrix([sp.Symbol('L_1_25'), sp.Symbol('L_2_25'), sp.Symbol('L_3_25')]) # left arm", "# # 02 08 # # 03 09 # #", "parent index parent_body_index = np.array([ -1, # waist 0, 1,", "17 16 21 //\\n') out_file.write(' * // 18 15 22", "sp.Symbol(elem_name) return new_vector, save_vector # save the symbolic matrix for", "i\\n') out_file.write(' * ci : cosine of the relative angle", "# Rdi : rotational matrix between body i and its", "count += len(cut_line[0].strip()) + 2 if count >= nb_max_char: out_write.write(';\\n')", "sp.zeros(3, 1) om[0] = sp.zeros(3, 1) for i in range(0,3):", "out_file.write('0.0;\\n') if i != nb_bodies-1: out_file.write('\\n') else: out_file.write(' }\\n\\n') #", "R_matrix[1])) out_file.write(' in_out.{}[2] = atan2(-{}, -{});\\n'.format(angle_name, R_matrix[1], R_matrix[0])) # compute", "sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_50')]) ## COM positions Dg = nb_bodies*[None] #", "body i # ci : cosine of the relative angle", "cut_line = line.split(' = ') if len(cut_line) == 2: if", "in range(0,3): out_file.write(' in_out.rp_Lwrist[{}] = {};\\n'.format(i, xp_l[i])) out_file.write('\\n') out_file.write(' //", "x_l_elb + R_l_elb.T * Dpt_l_wrist xp_r = xp_r_elb + om_tilde_r_elb", "2 if count >= nb_max_char: out_write.write(';\\n') count = 0 if", "range(0, nb_contacts): for j in range(0, 3): out_file.write(' in_out.r_Lfoot_cont[{}][{}] =", "nb_bodies): new_vector = sp.zeros(3, 1) # loop on all the", "body i\\n') out_file.write(' *\\n') out_file.write(' * xi : absolute position", "14, # trunk 15, 16, 17, 18, # right arm", ", 'LeftFootRoll_id' , 'LeftFootPitch_id' , # left leg 'TorsoRoll_id' ,", "03 09 # # 04 10 # # 05 11", "if Rj_print[i][j][k] != None: if not flag_first: 
flag_first = 1", "'LeftShPitch_id' , 'LeftShRoll_id' , 'LeftShYaw_id' , 'LeftElbPitch_id' # left arm", "|| (!c_y_torso))\\n {\\n') out_file.write(' return;\\n }\\n\\n') out_file.write(' inv_c_y_waist = 1.0", "= nb_bodies*[None] Rd = nb_bodies*[None] Rd[0] = sp.zeros(3, 3) R[0]", "* counted , only implemented for 0 or 1 !'.format(cur_len-1))", "replace symbolic variable by its name def write_symb_vector(out_file, vector, start_name,", "= 0 for i in range (1, nb_bodies): flag_print =", "x[r_foot_id] x_l_foot = x[l_foot_id] xp_r_foot = xp[r_foot_id] xp_l_foot = xp[l_foot_id]", "1: result += der_elem(cur_term_split[0], Rj, xj, xgj, der_var) # one", "sp.Symbol('L_3_7')]) Dg[2] = sp.Matrix([sp.Symbol('L_1_8') , sp.Symbol('L_2_8') , sp.Symbol('L_3_8')]) Dg[3] =", "out_file.write('\\n') for i in range(0, 9): out_file.write(' in_out.Lwrist_or[{}] = {};\\n'.format(i,", "for i in range(0, 9): out_file.write(' in_out.Lfoot_or[{}] = {};\\n'.format(i, R_l_foot[i]))", "= sp.Symbol('IMU{}{}'.format(i+1, j+1)) write_symb_Rj(nb_bodies, Rj, xj, xgj, Rj_print, R[0], 1)", "has a shape 'x%a_%b' (indexes %a, %b also returned) def", "elif not flag_print: flag_print = 1 out_file.write('\\n') out_file.write(' in_out.r_Lfoot_cont_der[{}][{}][{}] =", "print all declarations def print_all_declaration(in_file, out_write, nb_max_char): count = 0", "# variables initialization def write_intialization(out_file, nb_bodies, joint_id_names): out_file.write(' // --", "# Rj, xj, xgj and xgj (jacobian) Rj = nb_bodies*[None]", "xj, xgj, R_r_elb[j], 'q{}'.format(i+1)) if cur_jac != 0: if not", "sp.Symbol('DPT_3_43')]) # left arm Dpt[20] = sp.Matrix([sp.Symbol('DPT_1_37'), sp.Symbol('DPT_2_37'), sp.Symbol('DPT_3_37')]) Dpt[21]", "forward_kinematics.cc\\n') out_file.write(' * \\\\brief forward kinematics computation for the COMAN", "1: if count == 0: out_write.write(' double {}'.format(cut_line[0].strip())) else: out_write.write(',", "file_temp.close() # output file out_file = 
open('./{}.cc'.format(out_file_name), 'w') with open(in_temp,", "out_file.write(' in_out.Lwrist_or[{}] = {};\\n'.format(i, R_l_elb[i])) out_file.write('\\n') out_file.write(' // right wrist", "def isRot(value): try: a = int(value.split('_')[0].split('R')[1]) b = int(value.split('_')[1][0]) c", "in_out.r_Lfoot_der[{}][{}] = {};\\n'.format(get_string_enum(joint_id_names[i]), j, cur_jac)) out_file.write(' }\\n\\n') out_file.write(' // right", "from an orientation matrix, compute the roll, pitch, yaw angles", "(from origin, expressed in the inertial frame)\\n') out_file.write(' * of", "to its COM (center of mass) G_i,\\n') out_file.write(' * expressed", "if len(cur_split) >= 2: new_string = cur_split[0] for i in", "else: if axis == 1: return np.array([[1.0, 0.0, 0.0], [0.0,", "None else: elem_name = '{}{}{}{}'.format(start_name, i+1, j+1, end_name) save_matrix[3*i+j] =", "[-elem], [0.0]]) elif axis == 3: return np.array([[0.0], [0.0], [-elem]])", "&in_out)\\n{\\n') # compute the center of mass position and velocity", "14 23 //\\n') out_file.write(' * // 20 01 24 //\\n')", "save_vector[i] = None else: elem_name = '{}{}{}'.format(start_name, i+1, end_name) save_vector[i]", "if ((!c_y_waist) || (!c_y_torso))\\n {\\n') out_file.write(' return;\\n }\\n\\n') out_file.write(' inv_c_y_waist", "angle derivatives [rad/s]\\n') theta_dot_compute(out_file, om[waist_id], 'omega_waist', 'waist') out_file.write('\\n') out_file.write(' //", "orientation matrix as angles [rad]\\n') yaw_pitch_roll_angles(out_file, 'theta_waist', R[waist_id], 1) out_file.write('\\n')", ": cosine of the relative angle before body i\\n') out_file.write('", "{};\\n'.format(i, x_r[i])) out_file.write('\\n') out_file.write(' // right wrist absolute velocity\\n') for", "nb_bodies): Rj[i] = nb_bodies*[None] xj[i] = nb_bodies*[None] xgj[i] = nb_bodies*[None]", "= sp.Matrix([sp.Symbol('L_1_14'), sp.Symbol('L_2_14'), sp.Symbol('L_3_14')]) Dg[9] = sp.Matrix([sp.Symbol('L_1_15'), sp.Symbol('L_2_15'), 
sp.Symbol('L_3_15')]) Dg[10]", "2, 1, 3, 2 # left arm ]) # parent", "out_file.write(' s{} = sin(in_out.q_mot[{}]);\\n'.format(i+1, joint_id_names[i])) out_file.write('\\n // joint relative velocities\\n')", "x_r = x_r_foot + R_r_foot.T * Dpt_r_foot x_l = x_l_foot", "new_vector = sp.zeros(3, 1) flag_print = 0 for i in", "out_file.write('\\n // anchor point absolute positions and velocities\\n') x =", "xgj, R_r_elb[j], 'q{}'.format(i+1)) if cur_jac != 0: if not flag_first:", "out_file.write(' * xpi : derivative of xi\\n') out_file.write(' * xgpi", "in the inertial frame)\\n') out_file.write(' * of the anchor point", "com absolute position\\n') for i in range(0, 3): out_file.write(' in_out.r_COM[{}]", "{\\n') flag_first = 0 for i in range(0, nb_bodies): for", "= nb_bodies*[None] # waist Dg[0] = sp.Matrix([sp.Symbol('L_1_6'), sp.Symbol('L_2_6'), sp.Symbol('L_3_6')]) #", "on all terms for cur_term in term_list: # detect products", "j+1)) write_symb_Rj(nb_bodies, Rj, xj, xgj, Rj_print, R[0], 1) R[0] =", "= 1 out_file.write('\\n') out_file.write('{}'.format(xj_print[i][j][k])) out_file.write(' }\\n') # xg & xgp", "= sp.Matrix([sp.Symbol('DPT_1_37'), sp.Symbol('DPT_2_37'), sp.Symbol('DPT_3_37')]) Dpt[21] = sp.Matrix([0.0, sp.Symbol('DPT_2_46'), 0.0]) Dpt[22]", "out_file.write(' * xji : jacobian of \\'xi\\'\\n') out_file.write(' * xgji", "0.0, -sine], [0.0, 1.0, 0.0], [sine, 0.0, cosine]]) elif axis", "= sp.Matrix([sp.Symbol('L_1_17'), sp.Symbol('L_2_17'), sp.Symbol('L_3_17')]) Dg[12] = sp.Matrix([sp.Symbol('L_1_18'), 0.0 , sp.Symbol('L_3_18')])", "') # variables initialization def write_intialization(out_file, nb_bodies, joint_id_names): out_file.write(' //", "out_file.write(' // left wrist jacobian\\n') out_file.write(' if (flag_jacob)\\n {\\n') flag_first", "cos(in_out.theta_Lfoot[1]);\\n') out_file.write(' c_z_Rfoot = cos(in_out.theta_Rfoot[2]);\\n') out_file.write(' c_z_Lfoot = cos(in_out.theta_Lfoot[2]);\\n\\n') out_file.write('", "cosine, -sine], [0.0, sine, 
cosine]]) elif axis == 2: return", "body_part, omega_in[1], body_part, omega_in[0])) out_file.write(' in_out.{}[2] = inv_c_y_{} * s_y_{}", "elem_split[0] elif cur_len == 2: # negative neg_flag = 1", "for j in range(0, 3): R[0][i,j] = sp.Symbol('IMU{}{}'.format(i+1, j+1)) write_symb_Rj(nb_bodies,", "the torso def torso_waist_angles(out_file, R, om, waist_id, torso_id): out_file.write(' //", "sin(in_out.theta_waist[1]);\\n') out_file.write(' s_y_torso = sin(in_out.theta_torso[1]);\\n') out_file.write(' s_z_waist = sin(in_out.theta_waist[2]);\\n') out_file.write('", "# vector elif vec_flag: result += xj[d-1][der_q-1][e-1] # apply negative", "* Rdi : rotational matrix between body i and its", "nb_contacts * [None] Dpt_l_foot_cont = nb_contacts * [None] for i", "i in range(0,3): if vector[i] == 0 or vector[i] ==", "trunk 15, 16, 17, 18, # right arm 15, 20,", "out_file.write(' in_out.rp_COM[{}] = '.format(i)) flag_first = 0 for j in", "= str(symb_var).replace('- ','-').replace('-','+-').split('+') if term_list[0] == '': term_list.pop(0) result =", "int(value.split('_')[1][0]) c = int(value.split('_')[1][1]) return True, a, b, c except:", "for i in range(0,3): out_file.write(' in_out.r_Rwrist[{}] = {};\\n'.format(i, x_r[i])) out_file.write('\\n')", "in f: # declaration if len(line.split('// -- variables initialization --", "= 1 out_file.write('\\n') out_file.write(' in_out.r_Rwrist_der[{}][{}] = {};\\n'.format(get_string_enum(joint_id_names[i]), j, cur_jac)) out_file.write('", "c_z_{}*{} - s_z_{}*{};\\n'.format(omega_out, body_part, omega_in[1], body_part, omega_in[0])) out_file.write(' in_out.{}[2] =", "with open(in_file, 'r') as f: # loop on all the", "between the two pitch hip rotations\\n') out_file.write(' * inertial frame:", "= line.split(' = ') if len(cut_line) == 2: if len(cut_line[0].split('['))", "range(0, nb_contacts): for j in range(0, 3): Dpt_l_foot_cont[i][j] = Dpt_r_foot_cont[i][j]", "= xp_l_foot + om_tilde_l_foot * (R_l_foot.T * Dpt_l_foot) for i", 
"out_file.write('};\\n\\n') out_file.write('/*! \\\\brief main kinematics computation\\n') out_file.write(' *\\n') out_file.write(' *", "1 elif not flag_print: flag_print = 1 out_file.write('\\n') out_file.write(' in_out.r_Rfoot_der[{}][{}]", "two pitch hip rotations\\n') out_file.write(' * inertial frame: located at", "1) for i in range(0,3): om[0][i] = sp.Symbol('omega_{}'.format(i+1)) om[0] =", "= write_symb_vector(out_file, xp[i], 'xp{}_'.format(i+1), '') # jacobian x out_file.write('\\n //", "sp.Symbol('L_2_15'), sp.Symbol('L_3_15')]) Dg[10] = sp.Matrix([sp.Symbol('L_1_16'), sp.Symbol('L_2_16'), sp.Symbol('L_3_16')]) Dg[11] = sp.Matrix([sp.Symbol('L_1_17'),", "position, velocity and orientation def wrists_compute(out_file, joint_id_names, R, x, xp,", "}\\n\\n') out_file.write(' // left wrist absolute position\\n') for i in", "in range(0, nb_bodies): if flag_first: out_file.write(' + {}*{}'.format(M[j], xg[j][i])) else:", "# DGi : position vector from the anchor point of", "out_file.write(' in_out.rp_Lwrist[{}] = {};\\n'.format(i, xp_l[i])) out_file.write('\\n') out_file.write(' // left wrist", "new_string = '{}_{}'.format(new_string, cur_split[i].upper()) return new_string # write the end", "nb_bodies): for j in range(1, nb_bodies): flag_print = 0 for", ": sine of the relative angle before body i\\n') out_file.write('", "# # # 17 16 21 # # 18 15", "write symbolic matrix and replace symbolic variable by its name", "Rd[i] * R[parent_body_index[i]] write_symb_Rj(nb_bodies, Rj, xj, xgj, Rj_print, R[i], i+1)", "0; with open(in_file, 'r') as f: # loop on all", "else: return np.array([]) else: if axis == 1: return np.array([[-elem],", "flag_print = 1 elem_name = '{}{}{}{}'.format(start_name, i+1, j+1, end_name) out_file.write('", "R_r_elb[j], 'q{}'.format(i+1)) if cur_jac != 0: if not flag_first: flag_first", "range(0, 3): out_file.write(' in_out.r_COM_der[{}][{}] = '.format(get_string_enum(joint_id_names[i]), j)) flag_first = 0", "[None] # rotation matrices 
out_file.write('\\n // rotation matrices\\n') R =", "on all the matrix elements for j in range(0, 9):", "right wrist absolute orientation jacobian\\n') out_file.write(' if (flag_jacob)\\n {\\n') flag_first", "max: out_file.write(';\\n') else: out_file.write(', ') # variables initialization def write_intialization(out_file,", "Ri # xji : jacobian of 'xi' # xgji :", "0: out_write.write(';\\n') # get tilde matrix def get_tilde(v): return np.array([[0.0,", "else: return np.array([]) # get vector axis def get_vector_axis(axis, direct,", "x_l_foot + R_l_foot.T * Dpt_l_foot_cont[i] # writing outputs out_file.write(' //", "int(der_var.replace('q','')) # detect positive/negative elem_split = elem_str.split('-') cur_len = len(elem_split)", "out_file.write(' // right foot orientation matrix as angles [rad]\\n') yaw_pitch_roll_angles(out_file,", "relative frame of the current body i # Omi :", "in_out.rp_Rwrist[{}] = {};\\n'.format(i, xp_r[i])) out_file.write('\\n') out_file.write(' // right wrist jacobian\\n')", "new_string = cur_split[0] for i in range(1, len(cur_split)-1): new_string =", "if flag_first: out_file.write(' + {}*xgp{}_{}'.format(M[j], j+1, i+1)) else: flag_first =", "out_file.write('}\\n') # print matrix components declaration def write_matrix_declaration(out_file, prefix): out_file.write('", "1: new_matrix[i,j] = matrix[i,j] save_matrix[3*i+j] = None else: elem_name =", "0 for k in range(0, 3): if xgj_print[i][j][k] != None:", "= {};\\n'.format(i, xp_r[i])) out_file.write('\\n') out_file.write(' // right wrist jacobian\\n') out_file.write('", "not flag_print: flag_print = 1 out_file.write('\\n') out_file.write(' in_out.r_Lfoot_cont_der[{}][{}][{}] = {};\\n'.format(i,", "in range(0,3): if vector[i] == 0 or vector[i] == 1:", "out_file.write(' in_out.r_Rfoot_cont_der[{}][{}][{}] = {};\\n'.format(i, get_string_enum(joint_id_names[j]), k, cur_jac)) out_file.write(' }\\n\\n') out_file.write('", "3: return np.array([[cosine, sine, 0.0], [-sine, cosine, 0.0], [0.0, 
0.0,", "xj_print[i] = nb_bodies*[None] xgj_print[i] = nb_bodies*[None] for j in range(0,", ", # trunk 'RightShPitch_id' , 'RightShRoll_id' , 'RightShYaw_id' , 'RightElbPitch_id',", "velocity\\n') for i in range(0, 3): out_file.write(' in_out.rp_COM[{}] = '.format(i))", "= -r_wrist_y Dpt_l_wrist[2] = r_wrist_z # computation om_tilde_r_elb = get_tilde(om_r_elb)", "= sp.zeros(3, 3) flag_print = 0 for i in range(0,3):", "angle before body i # # xi : absolute position", "the symbolic matrix for print def print_save_symb_matrix(matrix, start_name, end_name): new_matrix", "# right arm Dpt[16] = sp.Matrix([sp.Symbol('DPT_1_36'), sp.Symbol('DPT_2_36'), sp.Symbol('DPT_3_36')]) Dpt[17] =", "# 17 16 21 # # 18 15 22 #", "'xp1_', '') for i in range(1, nb_bodies): parent_id = parent_body_index[i]", "for i in range(0,3): for j in range(0,3): out_file.write('{}{}{}'.format(prefix, i+1,", ">= nb_max_char: out_write.write(';\\n') count = 0 if count != 0:", "om[l_foot_id] R_r_foot = R[r_foot_id] R_l_foot = R[l_foot_id] Dpt_r_foot = sp.zeros(3,", "else: if axis == 1: return np.array([[-elem], [0.0], [0.0]]) elif", "nb_bodies): flag_print = 0 for k in range(0, 3): cur_jac", "'RightHipPitch_id', 'RightHipRoll_id', 'RightHipYaw_id', 'RightKneePitch_id', 'RightFootRoll_id', 'RightFootPitch_id', # right leg 'LeftHipPitch_id'", "in [pi/2 ; 3*pi/2] out_file.write(' in_out.{}[0] = atan2(-{}, -{});\\n'.format(angle_name, R_matrix[5],", "range (1, nb_bodies): flag_print = 0 for j in range(0,", "i in range(1, nb_bodies): out_file.write(' s{} = sin(in_out.q_mot[{}]);\\n'.format(i+1, joint_id_names[i])) out_file.write('\\n", "xj[i] = nb_bodies*[None] xgj[i] = nb_bodies*[None] Rj_print[i] = nb_bodies*[None] xj_print[i]", "3 * [None] xgj_print[i][j] = 3 * [None] # rotation", "i in range(1, nb_bodies): parent_id = parent_body_index[i] x[i] = x[parent_id]", "sp.Symbol('DPT_3_22')]) Dpt[11] = sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_24')]) Dpt[12] = sp.Matrix([0.0, 0.0,", "sp.Matrix([0.0, 0.0, 0.0]) # 
trunk Dpt[13] = sp.Matrix([sp.Symbol('DPT_1_4'), 0.0, sp.Symbol('DPT_3_4')])", "wrist absolute orientation jacobian\\n') out_file.write(' if (flag_jacob)\\n {\\n') flag_first =", "nb_bodies): for j in range(0, 3): out_file.write(' in_out.r_COM_der[{}][{}] = '.format(get_string_enum(joint_id_names[i]),", "out_file.write(' + {}*{}'.format(M[k], str(xgj[k][i][j]))) else: flag_first = 1 out_file.write('({}*{}'.format(M[k], str(xgj[k][i][j])))", "symbolic jacobian of a rotation matrix def write_symb_Rj(nb_bodies, Rj, xj,", ">= 2: new_string = cur_split[0] for i in range(1, len(cur_split)-1):", "flag_first = 0 for i in range(0, nb_bodies): for j", "count = 0 for i in range(1, len(joint_id_names)): count +=", "flag_print = 1 out_file.write('\\n') out_file.write(' in_out.Lwrist_or_der[{}][{}] = {};\\n'.format(get_string_enum(joint_id_names[i]), j, cur_jac))", "== 0: out_write.write(' double {}'.format(cut_line[0].strip())) else: out_write.write(', {}'.format(cut_line[0].strip())) count +=", "[0.0], [elem]]) else: return np.array([]) else: if axis == 1:", "// joint sines\\n') for i in range(1, nb_bodies): out_file.write(' s{}", "== 1: new_matrix[i,j] = matrix[i,j] else: flag_print = 1 elem_name", ", 'LeftShRoll_id' , 'LeftShYaw_id' , 'LeftElbPitch_id' # left arm ])", "out_file.write(' }\\n') # xg & xgp out_file.write('\\n // com absolute", "absolute positions and velocities\\n') x = nb_bodies*[None] xp = nb_bodies*[None]", "0 or vector[i] == 1: new_vector[i] = vector[i] else: flag_print", "and its predecessor # si : sine of the relative", "1 out_file.write('\\n') out_file.write(' in_out.r_Rfoot_cont_der[{}][{}][{}] = {};\\n'.format(i, get_string_enum(joint_id_names[j]), k, cur_jac)) out_file.write('", "not flag_print: flag_print = 1 out_file.write('\\n') out_file.write(' in_out.Lwrist_or_der[{}][{}] = {};\\n'.format(get_string_enum(joint_id_names[i]),", "computation for the COMAN model\\n') out_file.write(' */\\n\\n') out_file.write('// joints enumeration\\n')", "feet 
position, velocity and orientation def feet_compute(out_file, joint_id_names, R, x,", "out_file.write(' // left foot orientation matrix as angles [rad]\\n') yaw_pitch_roll_angles(out_file,", "* Dpt_l_foot) for i in range(0, nb_contacts): x_r_cont[i] = x_r_foot", "= line.split(' = ') if len(cut_line_1) == 2 and len(cut_line_2)", "23 //\\n') out_file.write(' * // 20 01 24 //\\n') out_file.write('", "angles [rad]\\n') yaw_pitch_roll_angles(out_file, 'theta_Lfoot', R[l_foot_id], 1) out_file.write('\\n') out_file.write(' c_y_Rfoot =", "j+1, end_name) save_matrix[3*i+j] = ' {} = {};\\n'.format(elem_name, matrix[i,j]).replace('1.0*','') new_matrix[i,j]", "= sp.Matrix([sp.Symbol('L_1_26'), sp.Symbol('L_2_26'), sp.Symbol('L_3_26')]) Dg[21] = sp.Matrix([sp.Symbol('L_1_27'), sp.Symbol('L_2_27'), sp.Symbol('L_3_27')]) Dg[22]", "xgj, Rj_print, R[0], 1) R[0] = write_symb_matrix(out_file, R[0], 'R1_', '')", "x_r_elb + R_r_elb.T * Dpt_r_wrist x_l = x_l_elb + R_l_elb.T", "sp.Symbol('DPT_3_36')]) Dpt[17] = sp.Matrix([0.0, sp.Symbol('DPT_2_39'), 0.0]) Dpt[18] = sp.Matrix([0.0, 0.0,", "list of all terms term_list = str(symb_var).replace('- ','-').replace('-','+-').split('+') if term_list[0]", "COM (center of mass) G_i,\\n') out_file.write(' * expressed in the", "the inertial frame)\\n') out_file.write(' * of the anchor point of", "== 3: return np.array([[0.0], [0.0], [elem]]) else: return np.array([]) else:", "\\'xgi\\'\\n') out_file.write(' * Rji : jacobian of \\'Ri\\'\\n') out_file.write(' */\\n')", "nb_bodies, joint_id_names, M, xg, xgp, xgj): out_file.write(' m_tot = ')", "in range(0,3): for j in range(0,3): out_file.write('{}{}{}'.format(prefix, i+1, j+1)) if", "') if len(cut_line) == 2: if len(cut_line[0].split('[')) == 1: if", "atan2({}, {});\\n'.format(angle_name, R_matrix[1], R_matrix[0])) else: # epsilon = -1 ->", "in_out.r_Rfoot_der[{}][{}] = {};\\n'.format(get_string_enum(joint_id_names[i]), j, cur_jac)) out_file.write(' }\\n\\n') out_file.write(' // left", 
"out_file.write(' * \\\\brief forward kinematics computation for the COMAN model\\n')", "// 17 16 21 //\\n') out_file.write(' * // 18 15", "np.array([0, # waist 2, 1, 3, 2, 1, 2, #", "= sp.Matrix([sp.Symbol('L_1_22'), sp.Symbol('L_2_22'), sp.Symbol('L_3_22')]) Dg[17] = sp.Matrix([sp.Symbol('L_1_23'), sp.Symbol('L_2_23'), sp.Symbol('L_3_23')]) Dg[18]", "x[i], 'x{}_'.format(i+1), '') xp[i] = write_symb_vector(out_file, xp[i], 'xp{}_'.format(i+1), '') #", "3: return np.array([[0.0], [0.0], [elem]]) else: return np.array([]) else: if", "= inv_c_y_{} * (c_z_{}*{} + s_z_{}*{});\\n'.format(omega_out, body_part, body_part, omega_in[0], body_part,", "nb_bodies): out_file.write(' s{} = sin(in_out.q_mot[{}]);\\n'.format(i+1, joint_id_names[i])) out_file.write('\\n // joint relative", "3): R[0][i,j] = sp.Symbol('IMU{}{}'.format(i+1, j+1)) write_symb_Rj(nb_bodies, Rj, xj, xgj, Rj_print,", "flag_print: flag_print = 1 out_file.write('\\n') out_file.write(' in_out.r_Lwrist_der[{}][{}] = {};\\n'.format(get_string_enum(joint_id_names[i]), j,", "= sin(in_out.theta_waist[2]);\\n') out_file.write(' s_z_torso = sin(in_out.theta_torso[2]);\\n\\n') out_file.write(' if ((!c_y_waist) ||", "def isInt(value): try: int(value) return True except: return False #", "Ri\\n') out_file.write(' * xji : jacobian of \\'xi\\'\\n') out_file.write(' *", "out_file.write('\\n') out_file.write(' in_out.Rwrist_or_der[{}][{}] = {};\\n'.format(get_string_enum(joint_id_names[i]), j, cur_jac)) out_file.write(' }\\n\\n') out_file.write('", "'waist') out_file.write('\\n') out_file.write(' // torso orientation angle derivatives [rad/s]\\n') theta_dot_compute(out_file,", "= sp.zeros(3, 3) xj[i][j] = sp.zeros(3, 1) xgj[i][j] = sp.zeros(3,", "sp.Symbol('L_3_23')]) Dg[18] = sp.Matrix([sp.Symbol('L_1_24'), sp.Symbol('L_2_24'), sp.Symbol('L_3_24')]) Dg[19] = sp.Matrix([sp.Symbol('L_1_25'), sp.Symbol('L_2_25'),", "import sympy as sp import re import os ###################### #", "* feet position, velocity and 
orientation\\n') out_file.write(' * waist and", "in range(0, nb_contacts): Dpt_r_foot_cont[i][2] = sp.Symbol('DPT_3_16') for i in range(0,", "2 and j == 2: out_file.write(';\\n') else: out_file.write(', ') #", "range(0, 3): out_file.write(' IMU{}{} = in_out.IMU_Orientation[{}];\\n'.format(i+1, j+1, 3*i+j)) out_file.write('\\n //", "0: out_write.write(' double {}'.format(cut_line_2[0].strip())) else: out_write.write(', {}'.format(cut_line_2[0].strip())) count += 1", "0 for i in range(1, len(joint_id_names)): count += 1 if", "joint_id_names, R, x, xp, om, Rj, xj, xgj, 6, 12,", "return np.array([[0.0, -v[2], v[1]], [v[2], 0.0, -v[0]], [-v[1], v[0], 0.0]])", "return True, a, b, c except: return False, -1, -1,", "body\\n') out_file.write(' * DGi : position vector from the anchor", "// anchor point absolute positions and velocities\\n') x = nb_bodies*[None]", "sp.Symbol('DPT_2_18'), 0.0]) Dpt[9] = sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_20')]) Dpt[10] = sp.Matrix([0.0,", "of mass position and velocity def com_compute(out_file, nb_bodies, joint_id_names, M,", "pitch, yaw angles (and derivative) def yaw_pitch_roll_angles(out_file, angle_name, R_matrix, epsilon):", "int(value.split('_')[0].split('x')[1]) b = int(value.split('_')[1]) return True, a, b except: return", "jacobian xg out_file.write('\\n // jacobian com absolute positions\\n') out_file.write(' if", "range(0, 3): out_file.write(' in_out.r_COM[{}] = '.format(i)) flag_first = 0 for", "of body i\\n') out_file.write(' * xgi : absolute position vector", "if i != nb_bodies-1: out_file.write('\\n') else: out_file.write(' }\\n\\n') # from", "count += 1 if count >= nb_max_line: out_write.write(';\\n') count =", "2 in negative detection !'.format(cur_len)) exit() # compute derivative result", "for i in range(0, 3): out_file.write(' omega_{} = in_out.IMU_Angular_Rate[{}];\\n'.format(i+1, i))", "write_symb_matrix(out_file, R[0], 'R1_', '') Rt[0] = R[0].T for i in", "//\\n\\n') com_compute(out_file, nb_bodies, joint_id_names, 
M, xg, xgp, xgj) feet_compute(out_file, joint_id_names,", "out_file.write(' {} = {};\\n'.format(elem_name, vector[i]).replace('1.0*','')) new_vector[i] = sp.Symbol(elem_name) if flag_print:", "R, x, xp, om, Rj, xj, xgj, 6, 12, -0.06,", "computation symbolic_computation(file_temp, nb_bodies, joint_id_names, rot_axis, parent_body_index, Dpt, Dg, M) #", "0.0, sp.Symbol('DPT_3_50')]) ## COM positions Dg = nb_bodies*[None] # waist", ", sp.Symbol('L_2_7') , sp.Symbol('L_3_7')]) Dg[2] = sp.Matrix([sp.Symbol('L_1_8') , sp.Symbol('L_2_8') ,", "* Dpt_l_wrist xp_r = xp_r_elb + om_tilde_r_elb * (R_r_elb.T *", "// left wrist absolute velocity\\n') for i in range(0,3): out_file.write('", "in range(1, nb_bodies): parent_id = parent_body_index[i] x[i] = x[parent_id] +", "out_file.write(' * \\\\file forward_kinematics.cc\\n') out_file.write(' * \\\\brief forward kinematics computation", "xgj = nb_bodies*[None] Rj_print = nb_bodies*[None] xj_print = nb_bodies*[None] xgj_print", "# get rotation matrix def get_rotation_matrix(axis, direct, cosine, sine): if", "sp.Symbol('DPT_3_24')]) Dpt[12] = sp.Matrix([0.0, 0.0, 0.0]) # trunk Dpt[13] =", "= c_z_{}*{} - s_z_{}*{};\\n'.format(omega_out, body_part, omega_in[1], body_part, omega_in[0])) out_file.write(' in_out.{}[2]", "the number of 'elem' in the file def count_elem(in_file, elem):", "R_matrix[2], R_matrix[0], R_matrix[0], R_matrix[1], R_matrix[1])) out_file.write(' in_out.{}[2] = atan2({}, {});\\n'.format(angle_name,", "0.0, sp.Symbol('DPT_3_41')]) Dpt[19] = sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_43')]) # left arm", "sin(in_out.q_mot[{}]);\\n'.format(i+1, joint_id_names[i])) out_file.write('\\n // joint relative velocities\\n') for i in", "axis def get_vector_axis(axis, direct, elem): if direct: if axis ==", "True except: return False # return true if it has", "else: return np.array([]) else: if axis == 1: return np.array([[1.0,", "M): # temporary file in_temp = './{}_temp.cc'.format(out_file_name) file_temp = open(in_temp,", "axis == 
3: return np.array([[0.0], [0.0], [-elem]]) else: return np.array([])", "int(value) return True except: return False # return true if", "= 1 elem_name = '{}{}{}'.format(start_name, i+1, end_name) out_file.write(' {} =", "jacobian anchor point positions\\n') out_file.write(' if (flag_jacob)\\n {\\n') flag_first =", "len(cur_term_split) # no product if cur_len == 1: result +=", "== 1: new_matrix[i,j] = matrix[i,j] save_matrix[3*i+j] = None else: elem_name", "= cur_term.split('*') cur_len = len(cur_term_split) # no product if cur_len", "new_vector = sp.zeros(3, 1) # loop on all the vector", "= sp.Matrix([sp.Symbol('L_1_29'), sp.Symbol('L_2_29'), sp.Symbol('L_3_29')]) # masses M = np.array([ 'M_6',", "'M_20', 'M_21', # trunk 'M_22', 'M_23', 'M_24', 'M_25', # right", "s_z_Lfoot = sin(in_out.theta_Lfoot[2]);\\n\\n') out_file.write(' if ((!c_y_Rfoot) || (!c_y_Lfoot))\\n {\\n') out_file.write('", "+ {}*{}'.format(M[j], xg[j][i])) else: flag_first = 1 out_file.write('({}*xg{}_{}'.format(M[j], j+1, i+1))", "inv_c_y_{} * (c_z_{}*{} + s_z_{}*{});\\n'.format(omega_out, body_part, body_part, omega_in[0], body_part, omega_in[1]))", "joint_id_names) # symbolic computation symbolic_computation(file_temp, nb_bodies, joint_id_names, rot_axis, parent_body_index, Dpt,", "1 out_file.write('\\n') out_file.write(' in_out.r_Rfoot_der[{}][{}] = {};\\n'.format(get_string_enum(joint_id_names[i]), j, cur_jac)) out_file.write(' }\\n\\n')", "= isVec(pos_str) # rotation matrix if rot_flag: result += Rj[a-1][der_q-1][(b-1)*3+(c-1)]", "out_file.write('\\n') out_file.write(' // left foot orientation angle derivatives [rad/s]\\n') theta_dot_compute(out_file,", "range (1, nb_bodies): flag_print = 0 for j in range(0,9):", "// right wrist absolute velocity\\n') for i in range(0,3): out_file.write('", "sp.Symbol('L_2_29'), sp.Symbol('L_3_29')]) # masses M = np.array([ 'M_6', # waist", "cur_term in term_list: # detect products cur_term_split = cur_term.split('*') cur_len", "elem): count = 0; with 
open(in_file, 'r') as f: #", "out_file.write(' * xgji : jacobian of \\'xgi\\'\\n') out_file.write(' * Rji", "# variables initialization write_intialization(file_temp, nb_bodies, joint_id_names) # symbolic computation symbolic_computation(file_temp,", "= sp.Matrix([sp.Symbol('L_1_6'), sp.Symbol('L_2_6'), sp.Symbol('L_3_6')]) # right leg Dg[1] = sp.Matrix([sp.Symbol('L_1_7')", "= xp[l_elb_id] om_r_elb = om[r_elb_id] om_l_elb = om[l_elb_id] R_r_elb =", "## COM positions Dg = nb_bodies*[None] # waist Dg[0] =", "the relative frame of the current body i\\n') out_file.write(' *", "0: if not flag_first: flag_first = 1 flag_print = 1", "si : sine of the relative angle before body i", "nb_bodies*[None] om_tilde = nb_bodies*[None] Om[0] = sp.zeros(3, 1) om[0] =", "out_file.write(', ') # variables initialization def write_intialization(out_file, nb_bodies, joint_id_names): out_file.write('", "= sp.Symbol(elem_name) return new_vector, save_vector # save the symbolic matrix", "04 10 //\\n') out_file.write(' * // 05 11 //\\n') out_file.write('", "//')) != 1: out_file.write(' // -- variables declaration -- //\\n\\n')", "xgj, R_l_foot[j], 'q{}'.format(i+1)) if cur_jac != 0: if not flag_first:", "cosine]]) elif axis == 2: return np.array([[cosine, 0.0, sine], [0.0,", "pos_str = elem_split[1] else: print('Error: {} instead of 1 or", "{};\\n'.format(elem_name, matrix[i,j]).replace('1.0*','')) new_matrix[i,j] = sp.Symbol(elem_name) if flag_print: out_file.write('\\n') return new_matrix", "x_l_elb = x[l_elb_id] xp_r_elb = xp[r_elb_id] xp_l_elb = xp[l_elb_id] om_r_elb", "d, e] = isVec(pos_str) # rotation matrix if rot_flag: result", "+ 2 if count >= nb_max_char: out_write.write(';\\n') count = 0", "result += der_elem(cur_term_split[0], Rj, xj, xgj, der_var) # one product", "// jacobian rotation matrices\\n') out_file.write(' if (flag_jacob)\\n {\\n') flag_first =", "xgj, xj_print, x[i], i+1) x[i] = write_symb_vector(out_file, x[i], 'x{}_'.format(i+1), '')", "0 # cosine if pos_str == 
'c{}'.format(der_q): result += -sp.Symbol('s{}'.format(der_q))", "out_file.write('\\n') out_file.write(' in_out.r_Rfoot_cont_der[{}][{}][{}] = {};\\n'.format(i, get_string_enum(joint_id_names[j]), k, cur_jac)) out_file.write(' }\\n\\n')", "the relative # frame of the previous body # Rdi", "out_file.write('\\n') out_file.write(' // left foot jacobian\\n') out_file.write(' if (flag_jacob)\\n {\\n')", "= elem_str.split('-') cur_len = len(elem_split) if cur_len == 1: #", "= 1 elif not flag_print: flag_print = 1 out_file.write('\\n') out_file.write('", "== 0 or vector[i] == 1: new_vector[i] = vector[i] save_vector[i]", "= om[l_elb_id] R_r_elb = R[r_elb_id] R_l_elb = R[l_elb_id] Dpt_r_wrist =", "orientation angle derivatives [rad/s]\\n') theta_dot_compute(out_file, om[r_foot_id], 'omega_Rfoot', 'Rfoot') out_file.write('\\n') out_file.write('", "right arm 'LeftShPitch_id' , 'LeftShRoll_id' , 'LeftShYaw_id' , 'LeftElbPitch_id' #", "body_part, omega_in[0], omega_in[2])) # angles (position and derivative) of the", "xg, xgp, xgj): out_file.write(' m_tot = ') for i in", "its predecessor # si : sine of the relative angle", "omega_in[1], body_part, omega_in[0], omega_in[2])) # angles (position and derivative) of", "x[i] = write_symb_vector(out_file, x[i], 'x{}_'.format(i+1), '') xp[i] = write_symb_vector(out_file, xp[i],", "writing outputs out_file.write(' // right wrist absolute position\\n') for i", "= nb_contacts * [None] # computation om_tilde_r_foot = get_tilde(om_r_foot) om_tilde_l_foot", "com absolute positions and velocities\\n') xg = nb_bodies*[None] xgp =", "out_file.write(' in_out.{}[2] = atan2(-{}, -{});\\n'.format(angle_name, R_matrix[1], R_matrix[0])) # compute the", "# xgi : absolute position vector of the COM G_i", "print('Error: {} * counted , only implemented for 0 or", "sp.Symbol('L_2_7') , sp.Symbol('L_3_7')]) Dg[2] = sp.Matrix([sp.Symbol('L_1_8') , sp.Symbol('L_2_8') , sp.Symbol('L_3_8')])", "j, cur_jac)) out_file.write(' }\\n\\n') # get a string for the", 
"relative\\n') out_file.write(' * frame of the previous body\\n') out_file.write(' *", "new_matrix # save the symbolic vector for print def print_save_symb_vector(vector,", "'xi' # xgji : jacobian of 'xgi' # Rji :", "cosine]]) elif axis == 2: return np.array([[cosine, 0.0, -sine], [0.0,", "sp.Symbol('L_2_28'), sp.Symbol('L_3_28')]) Dg[23] = sp.Matrix([sp.Symbol('L_1_29'), sp.Symbol('L_2_29'), sp.Symbol('L_3_29')]) # masses M", "out_file.write(' c{} = cos(in_out.q_mot[{}]);\\n'.format(i+1, joint_id_names[i])) out_file.write('\\n // joint sines\\n') for", "{};\\n'.format(get_string_enum(joint_id_names[i]), j, cur_jac)) out_file.write(' }\\n\\n') out_file.write(' // wrists absolute orientation\\n')", "sp.Matrix([sp.Symbol('L_1_20'), sp.Symbol('L_2_20'), sp.Symbol('L_3_20')]) Dg[15] = sp.Matrix([sp.Symbol('L_1_21'), sp.Symbol('L_2_21'), sp.Symbol('L_3_21')]) # right", "body_part): out_file.write(' in_out.{}[0] = inv_c_y_{} * (c_z_{}*{} + s_z_{}*{});\\n'.format(omega_out, body_part,", "# get tilde matrix def get_tilde(v): return np.array([[0.0, -v[2], v[1]],", "{};\\n'.format(i, xp_r[i])) out_file.write('\\n') out_file.write(' // right foot jacobian\\n') out_file.write(' if", "return np.array([]) else: if axis == 1: return np.array([[1.0, 0.0,", "'omega_waist', 'waist') out_file.write('\\n') out_file.write(' // torso orientation angle derivatives [rad/s]\\n')", "sine, 0.0], [-sine, cosine, 0.0], [0.0, 0.0, 1.0]]) else: return", "range(0,3): om[0][i] = sp.Symbol('omega_{}'.format(i+1)) om[0] = write_symb_vector(out_file, om[0], 'om1_', '')", "jacobian of \\'Ri\\'\\n') out_file.write(' */\\n') out_file.write('void ForwardKinematics::main_kinematics(KinematicsInOut &in_out)\\n{\\n') # compute", "-{});\\n'.format(angle_name, R_matrix[5], R_matrix[8])) out_file.write(' in_out.{}[1] = atan2(-{}, -sqrt({}*{} + {}*{}));\\n'.format(angle_name,", "count = 0 with open(in_file,'r') as f: # loop on", "i+1, end_name) save_vector[i] = ' {} = {};\\n'.format(elem_name, 
vector[i]).replace('1.0*','') new_vector[i]", "[rad/s]\\n') theta_dot_compute(out_file, om[r_foot_id], 'omega_Rfoot', 'Rfoot') out_file.write('\\n') out_file.write(' // left foot", "the center of mass position and velocity def com_compute(out_file, nb_bodies,", "i to its COM (center of mass) G_i,\\n') out_file.write(' *", "matrix between body i and its predecessor\\n') out_file.write(' * si", "'': term_list.pop(0) result = 0 # loop on all terms", "i in range(0,3): out_file.write(' in_out.rp_Lfoot[{}] = {};\\n'.format(i, xp_l[i])) out_file.write('\\n') out_file.write('", "# symbolic computation def symbolic_computation(out_file, nb_bodies, joint_id_names, rot_axis, parent_body_index, Dpt,", "Dpt[17] = sp.Matrix([0.0, sp.Symbol('DPT_2_39'), 0.0]) Dpt[18] = sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_41')])", "matrix[i,j] == 0 or matrix[i,j] == 1: new_matrix[i,j] = matrix[i,j]", ", sp.Symbol('L_3_8')]) Dg[3] = sp.Matrix([sp.Symbol('L_1_9') , sp.Symbol('L_2_9') , sp.Symbol('L_3_9')]) Dg[4]", "sp.Symbol('L_2_9') , sp.Symbol('L_3_9')]) Dg[4] = sp.Matrix([sp.Symbol('L_1_10'), sp.Symbol('L_2_10'), sp.Symbol('L_3_10')]) Dg[5] =", ", 'RightShYaw_id' , 'RightElbPitch_id', # right arm 'LeftShPitch_id' , 'LeftShRoll_id'", "range(0,3): out_file.write(' in_out.r_Lfoot[{}] = {};\\n'.format(i, x_l[i])) out_file.write('\\n') out_file.write(' // left", "mass position and velocity def com_compute(out_file, nb_bodies, joint_id_names, M, xg,", "* ////////////////////////\\n') out_file.write(' * // //\\n') out_file.write(' * // 17", "cosine, 0.0], [0.0, 0.0, 1.0]]) else: return np.array([]) else: if", "of the waist and the torso def torso_waist_angles(out_file, R, om,", "in range(0, nb_contacts): for j in range(0, 3): Dpt_l_foot_cont[i][j] =", "by its name def write_symb_vector(out_file, vector, start_name, end_name): new_vector =", "write symbolic jacobian of a rotation matrix def write_symb_Rj(nb_bodies, Rj,", "1 elif not flag_print: flag_print = 1 out_file.write('\\n') out_file.write(' 
in_out.r_Rwrist_der[{}][{}]", "def get_vector_axis(axis, direct, elem): if direct: if axis == 1:", "out_file.write(' in_out.rp_Lfoot[{}] = {};\\n'.format(i, xp_l[i])) out_file.write('\\n') out_file.write(' // left foot", "0.0]) Dpt[8] = sp.Matrix([0.0, sp.Symbol('DPT_2_18'), 0.0]) Dpt[9] = sp.Matrix([0.0, 0.0,", "# expressed in the relative frame of the current body", "// left foot jacobian\\n') out_file.write(' if (flag_jacob)\\n {\\n') flag_first =", "= {};\\n'.format(i, j, x_l_cont[i][j])) out_file.write('\\n') out_file.write(' // left foot contact", "= sp.zeros(3, 1) om[0] = sp.zeros(3, 1) for i in", "1 elif not flag_print: flag_print = 1 out_file.write('\\n') out_file.write(' in_out.r_Lwrist_der[{}][{}]", "cos(in_out.theta_Rfoot[2]);\\n') out_file.write(' c_z_Lfoot = cos(in_out.theta_Lfoot[2]);\\n\\n') out_file.write(' s_y_Rfoot = sin(in_out.theta_Rfoot[1]);\\n') out_file.write('", "(1, nb_bodies): new_matrix = sp.zeros(3, 3) # loop on all", "###################### # # origin: in the waist, middle point between", "range(0, nb_bodies): if flag_first: out_file.write(' + {}*xgp{}_{}'.format(M[j], j+1, i+1)) else:", "as np import sympy as sp import re import os", ", 'M_8' , 'M_9' , 'M_10', 'M_11', 'M_12', # right", "# compute the wrists position, velocity and orientation def wrists_compute(out_file,", "new_string # write the end of the file def write_file_end(out_file):", "point absolute positions and velocities\\n') x = nb_bodies*[None] xp =", "elif vec_flag: result += xj[d-1][der_q-1][e-1] # apply negative if neg_flag:", "variables initialization write_intialization(file_temp, nb_bodies, joint_id_names) # symbolic computation symbolic_computation(file_temp, nb_bodies,", "for i in range(0,3): out_file.write(' in_out.rp_Lwrist[{}] = {};\\n'.format(i, xp_l[i])) out_file.write('\\n')", "angles (position and derivative) of the waist and the torso", "(indexes %a, %b also returned) def isVec(value): try: a =", "1: new_vector[i] = vector[i] save_vector[i] = None else: 
elem_name =", "'{}{}'.format(new_string, cur_split[i]) else: new_string = cur_string cur_split = filter(None, re.split(\"([A-Z][^A-Z]*)\",", "sp.zeros(3, 1) flag_print = 0 for i in range(0,3): if", "r_wrist_y, r_wrist_z): # symbolic variables declarations x_r_elb = x[r_elb_id] x_l_elb", "# trunk Dpt[13] = sp.Matrix([sp.Symbol('DPT_1_4'), 0.0, sp.Symbol('DPT_3_4')]) Dpt[14] = sp.Matrix([0.0,", "c] = isRot(pos_str) [vec_flag, d, e] = isVec(pos_str) # rotation", "Rj, xj, xgj, 6, 12, -0.06, 0.08, -0.045, 0.045) wrists_compute(out_file,", "write_file_beginning(out_file, joint_id_names): out_file.write('/*! \\n') out_file.write(' * \\\\author <NAME>\\n') out_file.write(' *", "and velocity def com_compute(out_file, nb_bodies, joint_id_names, M, xg, xgp, xgj):", "orientation def feet_compute(out_file, joint_id_names, R, x, xp, om, Rj, xj,", ": derivative of xi\\n') out_file.write(' * xgpi : derivative of", "xj, xgj, r_foot_id, l_foot_id, x_min, x_max, y_min, y_max): # symbolic", "R_r_foot[j], 'q{}'.format(i+1)) if cur_jac != 0: if not flag_first: flag_first", "velocities\\n') Om = nb_bodies*[None] om = nb_bodies*[None] om_tilde = nb_bodies*[None]", "xj, xgj, R_l_foot[j], 'q{}'.format(i+1)) if cur_jac != 0: if not", "anchor point absolute positions and velocities\\n') x = nb_bodies*[None] xp", "sp.zeros(3, 1) Dpt_r_foot[2] = sp.Symbol('DPT_3_16') Dpt_l_foot[2] = sp.Symbol('DPT_3_29') Dpt_r_foot_cont =", "of mass) G_i,\\n') out_file.write(' * expressed in the relative frame", "flag_print = 1 out_file.write('\\n') out_file.write(' in_out.Rwrist_or_der[{}][{}] = {};\\n'.format(get_string_enum(joint_id_names[i]), j, cur_jac))", "i in range(0, nb_bodies): Rj[i] = nb_bodies*[None] xj[i] = nb_bodies*[None]", "apply negative if neg_flag: result = -result return result #", "x_max Dpt_r_foot_cont[3][0] = x_max Dpt_r_foot_cont[0][1] = y_min Dpt_r_foot_cont[1][1] = y_max", "inv_c_y_{} * s_y_{} * (s_z_{}*{} + c_z_{}*{}) + {};\\n'.format(omega_out, body_part,", "}\\n') # omega 
out_file.write('\\n // joint absolute velocities\\n') Om =", "= sp.Matrix([sp.Symbol('L_1_8') , sp.Symbol('L_2_8') , sp.Symbol('L_3_8')]) Dg[3] = sp.Matrix([sp.Symbol('L_1_9') ,", "com_compute(out_file, nb_bodies, joint_id_names, M, xg, xgp, xgj) feet_compute(out_file, joint_id_names, R,", "nb_bodies-1: if flag_first: out_file.write(')/m_tot;\\n') else: out_file.write('0.0;\\n') out_file.write('\\n') out_file.write(' // global", "sp.Symbol('DPT_3_37')]) Dpt[21] = sp.Matrix([0.0, sp.Symbol('DPT_2_46'), 0.0]) Dpt[22] = sp.Matrix([0.0, 0.0,", "result = -result return result # compute the derivative of", "cur_jac)) out_file.write(' }\\n\\n') out_file.write(' // left wrist absolute orientation jacobian\\n')", "of 'yaw_pitch_roll_angles' def theta_dot_compute(out_file, omega_in, omega_out, body_part): out_file.write(' in_out.{}[0] =", "save the symbolic matrix for print def print_save_symb_matrix(matrix, start_name, end_name):", "variables declaration def write_variables_declaration(out_file, prefix, min, max): out_file.write(' double ')", "expressed in the relative # frame of the previous body", "terms for cur_term in term_list: # detect products cur_term_split =", "sp.Symbol('DPT_2_46'), 0.0]) Dpt[22] = sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_48')]) Dpt[23] = sp.Matrix([0.0,", "1 out_file.write('({}*xgp{}_{}'.format(M[j], j+1, i+1)) if j == nb_bodies-1: if flag_first:", "* // //\\n') out_file.write(' * // 17 16 21 //\\n')", "matrix and replace symbolic variable by its name def write_symb_matrix(out_file,", "= vector[i] else: flag_print = 1 elem_name = '{}{}{}'.format(start_name, i+1,", "xp_l[i])) out_file.write('\\n') out_file.write(' // left foot jacobian\\n') out_file.write(' if (flag_jacob)\\n", "rotation matrix def get_rotation_matrix(axis, direct, cosine, sine): if direct: if", "k in range(0, 3): if xgj_print[i][j][k] != None: if not", "for k in range(0, 3): if xgj_print[i][j][k] != None: if", "i\\n') out_file.write(' *\\n') out_file.write(' * xi : absolute position 
vector", "s_z_Rfoot = sin(in_out.theta_Rfoot[2]);\\n') out_file.write(' s_z_Lfoot = sin(in_out.theta_Lfoot[2]);\\n\\n') out_file.write(' if ((!c_y_Rfoot)", "out_file.write(' * si : sine of the relative angle before", "der_var) # one product elif cur_len == 2: result +=", "count >= 6: count = 0 out_file.write(',\\n {}'.format(get_string_enum(joint_id_names[i]))) else: out_file.write(',", "str(xgj[k][i][j]))) else: flag_first = 1 out_file.write('({}*{}'.format(M[k], str(xgj[k][i][j]))) if k ==", "= x_max Dpt_r_foot_cont[3][0] = x_max Dpt_r_foot_cont[0][1] = y_min Dpt_r_foot_cont[1][1] =", "j in range(0, 3): Dpt_l_foot_cont[i][j] = Dpt_r_foot_cont[i][j] x_r_cont = nb_contacts", "in the file def count_elem(in_file, elem): count = 0; with", "a = int(value.split('_')[0].split('x')[1]) b = int(value.split('_')[1]) return True, a, b", "range(0,3): out_file.write(' in_out.rp_Rfoot[{}] = {};\\n'.format(i, xp_r[i])) out_file.write('\\n') out_file.write(' // right", "len(cur_split) >= 2: new_string = cur_split[0] for i in range(1,", "body to the current body i \\n') out_file.write(' * (previous", "' {} = {};\\n'.format(elem_name, vector[i]).replace('1.0*','') new_vector[i] = sp.Symbol(elem_name) return new_vector,", "matrix as angles [rad]\\n') yaw_pitch_roll_angles(out_file, 'theta_Rfoot', R[r_foot_id], 1) out_file.write('\\n') out_file.write('", ": derivative of xgi\\n') out_file.write(' * omi : absolute rotational", "i in range(0,3): out_file.write(' in_out.r_Lwrist[{}] = {};\\n'.format(i, x_l[i])) out_file.write('\\n') out_file.write('", "out_file.write(' // left foot jacobian\\n') out_file.write(' if (flag_jacob)\\n {\\n') flag_first", "cur_split = cur_string.split('_') if len(cur_split) >= 2: new_string = cur_split[0]", "if flag_first: out_file.write(')/m_tot;\\n') else: out_file.write('0.0;\\n') out_file.write('\\n') out_file.write(' // global com", "line in f: # declaration if len(line.split('// -- variables initialization", "position vector from the anchor point of 
body i to", ", 'RightElbPitch_id', # right arm 'LeftShPitch_id' , 'LeftShRoll_id' , 'LeftShYaw_id'", "<NAME>\\n') out_file.write(' * \\\\file forward_kinematics.cc\\n') out_file.write(' * \\\\brief forward kinematics", "sp.Matrix([sp.Symbol('DPT_1_37'), sp.Symbol('DPT_2_37'), sp.Symbol('DPT_3_37')]) Dpt[21] = sp.Matrix([0.0, sp.Symbol('DPT_2_46'), 0.0]) Dpt[22] =", "sp.Symbol('L_3_27')]) Dg[22] = sp.Matrix([sp.Symbol('L_1_28'), sp.Symbol('L_2_28'), sp.Symbol('L_3_28')]) Dg[23] = sp.Matrix([sp.Symbol('L_1_29'), sp.Symbol('L_2_29'),", "matrix[i,j] save_matrix[3*i+j] = None else: elem_name = '{}{}{}{}'.format(start_name, i+1, j+1,", "1, 2, # right leg 2, 1, 3, 2, 1,", "# get vector axis def get_vector_axis(axis, direct, elem): if direct:", "R[r_elb_id] R_l_elb = R[l_elb_id] Dpt_r_wrist = sp.zeros(3, 1) Dpt_l_wrist =", "i in range(min, max+1): out_file.write('{}{}'.format(prefix, i)) if i == max:", "Om = nb_bodies*[None] om = nb_bodies*[None] om_tilde = nb_bodies*[None] Om[0]", "* [None] xj_print[i][j] = 3 * [None] xgj_print[i][j] = 3", "i+1)) if j == nb_bodies-1: if flag_first: out_file.write(')/m_tot;\\n') else: out_file.write('0.0;\\n')", "of the file write_file_end(file_temp) file_temp.close() # output file out_file =", "# trunk 'M_22', 'M_23', 'M_24', 'M_25', # right arm 'M_26',", "from the previous body to the current body i #", "2: return np.array([[0.0], [elem], [0.0]]) elif axis == 3: return", "6: count = 0 out_file.write(',\\n {}'.format(get_string_enum(joint_id_names[i]))) else: out_file.write(', {}'.format(get_string_enum(joint_id_names[i]))) out_file.write('};\\n\\n')", "xp_l_elb + om_tilde_l_elb * (R_l_elb.T * Dpt_l_wrist) # writing outputs", "in_out.Lfoot_or[{}] = {};\\n'.format(i, R_l_foot[i])) out_file.write('\\n') out_file.write(' // right foot absolute", "* (Rt[parent_id] * Dpt[i]) write_symb_xj(nb_bodies, Rj, xj, xgj, xj_print, x[i],", "out_file.write('{}'.format(xj_print[i][j][k])) out_file.write(' }\\n') # xg & xgp out_file.write('\\n // com", 
"def write_matrix_declaration(out_file, prefix): out_file.write(' double ') for i in range(0,3):", "= matrix[i,j] else: flag_print = 1 elem_name = '{}{}{}{}'.format(start_name, i+1,", "sp.Matrix([sp.Symbol('L_1_26'), sp.Symbol('L_2_26'), sp.Symbol('L_3_26')]) Dg[21] = sp.Matrix([sp.Symbol('L_1_27'), sp.Symbol('L_2_27'), sp.Symbol('L_3_27')]) Dg[22] =", "R[0].T for i in range(1, nb_bodies): Rd[i] = get_rotation_matrix(rot_axis[i], 1,", "R_r_elb = R[r_elb_id] R_l_elb = R[l_elb_id] Dpt_r_wrist = sp.zeros(3, 1)", "i in range(0, 9): out_file.write(' in_out.Lwrist_or[{}] = {};\\n'.format(i, R_l_elb[i])) out_file.write('\\n')", "theta_dot_compute(out_file, om[l_foot_id], 'omega_Lfoot', 'Lfoot') out_file.write('\\n') # compute the wrists position,", "i in range(1, nb_bodies): for j in range(0, 3): out_file.write('", "//\\n') out_file.write(' * // //\\n') out_file.write(' * ////////////////////////\\n') out_file.write(' *\\n')", "i\\n') out_file.write(' * Ri : absolute rotational matrix\\n') out_file.write(' *", "symbolic variable by its name def write_symb_matrix(out_file, matrix, start_name, end_name):", "new_vector, save_vector # save the symbolic matrix for print def", "'M_21', # trunk 'M_22', 'M_23', 'M_24', 'M_25', # right arm", "range(0, nb_contacts): Dpt_r_foot_cont[i][2] = sp.Symbol('DPT_3_16') for i in range(0, nb_contacts):", "derivative of xi # xgpi : derivative of xgi #", "else: flag_print = 1 elem_name = '{}{}{}'.format(start_name, i+1, end_name) out_file.write('", "'M_11', 'M_12', # right leg 'M_13', 'M_14', 'M_15', 'M_16', 'M_17',", "12 //\\n') out_file.write(' * // 07 13 //\\n') out_file.write(' *", "sp.Matrix([0.0, sp.Symbol('DPT_2_6'), 0.0]) Dpt[3] = sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_8')]) Dpt[4] =", "= nb_bodies*[None] Rd[0] = sp.zeros(3, 3) R[0] = sp.zeros(3, 3)", ": rotational matrix between body i and its predecessor #", "tilde matrix def get_tilde(v): return np.array([[0.0, -v[2], v[1]], [v[2], 0.0,", "anchor point of body i\\n') out_file.write(' 
* xgi : absolute", "{} = {};\\n'.format(elem_name, matrix[i,j]).replace('1.0*','')) new_matrix[i,j] = sp.Symbol(elem_name) if flag_print: out_file.write('\\n')", "= sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_10')]) Dpt[5] = sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_12')]) Dpt[6]", "# xpi : derivative of xi # xgpi : derivative", "*\\n') out_file.write(' * origin: in the waist, middle point between", "Rj_print[index-1][i]] = print_save_symb_matrix(new_matrix, 'R{}_'.format(index), '_d{}'.format(i+1)) # write symbolic jacobian of", "and the torso def torso_waist_angles(out_file, R, om, waist_id, torso_id): out_file.write('", "= {};\\n'.format(i, get_string_enum(joint_id_names[j]), k, cur_jac)) out_file.write(' }\\n\\n') out_file.write(' // left", "+ om_tilde_l_foot * (R_l_foot.T * Dpt_l_foot) for i in range(0,", "in_out.Lwrist_or[{}] = {};\\n'.format(i, R_l_elb[i])) out_file.write('\\n') out_file.write(' // right wrist absolute", "}\\n\\n') out_file.write(' // left foot absolute position\\n') for i in", "l_elb_id, r_wrist_x, r_wrist_y, r_wrist_z): # symbolic variables declarations x_r_elb =", "in range(0, 3): R[0][i,j] = sp.Symbol('IMU{}{}'.format(i+1, j+1)) write_symb_Rj(nb_bodies, Rj, xj,", "s_y_waist = sin(in_out.theta_waist[1]);\\n') out_file.write(' s_y_torso = sin(in_out.theta_torso[1]);\\n') out_file.write(' s_z_waist =", "* Om[i] om[i] = write_symb_vector(out_file, om[i], 'om{}_'.format(i+1), '') om_tilde[i] =", "for j in range(0, 3): out_file.write(' in_out.r_COM_der[{}][{}] = '.format(get_string_enum(joint_id_names[i]), j))", "body i\\n') out_file.write(' * xpi : derivative of xi\\n') out_file.write('", "xj, xgj, der_var): # element to derive (string) elem_str =", "joint_id_names): out_file.write('/*! 
\\n') out_file.write(' * \\\\author <NAME>\\n') out_file.write(' * \\\\file", "i and its predecessor # si : sine of the", "global com absolute position\\n') for i in range(0, 3): out_file.write('", "{};\\n'.format(get_string_enum(joint_id_names[i]), j, cur_jac)) out_file.write(' }\\n\\n') out_file.write(' // right foot orientation", "sin(in_out.theta_Lfoot[1]);\\n') out_file.write(' s_z_Rfoot = sin(in_out.theta_Rfoot[2]);\\n') out_file.write(' s_z_Lfoot = sin(in_out.theta_Lfoot[2]);\\n\\n') out_file.write('", "= nb_bodies*[None] x[0] = Rt[0] * Dpt[0] xp[0] = om_tilde[0]", "-0.225) torso_waist_angles(out_file, R, om, 0, 15) # generate the symbolic", "len(cut_line_1) == 2 and len(cut_line_2) == 2: if len(cut_line_2[0].split('[')) ==", "M) # end of the file write_file_end(file_temp) file_temp.close() # output", "on all the lines for line in f: # declaration", "vector (from origin, expressed in the inertial frame) # of", "out_file.write('0.0;\\n') out_file.write('\\n') out_file.write(' // global com jacobian\\n') out_file.write(' if (flag_jacob)\\n", "j+1)) if i == 2 and j == 2: out_file.write(';\\n')", "# left arm ]) # joint names joint_id_names = np.array(['0',", "sp.zeros(3, 3) xj[i][j] = sp.zeros(3, 1) xgj[i][j] = sp.zeros(3, 1)", "axis == 2: return np.array([[0.0], [-elem], [0.0]]) elif axis ==", "12 # # 07 13 # # # ###################### #", "== 3: return np.array([[cosine, sine, 0.0], [-sine, cosine, 0.0], [0.0,", "// waist orientation angle derivatives [rad/s]\\n') theta_dot_compute(out_file, om[waist_id], 'omega_waist', 'waist')", "flag_print = 0 for j in range(0,9): cur_jac = symbolic_jacob_der(Rj,", "symbolic_jacob_der(Rj, xj, xgj, R_r_foot[j], 'q{}'.format(i+1)) if cur_jac != 0: if", "xj, xgj, xj_print, x[i], i+1) x[i] = write_symb_vector(out_file, x[i], 'x{}_'.format(i+1),", "flag_print: flag_print = 1 out_file.write('\\n') out_file.write('{}'.format(xgj_print[i][j][k])) out_file.write(' }\\n') # results", "3, 2, # right arm 2, 1, 3, 2 #", "1 elif not 
flag_print: flag_print = 1 out_file.write('\\n') out_file.write(' in_out.Rfoot_or_der[{}][{}]", "derivatives of 'yaw_pitch_roll_angles' def theta_dot_compute(out_file, omega_in, omega_out, body_part): out_file.write(' in_out.{}[0]", "point positions Dpt = nb_bodies*[None] # waist Dpt[0] = sp.Matrix([0.0,", "waist Dg[0] = sp.Matrix([sp.Symbol('L_1_6'), sp.Symbol('L_2_6'), sp.Symbol('L_3_6')]) # right leg Dg[1]", "positions Dpt = nb_bodies*[None] # waist Dpt[0] = sp.Matrix([0.0, 0.0,", "= sp.Symbol(elem_name) return new_matrix, save_matrix # write symbolic jacobian of", "+ {}*{}'.format(M[k], str(xgj[k][i][j]))) else: flag_first = 1 out_file.write('({}*{}'.format(M[k], str(xgj[k][i][j]))) if", "absolute velocity\\n') for i in range(0,3): out_file.write(' in_out.rp_Lwrist[{}] = {};\\n'.format(i,", "// joint absolute velocities\\n') Om = nb_bodies*[None] om = nb_bodies*[None]", "absolute velocities\\n') Om = nb_bodies*[None] om = nb_bodies*[None] om_tilde =", "the file write_file_beginning(file_temp, joint_id_names) # variables initialization write_intialization(file_temp, nb_bodies, joint_id_names)", "out_file.write(' // right foot contact points jacobian\\n') out_file.write(' if (flag_jacob)\\n", "j+1, i+1)) else: flag_first = 1 out_file.write('({}*xgp{}_{}'.format(M[j], j+1, i+1)) if", "'M_12', # right leg 'M_13', 'M_14', 'M_15', 'M_16', 'M_17', 'M_18',", "j, cur_jac)) out_file.write(' }\\n\\n') out_file.write(' // left wrist absolute orientation", "sp.Matrix([sp.Symbol('DPT_1_4'), 0.0, sp.Symbol('DPT_3_4')]) Dpt[14] = sp.Matrix([0.0, 0.0, 0.0]) Dpt[15] =", "3) flag_print = 0 for i in range(0,3): for j", "# copy temporary file out_file.write(line) out_file.close() # remove temporary file", "j in range(0, 3): cur_jac = symbolic_jacob_der(Rj, xj, xgj, x_r[j],", "'') xp[0] = write_symb_vector(out_file, xp[0], 'xp1_', '') for i in", "for 0 or 1 !'.format(cur_len-1)) exit() return result # write", "Dpt_r_foot_cont[i] x_l_cont[i] = x_l_foot + R_l_foot.T * 
Dpt_l_foot_cont[i] # writing", "out_file.write('\\n') out_file.write(' // right wrist absolute velocity\\n') for i in", "= cur_split[0].upper() for i in range(1, len(cur_split)): new_string = '{}_{}'.format(new_string,", "i in range(1, len(cur_split)): new_string = '{}_{}'.format(new_string, cur_split[i].upper()) return new_string", "Dg[9] = sp.Matrix([sp.Symbol('L_1_15'), sp.Symbol('L_2_15'), sp.Symbol('L_3_15')]) Dg[10] = sp.Matrix([sp.Symbol('L_1_16'), sp.Symbol('L_2_16'), sp.Symbol('L_3_16')])", "try: a = int(value.split('_')[0].split('R')[1]) b = int(value.split('_')[1][0]) c = int(value.split('_')[1][1])", "for k in range(0, 3): if xj_print[i][j][k] != None: if", "if epsilon > 0: # epsilon = 1 -> pitch", "the current body i\\n') out_file.write(' * Omi : rotational vector", "i+1) x[i] = write_symb_vector(out_file, x[i], 'x{}_'.format(i+1), '') xp[i] = write_symb_vector(out_file,", "1 if count >= nb_max_line: out_write.write(';\\n') count = 0 if", "double ') for i in range(min, max+1): out_file.write('{}{}'.format(prefix, i)) if", "b except: return False, -1, -1 # count the number", "'c{}'.format(der_q): result += -sp.Symbol('s{}'.format(der_q)) # sine elif pos_str == 's{}'.format(der_q):", "pi/2] out_file.write(' in_out.{}[0] = atan2({}, {});\\n'.format(angle_name, R_matrix[5], R_matrix[8])) out_file.write(' in_out.{}[1]", "elif not flag_print: flag_print = 1 out_file.write('\\n') out_file.write(' in_out.Rwrist_or_der[{}][{}] =", "= write_symb_vector(out_file, xg[i], 'xg{}_'.format(i+1), '') xgp[i] = write_symb_vector(out_file, xgp[i], 'xgp{}_'.format(i+1),", "0.0, 0.0]) # trunk Dpt[13] = sp.Matrix([sp.Symbol('DPT_1_4'), 0.0, sp.Symbol('DPT_3_4')]) Dpt[14]", "computation of:\\n') out_file.write(' * COM (center of mass) position and", "* // 17 16 21 //\\n') out_file.write(' * // 18", "i)) out_file.write('\\n // joint cosines\\n') for i in range(1, nb_bodies):", "its COM (center of mass) G_i,\\n') out_file.write(' * expressed in", "for i in range(0, 3): 
out_file.write(' in_out.rp_COM[{}] = '.format(i)) flag_first", "write_intialization(out_file, nb_bodies, joint_id_names): out_file.write(' // -- variables initialization -- //\\n')", "out_write.write(', {}'.format(cut_line[0].strip())) count += len(cut_line[0].strip()) + 2 if count >=", "3) save_matrix = 9 * [None] for i in range(0,3):", "# trunk 15, 16, 17, 18, # right arm 15,", "get_rotation_matrix(axis, direct, cosine, sine): if direct: if axis == 1:", "0.0, sp.Symbol('DPT_3_32')]) # right arm Dpt[16] = sp.Matrix([sp.Symbol('DPT_1_36'), sp.Symbol('DPT_2_36'), sp.Symbol('DPT_3_36')])", "= 0 for k in range(0, 3): if xj_print[i][j][k] !=", "a string for the enumeration of joints def get_string_enum(cur_string): cur_split", "and orientation def feet_compute(out_file, joint_id_names, R, x, xp, om, Rj,", "in_out.rp_Lwrist[{}] = {};\\n'.format(i, xp_l[i])) out_file.write('\\n') out_file.write(' // left wrist jacobian\\n')", "= matrix[i,j] save_matrix[3*i+j] = None else: elem_name = '{}{}{}{}'.format(start_name, i+1,", "in range(0,3): out_file.write('{}{}{}'.format(prefix, i+1, j+1)) if i == 2 and", "i in range(0,3): out_file.write(' in_out.r_Rwrist[{}] = {};\\n'.format(i, x_r[i])) out_file.write('\\n') out_file.write('", "all declarations def print_all_declaration(in_file, out_write, nb_max_char): count = 0 with", "range(0,3): out_file.write(' in_out.r_Rwrist[{}] = {};\\n'.format(i, x_r[i])) out_file.write('\\n') out_file.write(' // right", "rotational vector of body i # Ri : absolute rotational", "out_file.write(' * waist and torso orientaion angles and derivatives\\n') out_file.write('", "cos(in_out.theta_torso[2]);\\n\\n') out_file.write(' s_y_waist = sin(in_out.theta_waist[1]);\\n') out_file.write(' s_y_torso = sin(in_out.theta_torso[1]);\\n') out_file.write('", "nb_contacts): x_r_cont[i] = x_r_foot + R_r_foot.T * Dpt_r_foot_cont[i] x_l_cont[i] =", "anchor point positions\\n') out_file.write(' if (flag_jacob)\\n {\\n') flag_first = 0", "= 
sp.Matrix([sp.Symbol('L_1_16'), sp.Symbol('L_2_16'), sp.Symbol('L_3_16')]) Dg[11] = sp.Matrix([sp.Symbol('L_1_17'), sp.Symbol('L_2_17'), sp.Symbol('L_3_17')]) Dg[12]", "the file def count_elem(in_file, elem): count = 0; with open(in_file,", "nb_bodies*[None] xgj = nb_bodies*[None] Rj_print = nb_bodies*[None] xj_print = nb_bodies*[None]", "1: out_file.write(' // -- variables declaration -- //\\n\\n') print_all_declaration(in_temp, out_file,", "= xp[r_foot_id] xp_l_foot = xp[l_foot_id] om_r_foot = om[r_foot_id] om_l_foot =", "1, 2, 3, 4, 5, # right leg 0, 7,", "= sp.Matrix([sp.Symbol('L_1_21'), sp.Symbol('L_2_21'), sp.Symbol('L_3_21')]) # right arm Dg[16] = sp.Matrix([sp.Symbol('L_1_22'),", "'M_22', 'M_23', 'M_24', 'M_25', # right arm 'M_26', 'M_27', 'M_28',", "[0.0, 0.0, 1.0]]) else: return np.array([]) else: if axis ==", "def write_file_beginning(out_file, joint_id_names): out_file.write('/*! \\n') out_file.write(' * \\\\author <NAME>\\n') out_file.write('", "flag_first: out_file.write(' + {}*xgp{}_{}'.format(M[j], j+1, i+1)) else: flag_first = 1", "out_file.write(' * feet position, velocity and orientation\\n') out_file.write(' * waist", "3, 2 # left arm ]) # parent index parent_body_index", "* COM (center of mass) position and velocity\\n') out_file.write(' *", "0.0, 0.0]) # right leg Dpt[1] = sp.Matrix([0.0, sp.Symbol('DPT_2_2'), 0.0])", "isVec(value): try: a = int(value.split('_')[0].split('x')[1]) b = int(value.split('_')[1]) return True,", "'M_10', 'M_11', 'M_12', # right leg 'M_13', 'M_14', 'M_15', 'M_16',", "in range(0,3): out_file.write(' in_out.rp_Lfoot[{}] = {};\\n'.format(i, xp_l[i])) out_file.write('\\n') out_file.write(' //", "[0.0]]) elif axis == 3: return np.array([[0.0], [0.0], [-elem]]) else:", "in range(0, 3): out_file.write(' in_out.r_Rfoot_cont[{}][{}] = {};\\n'.format(i, j, x_r_cont[i][j])) out_file.write('\\n')", "else: new_string = cur_string cur_split = filter(None, re.split(\"([A-Z][^A-Z]*)\", new_string)) new_string", "-- variables 
initialization -- //')) != 1: out_file.write(' // --", "// torso orientation matrix as angles [rad]\\n') yaw_pitch_roll_angles(out_file, 'theta_torso', R[torso_id],", "= ') if len(cut_line) == 2: if len(cut_line[0].split('[')) == 1:", "parent_body_index[i] Om[i] = get_vector_axis(rot_axis[i], 1, sp.Symbol('Om{}'.format(i+1))) om[i] = om[parent_id] +", "sp.Symbol('L_3_26')]) Dg[21] = sp.Matrix([sp.Symbol('L_1_27'), sp.Symbol('L_2_27'), sp.Symbol('L_3_27')]) Dg[22] = sp.Matrix([sp.Symbol('L_1_28'), sp.Symbol('L_2_28'),", "06 12 # # 07 13 # # # ######################", "of the file def write_file_beginning(out_file, joint_id_names): out_file.write('/*! \\n') out_file.write(' *", "get_string_enum(joint_id_names[j]), k, cur_jac)) out_file.write(' }\\n\\n') out_file.write(' // feet absolute orientation\\n')", "= x_r_elb + R_r_elb.T * Dpt_r_wrist x_l = x_l_elb +", "'') om_tilde[i] = get_tilde(om[i]) # x & xp out_file.write('\\n //", "trunk Dpt[13] = sp.Matrix([sp.Symbol('DPT_1_4'), 0.0, sp.Symbol('DPT_3_4')]) Dpt[14] = sp.Matrix([0.0, 0.0,", "R_r_foot.T * Dpt_r_foot_cont[i] x_l_cont[i] = x_l_foot + R_l_foot.T * Dpt_l_foot_cont[i]", "for i in range(0,3): out_file.write(' in_out.rp_Rwrist[{}] = {};\\n'.format(i, xp_r[i])) out_file.write('\\n')", "== 0: out_write.write(' double {}'.format(cut_line_2[0].strip())) else: out_write.write(', {}'.format(cut_line_2[0].strip())) count +=", "def torso_waist_angles(out_file, R, om, waist_id, torso_id): out_file.write(' // waist orientation", "sine of the relative angle before body i\\n') out_file.write(' *", "right wrist jacobian\\n') out_file.write(' if (flag_jacob)\\n {\\n') flag_first = 0", "* // 04 10 //\\n') out_file.write(' * // 05 11", "1.0 / c_y_waist;\\n') out_file.write(' inv_c_y_torso = 1.0 / c_y_torso;\\n\\n') out_file.write('", "out_file.write('{}'.format(xgj_print[i][j][k])) out_file.write(' }\\n') # results out_file.write('\\n // -- Collecting results", "'w') with open(in_temp, 'r') as f: # loop on all", "c except: 
return False, -1, -1, -1 # return true", "= x_l_foot + R_l_foot.T * Dpt_l_foot xp_r = xp_r_foot +", "Dpt[23] = sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_50')]) ## COM positions Dg =", "om_r_elb = om[r_elb_id] om_l_elb = om[l_elb_id] R_r_elb = R[r_elb_id] R_l_elb", "Dg[8] = sp.Matrix([sp.Symbol('L_1_14'), sp.Symbol('L_2_14'), sp.Symbol('L_3_14')]) Dg[9] = sp.Matrix([sp.Symbol('L_1_15'), sp.Symbol('L_2_15'), sp.Symbol('L_3_15')])", "symbolic matrix for print def print_save_symb_matrix(matrix, start_name, end_name): new_matrix =", "else: flag_first = 1 out_file.write('({}*xgp{}_{}'.format(M[j], j+1, i+1)) if j ==", "xgj, der_var)*sp.Symbol(cur_term_split[0].strip()) # other else: print('Error: {} * counted ,", "elem_name = '{}{}{}'.format(start_name, i+1, end_name) save_vector[i] = ' {} =", "omega_in[0], omega_in[2])) # angles (position and derivative) of the waist", "of the current body i\\n') out_file.write(' * Omi : rotational", "sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_41')]) Dpt[19] = sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_43')]) # left", "[0.0, sine, cosine]]) elif axis == 2: return np.array([[cosine, 0.0,", "out_file.write(' in_out.{}[1] = atan2(-{}, sqrt({}*{} + {}*{}));\\n'.format(angle_name, R_matrix[2], R_matrix[0], R_matrix[0],", "1) R[0] = write_symb_matrix(out_file, R[0], 'R1_', '') Rt[0] = R[0].T", "{}'.format(cut_line_2[0].strip())) count += 1 if count >= nb_max_line: out_write.write(';\\n') count", "x_r_foot + R_r_foot.T * Dpt_r_foot x_l = x_l_foot + R_l_foot.T", "anchor point positions Dpt = nb_bodies*[None] # waist Dpt[0] =", "+ R_r_elb.T * Dpt_r_wrist x_l = x_l_elb + R_l_elb.T *", "Rj[a-1][der_q-1][(b-1)*3+(c-1)] # vector elif vec_flag: result += xj[d-1][der_q-1][e-1] # apply", "[0.0], [0.0]]) elif axis == 2: return np.array([[0.0], [-elem], [0.0]])", "mass) position and velocity\\n') out_file.write(' * feet position, velocity and", "= cos(in_out.theta_torso[1]);\\n') out_file.write(' c_z_waist = cos(in_out.theta_waist[2]);\\n') out_file.write(' c_z_torso 
= cos(in_out.theta_torso[2]);\\n\\n')", "not flag_print: flag_print = 1 out_file.write('\\n') out_file.write(' in_out.Lfoot_or_der[{}][{}] = {};\\n'.format(get_string_enum(joint_id_names[i]),", "new_vector[i] = sp.Symbol(elem_name) if flag_print: out_file.write('\\n') return new_vector # write", "x, xp, om, Rj, xj, xgj, 19, 23, -0.02, -0.005,", "# computation om_tilde_r_elb = get_tilde(om_r_elb) om_tilde_l_elb = get_tilde(om_l_elb) x_r =", "Dg[17] = sp.Matrix([sp.Symbol('L_1_23'), sp.Symbol('L_2_23'), sp.Symbol('L_3_23')]) Dg[18] = sp.Matrix([sp.Symbol('L_1_24'), sp.Symbol('L_2_24'), sp.Symbol('L_3_24')])", "# print matrix components declaration def write_matrix_declaration(out_file, prefix): out_file.write(' double", "k, cur_jac)) out_file.write(' }\\n\\n') out_file.write(' // left foot contact points", "of an element (for jacobian) def der_elem(elem_str, Rj, xj, xgj,", "other else: print('Error: {} * counted , only implemented for", "j+1, 3*i+j)) out_file.write('\\n // IMU - angles velocity\\n') for i", "left leg 'TorsoRoll_id' , 'TorsoPitch_id' , 'TorsoYaw_id' , # trunk", "sp.Symbol('s{}'.format(i+1))) R[i] = Rd[i] * R[parent_body_index[i]] write_symb_Rj(nb_bodies, Rj, xj, xgj,", "= R[l_foot_id] Dpt_r_foot = sp.zeros(3, 1) Dpt_l_foot = sp.zeros(3, 1)", "of the relative angle before body i\\n') out_file.write(' *\\n') out_file.write('", "1 elem_name = '{}{}{}{}'.format(start_name, i+1, j+1, end_name) out_file.write(' {} =", "out_file.write(' s_z_torso = sin(in_out.theta_torso[2]);\\n\\n') out_file.write(' if ((!c_y_waist) || (!c_y_torso))\\n {\\n')", "out_file.write('\\n // joint cosines\\n') for i in range(1, nb_bodies): out_file.write('", "xgj, x_vector[j], 'q{}'.format(i+1)) [xj[index-1][i], xj_print[index-1][i]] = print_save_symb_vector(new_vector, 'x{}_'.format(index), '_d{}'.format(i+1)) #", "range(1, nb_bodies): parent_id = parent_body_index[i] Om[i] = get_vector_axis(rot_axis[i], 1, sp.Symbol('Om{}'.format(i+1)))", "v[0], 0.0]]) # get rotation matrix def 
get_rotation_matrix(axis, direct, cosine,", "vec_flag: result += xj[d-1][der_q-1][e-1] # apply negative if neg_flag: result", "(1:x, 2:y, 3:z) rot_axis = np.array([0, # waist 2, 1,", "c = int(value.split('_')[1][1]) return True, a, b, c except: return", "line in f: cut_line = line.split(elem) if len(cut_line) == 2:", "jacobian of \\'xi\\'\\n') out_file.write(' * xgji : jacobian of \\'xgi\\'\\n')", "at the origin (waist), but aligned with the ground (info", "flag_first = 0 for i in range(0, nb_contacts): for j", "+ om_tilde_r_foot * (R_r_foot.T * Dpt_r_foot) xp_l = xp_l_foot +", "Rj_print = nb_bodies*[None] xj_print = nb_bodies*[None] xgj_print = nb_bodies*[None] for", "range(0, 3): out_file.write(' in_out.r_Lfoot_cont[{}][{}] = {};\\n'.format(i, j, x_l_cont[i][j])) out_file.write('\\n') out_file.write('", "= nb_bodies*[None] Rj_print[i] = nb_bodies*[None] xj_print[i] = nb_bodies*[None] xgj_print[i] =", "Omi : rotational vector from the previous body to the", "len(joint_id_names)): count += 1 if i == 1: out_file.write('{}'.format(get_string_enum(joint_id_names[i]))) elif", "Dpt[i] xp[i] = xp[parent_id] + om_tilde[parent_id] * (Rt[parent_id] * Dpt[i])", "sp.Matrix([sp.Symbol('L_1_25'), sp.Symbol('L_2_25'), sp.Symbol('L_3_25')]) # left arm Dg[20] = sp.Matrix([sp.Symbol('L_1_26'), sp.Symbol('L_2_26'),", "str(xgj[k][i][j]))) if k == nb_bodies-1: if flag_first: out_file.write(')/m_tot;\\n') else: out_file.write('0.0;\\n')", "and orientation\\n') out_file.write(' * waist and torso orientaion angles and", "file def count_elem(in_file, elem): count = 0; with open(in_file, 'r')", "# right leg 'M_13', 'M_14', 'M_15', 'M_16', 'M_17', 'M_18', #", "x_max, y_min, y_max): # symbolic variables declarations nb_contacts = 4", "x[l_elb_id] xp_r_elb = xp[r_elb_id] xp_l_elb = xp[l_elb_id] om_r_elb = om[r_elb_id]", "# right arm 15, 20, 21, 22 # left arm", "if (flag_jacob)\\n {\\n') flag_first = 0 for i in range", "out_file.write(' * // 20 01 24 //\\n') out_file.write(' * //", 
"out_file.write(' // left wrist absolute position\\n') for i in range(0,3):", "of body i # xpi : derivative of xi #", "om_tilde_r_foot * (R_r_foot.T * Dpt_r_foot) xp_l = xp_l_foot + om_tilde_l_foot", "nb_bodies*[None] Rd[0] = sp.zeros(3, 3) R[0] = sp.zeros(3, 3) for", "torso def torso_waist_angles(out_file, R, om, waist_id, torso_id): out_file.write(' // waist", "= {};\\n'.format(i, R_l_elb[i])) out_file.write('\\n') out_file.write(' // right wrist absolute orientation", "joint_id_names, M, xg, xgp, xgj) feet_compute(out_file, joint_id_names, R, x, xp,", "write_file_beginning(file_temp, joint_id_names) # variables initialization write_intialization(file_temp, nb_bodies, joint_id_names) # symbolic", "pitch angle in [pi/2 ; 3*pi/2] out_file.write(' in_out.{}[0] = atan2(-{},", "{} = {};\\n'.format(elem_name, vector[i]).replace('1.0*','') new_vector[i] = sp.Symbol(elem_name) return new_vector, save_vector", "out_file.write('\\n') out_file.write(' in_out.r_Rwrist_der[{}][{}] = {};\\n'.format(get_string_enum(joint_id_names[i]), j, cur_jac)) out_file.write(' }\\n\\n') out_file.write('", "the current body i # (previous body is not always", "{};\\n'.format(i, R_r_foot[i])) out_file.write('\\n') for i in range(0, 9): out_file.write(' in_out.Lfoot_or[{}]", "angle derivatives [rad/s]\\n') theta_dot_compute(out_file, om[r_foot_id], 'omega_Rfoot', 'Rfoot') out_file.write('\\n') out_file.write(' //", "== 2: return np.array([[0.0], [elem], [0.0]]) elif axis == 3:", "write the beginning of the file def write_file_beginning(out_file, joint_id_names): out_file.write('/*!", "print_save_symb_vector(new_vector, 'xg{}_'.format(index), '_d{}'.format(i+1)) # symbolic computation def symbolic_computation(out_file, nb_bodies, joint_id_names,", "range(0, 9): if Rj_print[i][j][k] != None: if not flag_first: flag_first", "0.0]) Dpt[15] = sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_32')]) # right arm Dpt[16]", "cur_jac = symbolic_jacob_der(Rj, xj, xgj, x_l[j], 'q{}'.format(i+1)) if cur_jac !=", 
"result += -sp.Symbol('s{}'.format(der_q)) # sine elif pos_str == 's{}'.format(der_q): result", "always body i-1), expressed in the relative\\n') out_file.write(' * frame", "nb_bodies): new_matrix = sp.zeros(3, 3) # loop on all the", "for j in range (1, nb_bodies): flag_print = 0 for", ": absolute position vector of the COM G_i of body", "sp.Matrix([sp.Symbol('L_1_22'), sp.Symbol('L_2_22'), sp.Symbol('L_3_22')]) Dg[17] = sp.Matrix([sp.Symbol('L_1_23'), sp.Symbol('L_2_23'), sp.Symbol('L_3_23')]) Dg[18] =", "1, 3, 2, 1, 2, # right leg 2, 1,", ", only implemented for 0 or 1 !'.format(cur_len-1)) exit() return", "]) # parent index parent_body_index = np.array([ -1, # waist", "sine], [0.0, -sine, cosine]]) elif axis == 2: return np.array([[cosine,", "sp.Symbol('DPT_3_41')]) Dpt[19] = sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_43')]) # left arm Dpt[20]", "[None] for i in range(0,3): if vector[i] == 0 or", "leg Dpt[7] = sp.Matrix([0.0, sp.Symbol('DPT_2_3'), 0.0]) Dpt[8] = sp.Matrix([0.0, sp.Symbol('DPT_2_18'),", "x[i] = x[parent_id] + Rt[parent_id] * Dpt[i] xp[i] = xp[parent_id]", "expressed in the relative frame of the current body i\\n')", "of the COM G_i of body i\\n') out_file.write(' * xpi", "= y_min Dpt_r_foot_cont[3][1] = y_max for i in range(0, nb_contacts):", "matrix elements for j in range(0, 9): new_matrix[j] = symbolic_jacob_der(Rj,", "1 out_file.write('\\n') out_file.write('{}'.format(Rj_print[i][j][k])) out_file.write(' }\\n') # omega out_file.write('\\n // joint", "= nb_bodies*[None] for i in range(0, nb_bodies): xg[i] = x[i]", "nb_bodies, joint_id_names, rot_axis, parent_body_index, Dpt, Dg, M) # end of", "# output file out_file = open('./{}.cc'.format(out_file_name), 'w') with open(in_temp, 'r')", "Dg[6] = sp.Matrix([sp.Symbol('L_1_12'), 0.0 , sp.Symbol('L_3_12')]) # left leg Dg[7]", "* xji : jacobian of \\'xi\\'\\n') out_file.write(' * xgji :", "# (previous body is not always body i-1), expressed in", "out_file.write('{}'.format(Rj_print[i][j][k])) 
out_file.write(' }\\n') # omega out_file.write('\\n // joint absolute velocities\\n')", "of \\'xi\\'\\n') out_file.write(' * xgji : jacobian of \\'xgi\\'\\n') out_file.write('", "return new_vector # write symbolic matrix and replace symbolic variable", "new_matrix[i,j] = sp.Symbol(elem_name) if flag_print: out_file.write('\\n') return new_matrix # save", "0.0, sp.Symbol('DPT_3_8')]) Dpt[4] = sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_10')]) Dpt[5] = sp.Matrix([0.0,", "in_out.r_Rfoot_cont_der[{}][{}][{}] = {};\\n'.format(i, get_string_enum(joint_id_names[j]), k, cur_jac)) out_file.write(' }\\n\\n') out_file.write(' //", "x[l_foot_id] xp_r_foot = xp[r_foot_id] xp_l_foot = xp[l_foot_id] om_r_foot = om[r_foot_id]", "(for jacobian) def der_elem(elem_str, Rj, xj, xgj, der_var): # element", "') if len(cut_line_1) == 2 and len(cut_line_2) == 2: if", "{};\\n'.format(i, xp_l[i])) out_file.write('\\n') out_file.write(' // left wrist jacobian\\n') out_file.write(' if", "elem_name = '{}{}{}{}'.format(start_name, i+1, j+1, end_name) save_matrix[3*i+j] = ' {}", "positions Dg = nb_bodies*[None] # waist Dg[0] = sp.Matrix([sp.Symbol('L_1_6'), sp.Symbol('L_2_6'),", "Rj, xj, xgj, Rj_print, R[0], 1) R[0] = write_symb_matrix(out_file, R[0],", "body i # Ri : absolute rotational matrix # Rti", "return np.array([[cosine, -sine, 0.0], [sine, cosine, 0.0], [0.0, 0.0, 1.0]])", "= write_symb_vector(out_file, om[i], 'om{}_'.format(i+1), '') om_tilde[i] = get_tilde(om[i]) # x", "it has a shape 'x%a_%b' (indexes %a, %b also returned)", "foot absolute orientation jacobian\\n') out_file.write(' if (flag_jacob)\\n {\\n') flag_first =", "foot orientation angle derivatives [rad/s]\\n') theta_dot_compute(out_file, om[l_foot_id], 'omega_Lfoot', 'Lfoot') out_file.write('\\n')", "'om{}_'.format(i+1), '') om_tilde[i] = get_tilde(om[i]) # x & xp out_file.write('\\n", "= sp.zeros(3, 3) # loop on all the matrix elements", "* // 06 12 //\\n') out_file.write(' * // 07 13", "-1, -1 # return true if it has a shape", 
"[rot_flag, a, b, c] = isRot(pos_str) [vec_flag, d, e] =", "previous body\\n') out_file.write(' * Rdi : rotational matrix between body", "|| (!c_y_Lfoot))\\n {\\n') out_file.write(' return;\\n }\\n\\n') out_file.write(' inv_c_y_Rfoot = 1.0", "np.array([]) else: if axis == 1: return np.array([[1.0, 0.0, 0.0],", "# ###################### # # origin: in the waist, middle point", "axis == 1: return np.array([[elem], [0.0], [0.0]]) elif axis ==", "= get_vector_axis(rot_axis[i], 1, sp.Symbol('Om{}'.format(i+1))) om[i] = om[parent_id] + Rt[parent_id] *", "orientation\\n') out_file.write(' * waist and torso orientaion angles and derivatives\\n')", "right leg 'M_13', 'M_14', 'M_15', 'M_16', 'M_17', 'M_18', # left", "velocity and orientation def feet_compute(out_file, joint_id_names, R, x, xp, om,", "out_file_name = 'forward_kinematics' gen_symbolic_out(out_file_name, nb_bodies, rot_axis, parent_body_index, joint_id_names, Dpt, Dg,", "# # origin: in the waist, middle point between the", "range(1, nb_bodies): Rd[i] = get_rotation_matrix(rot_axis[i], 1, sp.Symbol('c{}'.format(i+1)), sp.Symbol('s{}'.format(i+1))) R[i] =", "x[i] + Rt[i] * Dg[i] xgp[i] = xp[i] + om_tilde[i]", "trunk 'M_22', 'M_23', 'M_24', 'M_25', # right arm 'M_26', 'M_27',", "compute the center of mass position and velocity def com_compute(out_file,", "in term_list: # detect products cur_term_split = cur_term.split('*') cur_len =", "x_min Dpt_r_foot_cont[2][0] = x_max Dpt_r_foot_cont[3][0] = x_max Dpt_r_foot_cont[0][1] = y_min", "# xi : absolute position vector (from origin, expressed in", "xp_l = xp_l_foot + om_tilde_l_foot * (R_l_foot.T * Dpt_l_foot) for", "position\\n') for i in range(0,3): out_file.write(' in_out.r_Rfoot[{}] = {};\\n'.format(i, x_r[i]))", "'') om_tilde[0] = get_tilde(om[0]) for i in range(1, nb_bodies): parent_id", "omi : absolute rotational vector of body i # Ri", "# jacobian x out_file.write('\\n // jacobian anchor point positions\\n') out_file.write('", "as angles [rad]\\n') 
yaw_pitch_roll_angles(out_file, 'theta_Lfoot', R[l_foot_id], 1) out_file.write('\\n') out_file.write(' c_y_Rfoot", "= sp.Matrix([sp.Symbol('DPT_1_36'), sp.Symbol('DPT_2_36'), sp.Symbol('DPT_3_36')]) Dpt[17] = sp.Matrix([0.0, sp.Symbol('DPT_2_39'), 0.0]) Dpt[18]", "sp.Matrix([sp.Symbol('L_1_16'), sp.Symbol('L_2_16'), sp.Symbol('L_3_16')]) Dg[11] = sp.Matrix([sp.Symbol('L_1_17'), sp.Symbol('L_2_17'), sp.Symbol('L_3_17')]) Dg[12] =", "xg & xgp out_file.write('\\n // com absolute positions and velocities\\n')", "absolute position\\n') for i in range(0,3): out_file.write(' in_out.r_Lfoot[{}] = {};\\n'.format(i,", "out_file.write('\\n') out_file.write(' c_y_Rfoot = cos(in_out.theta_Rfoot[1]);\\n') out_file.write(' c_y_Lfoot = cos(in_out.theta_Lfoot[1]);\\n') out_file.write('", "symb_var, der_var): # list of all terms term_list = str(symb_var).replace('-", "* Dpt[i] xp[i] = xp[parent_id] + om_tilde[parent_id] * (Rt[parent_id] *", ": cosine of the relative angle before body i #", "[rad/s]\\n') theta_dot_compute(out_file, om[l_foot_id], 'omega_Lfoot', 'Lfoot') out_file.write('\\n') # compute the wrists", "flag_print: flag_print = 1 out_file.write('\\n') out_file.write(' in_out.Lfoot_or_der[{}][{}] = {};\\n'.format(get_string_enum(joint_id_names[i]), j,", "Dpt_l_wrist[0] = r_wrist_x Dpt_l_wrist[1] = -r_wrist_y Dpt_l_wrist[2] = r_wrist_z #", "open(in_temp, 'r') as f: # loop on all the lines", "= '.format(i)) flag_first = 0 for j in range(0, nb_bodies):", "vector (from origin, expressed in the inertial frame)\\n') out_file.write(' *", "com jacobian\\n') out_file.write(' if (flag_jacob)\\n {\\n') for i in range(1,", "R_matrix[0], R_matrix[1], R_matrix[1])) out_file.write(' in_out.{}[2] = atan2({}, {});\\n'.format(angle_name, R_matrix[1], R_matrix[0]))", "om_l_foot = om[l_foot_id] R_r_foot = R[r_foot_id] R_l_foot = R[l_foot_id] Dpt_r_foot", "[None] # computation om_tilde_r_foot = get_tilde(om_r_foot) om_tilde_l_foot = get_tilde(om_l_foot) x_r", "symbolic_jacob_der(Rj, xj, xgj, 
R_r_elb[j], 'q{}'.format(i+1)) if cur_jac != 0: if", "Dg[i]) write_symb_xgj(nb_bodies, Rj, xj, xgj, xgj_print, xg[i], i+1) xg[i] =", "# right leg Dg[1] = sp.Matrix([sp.Symbol('L_1_7') , sp.Symbol('L_2_7') , sp.Symbol('L_3_7')])", "R[r_foot_id] R_l_foot = R[l_foot_id] Dpt_r_foot = sp.zeros(3, 1) Dpt_l_foot =", "== 2: if len(cut_line_2[0].split('[')) == 1: if count == 0:", "# # 17 16 21 # # 18 15 22", "body_part, omega_in[1], body_part, omega_in[0], omega_in[2])) # angles (position and derivative)", "out_file.write(' * frame of the previous body\\n') out_file.write(' * Rdi", "= r_wrist_z # computation om_tilde_r_elb = get_tilde(om_r_elb) om_tilde_l_elb = get_tilde(om_l_elb)", "1 elif not flag_print: flag_print = 1 out_file.write('\\n') out_file.write('{}'.format(Rj_print[i][j][k])) out_file.write('", "in range(0, 3): for j in range(0, 3): out_file.write(' IMU{}{}", "axis == 2: return np.array([[0.0], [elem], [0.0]]) elif axis ==", "def write_symb_Rj(nb_bodies, Rj, xj, xgj, Rj_print, R_matrix, index): # loop", "2: # negative neg_flag = 1 pos_str = elem_split[1] else:", "xgj) feet_compute(out_file, joint_id_names, R, x, xp, om, Rj, xj, xgj,", "R_matrix[5], R_matrix[8])) out_file.write(' in_out.{}[1] = atan2(-{}, -sqrt({}*{} + {}*{}));\\n'.format(angle_name, R_matrix[2],", "out_file.write('\\n') out_file.write(' // global com absolute velocity\\n') for i in", "out_file.write(' c_y_torso = cos(in_out.theta_torso[1]);\\n') out_file.write(' c_z_waist = cos(in_out.theta_waist[2]);\\n') out_file.write(' c_z_torso", "'theta_torso', R[torso_id], 1) out_file.write('\\n') out_file.write(' c_y_waist = cos(in_out.theta_waist[1]);\\n') out_file.write(' c_y_torso", "the previous body to the current body i \\n') out_file.write('", "= r_wrist_z Dpt_l_wrist[0] = r_wrist_x Dpt_l_wrist[1] = -r_wrist_y Dpt_l_wrist[2] =", "'xg{}_'.format(index), '_d{}'.format(i+1)) # symbolic computation def symbolic_computation(out_file, nb_bodies, joint_id_names, rot_axis,", "+ om_tilde_r_elb * 
(R_r_elb.T * Dpt_r_wrist) xp_l = xp_l_elb +", "def symbolic_computation(out_file, nb_bodies, joint_id_names, rot_axis, parent_body_index, Dpt, Dg, M): out_file.write('\\n\\n", "get tilde matrix def get_tilde(v): return np.array([[0.0, -v[2], v[1]], [v[2],", "R_l_foot[j], 'q{}'.format(i+1)) if cur_jac != 0: if not flag_first: flag_first", "# left arm Dpt[20] = sp.Matrix([sp.Symbol('DPT_1_37'), sp.Symbol('DPT_2_37'), sp.Symbol('DPT_3_37')]) Dpt[21] =", "axis == 1: return np.array([[1.0, 0.0, 0.0], [0.0, cosine, sine],", "out_file.write(' in_out.r_Lfoot_cont[{}][{}] = {};\\n'.format(i, j, x_l_cont[i][j])) out_file.write('\\n') out_file.write(' // left", "velocities\\n') xg = nb_bodies*[None] xgp = nb_bodies*[None] for i in", "j, x_r_cont[i][j])) out_file.write('\\n') out_file.write(' // right foot contact points jacobian\\n')", "xp_r[i])) out_file.write('\\n') out_file.write(' // right foot jacobian\\n') out_file.write(' if (flag_jacob)\\n", "elif not flag_print: flag_print = 1 out_file.write('\\n') out_file.write('{}'.format(xj_print[i][j][k])) out_file.write(' }\\n')", "* DGi : position vector from the anchor point of", "the waist and the torso def torso_waist_angles(out_file, R, om, waist_id,", "isRot(pos_str) [vec_flag, d, e] = isVec(pos_str) # rotation matrix if", "f: cut_line = line.split(elem) if len(cut_line) == 2: count +=", "count != 0: out_write.write(';\\n') # get tilde matrix def get_tilde(v):", "out_file.write(' // waist orientation matrix as angles [rad]\\n') yaw_pitch_roll_angles(out_file, 'theta_waist',", "range(1, nb_bodies): out_file.write(' c{} = cos(in_out.q_mot[{}]);\\n'.format(i+1, joint_id_names[i])) out_file.write('\\n // joint", "# cosine if pos_str == 'c{}'.format(der_q): result += -sp.Symbol('s{}'.format(der_q)) #", ": jacobian of \\'Ri\\'\\n') out_file.write(' */\\n') out_file.write('void ForwardKinematics::main_kinematics(KinematicsInOut &in_out)\\n{\\n') #", "nb_bodies): flag_print = 0 for k in range(0, 9): if", "in range(0, 
nb_contacts): x_r_cont[i] = x_r_foot + R_r_foot.T * Dpt_r_foot_cont[i]", "= om[parent_id] + Rt[parent_id] * Om[i] om[i] = write_symb_vector(out_file, om[i],", "= {};\\n'.format(i, x_l[i])) out_file.write('\\n') out_file.write(' // left wrist absolute velocity\\n')", "declaration if len(line.split('// -- variables initialization -- //')) != 1:", "////////////////////////\\n') out_file.write(' *\\n') out_file.write(' * origin: in the waist, middle", "in range(0, nb_contacts): for j in range(0, 3): out_file.write(' in_out.r_Lfoot_cont[{}][{}]", "write symbolic jacobian of a com point def write_symb_xgj(nb_bodies, Rj,", "theta_dot_compute(out_file, om[waist_id], 'omega_waist', 'waist') out_file.write('\\n') out_file.write(' // torso orientation angle", "nb_contacts): for j in range(0, 3): out_file.write(' in_out.r_Lfoot_cont[{}][{}] = {};\\n'.format(i,", "'{}{}{}'.format(start_name, i+1, end_name) save_vector[i] = ' {} = {};\\n'.format(elem_name, vector[i]).replace('1.0*','')", "in range(0, 3): for j in range(0, 3): R[0][i,j] =", "== 1: return np.array([[-elem], [0.0], [0.0]]) elif axis == 2:", "R_l_elb[i])) out_file.write('\\n') out_file.write(' // right wrist absolute orientation jacobian\\n') out_file.write('", "j in range(0, 3): R[0][i,j] = sp.Symbol('IMU{}{}'.format(i+1, j+1)) write_symb_Rj(nb_bodies, Rj,", "cur_len == 2: result += der_elem(cur_term_split[0], Rj, xj, xgj, der_var)*sp.Symbol(cur_term_split[1].strip())", "2: return np.array([[cosine, 0.0, -sine], [0.0, 1.0, 0.0], [sine, 0.0,", "j, x_l_cont[i][j])) out_file.write('\\n') out_file.write(' // left foot contact points jacobian\\n')", "if flag_first: out_file.write(' + {}*{}'.format(M[j], xg[j][i])) else: flag_first = 1", "sine): if direct: if axis == 1: return np.array([[1.0, 0.0,", "# 20 01 24 # # 02 08 # #", "xp[0] = write_symb_vector(out_file, xp[0], 'xp1_', '') for i in range(1,", "not flag_print: flag_print = 1 out_file.write('\\n') out_file.write('{}'.format(xj_print[i][j][k])) out_file.write(' 
}\\n') #", "0, 15) # generate the symbolic output file def gen_symbolic_out(out_file_name,", "IMU{}{} = in_out.IMU_Orientation[{}];\\n'.format(i+1, j+1, 3*i+j)) out_file.write('\\n // IMU - angles", "in range(0, 9): if Rj_print[i][j][k] != None: if not flag_first:", "{\\n') out_file.write(' return;\\n }\\n\\n') out_file.write(' inv_c_y_Rfoot = 1.0 / c_y_Rfoot;\\n')", "nb_bodies*[None] xj_print[i] = nb_bodies*[None] xgj_print[i] = nb_bodies*[None] for j in", "x_r_cont[i][k], 'q{}'.format(j+1)) if cur_jac != 0: if not flag_first: flag_first", "rot_axis, parent_body_index, joint_id_names, Dpt, Dg, M): # temporary file in_temp", "absolute rotational vector of body i\\n') out_file.write(' * Ri :", "xj, xgj, R_matrix[j], 'q{}'.format(i+1)) [Rj[index-1][i], Rj_print[index-1][i]] = print_save_symb_matrix(new_matrix, 'R{}_'.format(index), '_d{}'.format(i+1))", "# # Di : position vector from the anchor point", "(waist), but aligned with the ground (info from IMU) #", "a float def isInt(value): try: int(value) return True except: return", "== 'c{}'.format(der_q): result += -sp.Symbol('s{}'.format(der_q)) # sine elif pos_str ==", "# omi : absolute rotational vector of body i #", "nb_bodies): if flag_first: out_file.write(' + {}*xgp{}_{}'.format(M[j], j+1, i+1)) else: flag_first", "angles (and derivative) def yaw_pitch_roll_angles(out_file, angle_name, R_matrix, epsilon): if epsilon", "if ((!c_y_Rfoot) || (!c_y_Lfoot))\\n {\\n') out_file.write(' return;\\n }\\n\\n') out_file.write(' inv_c_y_Rfoot", "der_var): # element to derive (string) elem_str = elem_str.replace('- ','-').strip()", "jacobian of a com point def write_symb_xgj(nb_bodies, Rj, xj, xgj,", "om[0] = write_symb_vector(out_file, om[0], 'om1_', '') om_tilde[0] = get_tilde(om[0]) for", "xgj, R_matrix[j], 'q{}'.format(i+1)) [Rj[index-1][i], Rj_print[index-1][i]] = print_save_symb_matrix(new_matrix, 'R{}_'.format(index), '_d{}'.format(i+1)) #", "xg, xgp, xgj) feet_compute(out_file, joint_id_names, R, x, xp, om, Rj,", "# 
temporary file in_temp = './{}_temp.cc'.format(out_file_name) file_temp = open(in_temp, 'w')", "in_out.r_Lfoot[{}] = {};\\n'.format(i, x_l[i])) out_file.write('\\n') out_file.write(' // left foot absolute", "# # xi : absolute position vector (from origin, expressed", "new_matrix = sp.zeros(3, 3) flag_print = 0 for i in", "derivative of xi\\n') out_file.write(' * xgpi : derivative of xgi\\n')", "= 1 out_file.write('\\n') out_file.write(' in_out.Lfoot_or_der[{}][{}] = {};\\n'.format(get_string_enum(joint_id_names[i]), j, cur_jac)) out_file.write('", "om_tilde_r_elb = get_tilde(om_r_elb) om_tilde_l_elb = get_tilde(om_l_elb) x_r = x_r_elb +", "initialization def write_intialization(out_file, nb_bodies, joint_id_names): out_file.write(' // -- variables initialization", "= sp.zeros(3, 1) # loop on all the vector elements", "//\\n') out_file.write(' * // 18 15 22 //\\n') out_file.write(' *", "; 3*pi/2] out_file.write(' in_out.{}[0] = atan2(-{}, -{});\\n'.format(angle_name, R_matrix[5], R_matrix[8])) out_file.write('", "origin (waist), but aligned with the ground (info from IMU)", "all the lines for line in f: cut_line_1 = line.split(elem)", "# left arm ]) # parent index parent_body_index = np.array([", "# waist 'M_7' , 'M_8' , 'M_9' , 'M_10', 'M_11',", "elif not flag_print: flag_print = 1 out_file.write('\\n') out_file.write('{}'.format(Rj_print[i][j][k])) out_file.write(' }\\n')", "range(0, nb_bodies): for j in range(1, nb_bodies): flag_print = 0", "each joint before body i (1:x, 2:y, 3:z) rot_axis =", "re.split(\"([A-Z][^A-Z]*)\", new_string)) new_string = cur_split[0].upper() for i in range(1, len(cur_split)):", "jacobian) def symbolic_jacob_der(Rj, xj, xgj, symb_var, der_var): # list of", "current body i \\n') out_file.write(' * (previous body is not", "range(0, 3): out_file.write(' in_out.rp_COM[{}] = '.format(i)) flag_first = 0 for", "for j in range(0,9): cur_jac = symbolic_jacob_der(Rj, xj, xgj, R_r_foot[j],", "angle derivatives [rad/s]\\n') 
theta_dot_compute(out_file, om[l_foot_id], 'omega_Lfoot', 'Lfoot') out_file.write('\\n') # compute", "'{}'.format(elem)) >= 1: count = 0 with open(in_file,'r') as f:", "not flag_print: flag_print = 1 out_file.write('\\n') out_file.write(' in_out.r_Lwrist_der[{}][{}] = {};\\n'.format(get_string_enum(joint_id_names[i]),", "# inertial frame: located at the origin (waist), but aligned", "range(min, max+1): out_file.write('{}{}'.format(prefix, i)) if i == max: out_file.write(';\\n') else:", "right wrist absolute position\\n') for i in range(0,3): out_file.write(' in_out.r_Rwrist[{}]", "Ri : absolute rotational matrix\\n') out_file.write(' * Rti : transpose", "[rad]\\n') yaw_pitch_roll_angles(out_file, 'theta_torso', R[torso_id], 1) out_file.write('\\n') out_file.write(' c_y_waist = cos(in_out.theta_waist[1]);\\n')", "= 3 * [None] for i in range(0,3): if vector[i]", "x[i], i+1) x[i] = write_symb_vector(out_file, x[i], 'x{}_'.format(i+1), '') xp[i] =", "sp.Matrix([sp.Symbol('L_1_17'), sp.Symbol('L_2_17'), sp.Symbol('L_3_17')]) Dg[12] = sp.Matrix([sp.Symbol('L_1_18'), 0.0 , sp.Symbol('L_3_18')]) #", "xg[j][i])) else: flag_first = 1 out_file.write('({}*xg{}_{}'.format(M[j], j+1, i+1)) if j", "out_file.write(' * ci : cosine of the relative angle before", "Rj, xj, xgj, xj_print, x[i], i+1) x[i] = write_symb_vector(out_file, x[i],", "y_min Dpt_r_foot_cont[3][1] = y_max for i in range(0, nb_contacts): Dpt_r_foot_cont[i][2]", "or vector[i] == 1: new_vector[i] = vector[i] save_vector[i] = None", "left leg 0, 13, 14, # trunk 15, 16, 17,", "sp.Symbol('L_2_13'), sp.Symbol('L_3_13')]) Dg[8] = sp.Matrix([sp.Symbol('L_1_14'), sp.Symbol('L_2_14'), sp.Symbol('L_3_14')]) Dg[9] = sp.Matrix([sp.Symbol('L_1_15'),", "max+1): out_file.write('{}{}'.format(prefix, i)) if i == max: out_file.write(';\\n') else: out_file.write(',", "R[0][i,j] = sp.Symbol('IMU{}{}'.format(i+1, j+1)) write_symb_Rj(nb_bodies, Rj, xj, xgj, Rj_print, R[0],", "= sp.Symbol(elem_name) if flag_print: out_file.write('\\n') 
return new_vector # write symbolic", "cur_jac)) out_file.write(' }\\n\\n') out_file.write(' // left foot contact points absolute", "if cur_len == 1: # positive neg_flag = 0 pos_str", "out_file.write(' c_y_waist = cos(in_out.theta_waist[1]);\\n') out_file.write(' c_y_torso = cos(in_out.theta_torso[1]);\\n') out_file.write(' c_z_waist", "symbolic_jacob_der(Rj, xj, xgj, x_l[j], 'q{}'.format(i+1)) if cur_jac != 0: if", "before body i\\n') out_file.write(' * ci : cosine of the", "out_file.write('\\n') else: out_file.write(' }\\n\\n') # from an orientation matrix, compute", "return;\\n }\\n\\n') out_file.write(' inv_c_y_waist = 1.0 / c_y_waist;\\n') out_file.write(' inv_c_y_torso", "results -- //\\n\\n') com_compute(out_file, nb_bodies, joint_id_names, M, xg, xgp, xgj)", "xgj_print[index-1][i]] = print_save_symb_vector(new_vector, 'xg{}_'.format(index), '_d{}'.format(i+1)) # symbolic computation def symbolic_computation(out_file,", "flag_print = 1 elif not flag_print: flag_print = 1 out_file.write('\\n')", "jacobian of 'Ri' # return true if it is a", "out_file.write(' inv_c_y_Rfoot = 1.0 / c_y_Rfoot;\\n') out_file.write(' inv_c_y_Lfoot = 1.0", "out_file.write(')/m_tot;\\n') else: out_file.write('0.0;\\n') out_file.write('\\n') out_file.write(' // global com jacobian\\n') out_file.write('", "}\\n\\n') # get a string for the enumeration of joints", "* Rti : transpose matrix of Ri\\n') out_file.write(' * xji", "range(0,9): cur_jac = symbolic_jacob_der(Rj, xj, xgj, R_r_elb[j], 'q{}'.format(i+1)) if cur_jac", "origin: in the waist, middle point between the two pitch", "new_matrix[i,j] = sp.Symbol(elem_name) return new_matrix, save_matrix # write symbolic jacobian", "== 1: return np.array([[elem], [0.0], [0.0]]) elif axis == 2:", "between the two pitch hip rotations # inertial frame: located", "count_elem(in_file, '{}'.format(elem)) >= 1: count = 0 with open(in_file,'r') as", "exit() # compute derivative result = 0 # cosine if", "the beginning of the file def 
write_file_beginning(out_file, joint_id_names): out_file.write('/*! \\n')", "# declaration if len(line.split('// -- variables initialization -- //')) !=", "# jacobian xg out_file.write('\\n // jacobian com absolute positions\\n') out_file.write('", "= nb_bodies*[None] xgp = nb_bodies*[None] for i in range(0, nb_bodies):", "range(1, len(cur_split)): new_string = '{}_{}'.format(new_string, cur_split[i].upper()) return new_string # write", "* \\\\author <NAME>\\n') out_file.write(' * \\\\file forward_kinematics.cc\\n') out_file.write(' * \\\\brief", "nb_bodies): Rd[i] = get_rotation_matrix(rot_axis[i], 1, sp.Symbol('c{}'.format(i+1)), sp.Symbol('s{}'.format(i+1))) R[i] = Rd[i]", "0.0, sp.Symbol('DPT_3_12')]) Dpt[6] = sp.Matrix([0.0, 0.0, 0.0]) # left leg", "write_file_end(file_temp) file_temp.close() # output file out_file = open('./{}.cc'.format(out_file_name), 'w') with", "waist 0, 1, 2, 3, 4, 5, # right leg", "pos_str == 'c{}'.format(der_q): result += -sp.Symbol('s{}'.format(der_q)) # sine elif pos_str", "derivatives [rad/s]\\n') theta_dot_compute(out_file, om[l_foot_id], 'omega_Lfoot', 'Lfoot') out_file.write('\\n') # compute the", "# Rji : jacobian of 'Ri' # return true if", "= {};\\n'.format(get_string_enum(joint_id_names[i]), j, cur_jac)) out_file.write(' }\\n\\n') out_file.write(' // right foot", "detect positive/negative elem_split = elem_str.split('-') cur_len = len(elem_split) if cur_len", "neg_flag = 0 pos_str = elem_split[0] elif cur_len == 2:", "for j in range(0, nb_bodies): if flag_first: out_file.write(' + {}*xgp{}_{}'.format(M[j],", "1.0, 0.0], [-sine, 0.0, cosine]]) elif axis == 3: return", "= 1 out_file.write('({}*xgp{}_{}'.format(M[j], j+1, i+1)) if j == nb_bodies-1: if", "Dpt[4] = sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_10')]) Dpt[5] = sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_12')])", "left arm ]) # joint names joint_id_names = np.array(['0', #", "declarations nb_contacts = 4 x_r_foot = x[r_foot_id] x_l_foot = x[l_foot_id]", "and derivative) of the 
waist and the torso def torso_waist_angles(out_file,", "out_file.write('\\n // jacobian anchor point positions\\n') out_file.write(' if (flag_jacob)\\n {\\n')", "in range(0, nb_contacts): for j in range (1, nb_bodies): flag_print", "points jacobian\\n') out_file.write(' if (flag_jacob)\\n {\\n') flag_first = 0 for", "for i in range(min, max+1): out_file.write('{}{}'.format(prefix, i)) if i ==", "out_file.write(' in_out.r_Lfoot_cont_der[{}][{}][{}] = {};\\n'.format(i, get_string_enum(joint_id_names[j]), k, cur_jac)) out_file.write(' }\\n\\n') out_file.write('", "= sp.zeros(3, 3) save_matrix = 9 * [None] for i", "* // 02 08 //\\n') out_file.write(' * // 03 09", "arm 2, 1, 3, 2 # left arm ]) #", "# other else: print('Error: {} * counted , only implemented", "') for i in range(0, nb_bodies): out_file.write('{}'.format(M[i])) if i ==", "arm 'M_26', 'M_27', 'M_28', 'M_29' # left arm ]) #", "i+1, j+1, end_name) out_file.write(' {} = {};\\n'.format(elem_name, matrix[i,j]).replace('1.0*','')) new_matrix[i,j] =", "for line in f: cut_line = line.split(' = ') if", "for the COMAN model\\n') out_file.write(' */\\n\\n') out_file.write('// joints enumeration\\n') out_file.write('enum", "sp.Symbol('L_3_24')]) Dg[19] = sp.Matrix([sp.Symbol('L_1_25'), sp.Symbol('L_2_25'), sp.Symbol('L_3_25')]) # left arm Dg[20]", "count # print the declaration of an element def print_declaration_elem(in_file,", "// -- variables initialization -- //\\n') out_file.write('\\n // IMU -", "1) Dpt_r_foot_cont[0][0] = x_min Dpt_r_foot_cont[1][0] = x_min Dpt_r_foot_cont[2][0] = x_max", "== 2: out_file.write(';\\n') else: out_file.write(', ') # print variables declaration", "// joint relative velocities\\n') for i in range(1, nb_bodies): out_file.write('", "sin(in_out.theta_Rfoot[1]);\\n') out_file.write(' s_y_Lfoot = sin(in_out.theta_Lfoot[1]);\\n') out_file.write(' s_z_Rfoot = sin(in_out.theta_Rfoot[2]);\\n') out_file.write('", "= symbolic_jacob_der(Rj, xj, xgj, R_r_elb[j], 'q{}'.format(i+1)) if cur_jac != 
0:", "body # DGi : position vector from the anchor point", "= atan2(-{}, sqrt({}*{} + {}*{}));\\n'.format(angle_name, R_matrix[2], R_matrix[0], R_matrix[0], R_matrix[1], R_matrix[1]))", "> 0: # epsilon = 1 -> pitch angle in", "no product if cur_len == 1: result += der_elem(cur_term_split[0], Rj,", "[0.0], [0.0]]) elif axis == 2: return np.array([[0.0], [elem], [0.0]])", "r_wrist_z # computation om_tilde_r_elb = get_tilde(om_r_elb) om_tilde_l_elb = get_tilde(om_l_elb) x_r", "# compute the derivative of an element (for jacobian) def", "= sp.Matrix([0.0, sp.Symbol('DPT_2_39'), 0.0]) Dpt[18] = sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_41')]) Dpt[19]", "in range(1, nb_bodies): out_file.write(' s{} = sin(in_out.q_mot[{}]);\\n'.format(i+1, joint_id_names[i])) out_file.write('\\n //", "* s_y_{} * (s_z_{}*{} + c_z_{}*{}) + {};\\n'.format(omega_out, body_part, body_part,", "= symbolic_jacob_der(Rj, xj, xgj, x_l[j], 'q{}'.format(i+1)) if cur_jac != 0:", "[None] xgj_print[i][j] = 3 * [None] # rotation matrices out_file.write('\\n", "i in range(0, nb_bodies): for j in range(1, nb_bodies): flag_print", "filter(None, re.split(\"([A-Z][^A-Z]*)\", new_string)) new_string = cur_split[0].upper() for i in range(1,", "-{});\\n'.format(angle_name, R_matrix[1], R_matrix[0])) # compute the time derivatives of 'yaw_pitch_roll_angles'", "j in range(0,9): cur_jac = symbolic_jacob_der(Rj, xj, xgj, R_r_elb[j], 'q{}'.format(i+1))", "# symbolic variables declarations x_r_elb = x[r_elb_id] x_l_elb = x[l_elb_id]", "= sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_32')]) # right arm Dpt[16] = sp.Matrix([sp.Symbol('DPT_1_36'),", "om = nb_bodies*[None] om_tilde = nb_bodies*[None] Om[0] = sp.zeros(3, 1)", "= cos(in_out.q_mot[{}]);\\n'.format(i+1, joint_id_names[i])) out_file.write('\\n // joint sines\\n') for i in", "xgj, x_r_cont[i][k], 'q{}'.format(j+1)) if cur_jac != 0: if not flag_first:", "save_matrix[3*i+j] = None else: elem_name = '{}{}{}{}'.format(start_name, i+1, j+1, end_name)", "# epsilon = -1 -> pitch 
angle in [pi/2 ;", "= 1 elem_name = '{}{}{}{}'.format(start_name, i+1, j+1, end_name) out_file.write(' {}", "1) xgj[i][j] = sp.zeros(3, 1) Rj_print[i][j] = 9 * [None]", ": transpose matrix of Ri # xji : jacobian of", "nb_contacts = 4 x_r_foot = x[r_foot_id] x_l_foot = x[l_foot_id] xp_r_foot", "nb_contacts): for j in range(0, 3): out_file.write(' in_out.r_Rfoot_cont[{}][{}] = {};\\n'.format(i,", "matrix # Rti : transpose matrix of Ri # xji", "i+1, j+1, end_name) save_matrix[3*i+j] = ' {} = {};\\n'.format(elem_name, matrix[i,j]).replace('1.0*','')", "= 3 * [None] # rotation matrices out_file.write('\\n // rotation", "sp.Symbol('Om{}'.format(i+1))) om[i] = om[parent_id] + Rt[parent_id] * Om[i] om[i] =", "Rj, xj, xgj and xgj (jacobian) Rj = nb_bodies*[None] xj", "out_file.write('\\n') out_file.write(' // right wrist absolute orientation jacobian\\n') out_file.write(' if", "an element (for jacobian) def der_elem(elem_str, Rj, xj, xgj, der_var):", "// right foot absolute velocity\\n') for i in range(0,3): out_file.write('", "sp.Matrix([0.0, 0.0, 0.0]) # left leg Dpt[7] = sp.Matrix([0.0, sp.Symbol('DPT_2_3'),", "the anchor point of the previous body to the current", "if len(cut_line) == 2: if len(cut_line[0].split('[')) == 1: if count", "= sp.Matrix([sp.Symbol('L_1_25'), sp.Symbol('L_2_25'), sp.Symbol('L_3_25')]) # left arm Dg[20] = sp.Matrix([sp.Symbol('L_1_26'),", "in range(1, nb_bodies): out_file.write(' c{} = cos(in_out.q_mot[{}]);\\n'.format(i+1, joint_id_names[i])) out_file.write('\\n //", "* [None] for i in range(0,3): for j in range(0,3):", "i in range(0, 3): for j in range(0, 3): R[0][i,j]", "]) # joint names joint_id_names = np.array(['0', # waist 'RightHipPitch_id',", "int(value.split('_')[0].split('R')[1]) b = int(value.split('_')[1][0]) c = int(value.split('_')[1][1]) return True, a,", "20 01 24 //\\n') out_file.write(' * // 02 08 //\\n')", "x[parent_id] + Rt[parent_id] * Dpt[i] xp[i] = xp[parent_id] + om_tilde[parent_id]", "return np.array([[0.0], [0.0], 
[-elem]]) else: return np.array([]) # compute the", "out_file.write(' // right foot jacobian\\n') out_file.write(' if (flag_jacob)\\n {\\n') flag_first", "sp.Symbol('DPT_2_6'), 0.0]) Dpt[3] = sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_8')]) Dpt[4] = sp.Matrix([0.0,", "temporary file os.remove(in_temp) # main script # rotation axis for", "# 05 11 # # 06 12 # # 07", "= symbolic_jacob_der(Rj, xj, xgj, x_r[j], 'q{}'.format(i+1)) if cur_jac != 0:", "in_out.{}[1] = atan2(-{}, -sqrt({}*{} + {}*{}));\\n'.format(angle_name, R_matrix[2], R_matrix[0], R_matrix[0], R_matrix[1],", "loop on all the lines for line in f: #", "R[l_elb_id] Dpt_r_wrist = sp.zeros(3, 1) Dpt_l_wrist = sp.zeros(3, 1) Dpt_r_wrist[0]", "-0.02, -0.005, -0.225) torso_waist_angles(out_file, R, om, 0, 15) # generate", "Dpt_r_foot x_l = x_l_foot + R_l_foot.T * Dpt_l_foot xp_r =", "1, 2, # left leg 1, 2, 3, # trunk", "xgp[i] = xp[i] + om_tilde[i] * (Rt[i] * Dg[i]) write_symb_xgj(nb_bodies,", "matrix between body i and its predecessor # si :", "trunk 'RightShPitch_id' , 'RightShRoll_id' , 'RightShYaw_id' , 'RightElbPitch_id', # right", "out_file.write(' * // 02 08 //\\n') out_file.write(' * // 03", ": absolute rotational vector of body i # Ri :", "0.0, -v[0]], [-v[1], v[0], 0.0]]) # get rotation matrix def", "matrix if rot_flag: result += Rj[a-1][der_q-1][(b-1)*3+(c-1)] # vector elif vec_flag:", "range(0, 9): out_file.write(' in_out.Lfoot_or[{}] = {};\\n'.format(i, R_l_foot[i])) out_file.write('\\n') out_file.write(' //", "2: new_string = cur_split[0] for i in range(1, len(cur_split)-1): new_string", "= symbolic_jacob_der(Rj, xj, xgj, R_l_foot[j], 'q{}'.format(i+1)) if cur_jac != 0:", "'M_19', 'M_20', 'M_21', # trunk 'M_22', 'M_23', 'M_24', 'M_25', #", "= cur_split[0] for i in range(1, len(cur_split)-1): new_string = '{}{}'.format(new_string,", "write_file_end(out_file): out_file.write('}\\n') # print matrix components declaration def write_matrix_declaration(out_file, prefix):", "if len(cut_line_2[0].split('[')) == 
1: if count == 0: out_write.write(' double", "= nb_bodies*[None] xj = nb_bodies*[None] xgj = nb_bodies*[None] Rj_print =", "Dpt[0] xp[0] = om_tilde[0] * (Rt[0] * Dpt[0]) write_symb_xj(nb_bodies, Rj,", "hip rotations # inertial frame: located at the origin (waist),", "arm 'LeftShPitch_id' , 'LeftShRoll_id' , 'LeftShYaw_id' , 'LeftElbPitch_id' # left", "the origin (waist), but aligned with the ground (info from", "cur_len == 1: # positive neg_flag = 0 pos_str =", "'omega_Rfoot', 'Rfoot') out_file.write('\\n') out_file.write(' // left foot orientation angle derivatives", "range(0,3): out_file.write(' in_out.rp_Rwrist[{}] = {};\\n'.format(i, xp_r[i])) out_file.write('\\n') out_file.write(' // right", "== 1: return np.array([[1.0, 0.0, 0.0], [0.0, cosine, sine], [0.0,", "def symbolic_jacob_der(Rj, xj, xgj, symb_var, der_var): # list of all", "out_file.write('\\n // joint absolute velocities\\n') Om = nb_bodies*[None] om =", "writing outputs out_file.write(' // right foot absolute position\\n') for i", "the time derivatives of 'yaw_pitch_roll_angles' def theta_dot_compute(out_file, omega_in, omega_out, body_part):", "cur_jac = symbolic_jacob_der(Rj, xj, xgj, R_l_foot[j], 'q{}'.format(i+1)) if cur_jac !=", "range(0, 9): out_file.write(' in_out.Rfoot_or[{}] = {};\\n'.format(i, R_r_foot[i])) out_file.write('\\n') for i", "r_wrist_y Dpt_r_wrist[2] = r_wrist_z Dpt_l_wrist[0] = r_wrist_x Dpt_l_wrist[1] = -r_wrist_y", "*/\\n\\n') out_file.write('// joints enumeration\\n') out_file.write('enum {') count = 0 for", "body_part, body_part, omega_in[1], body_part, omega_in[0], omega_in[2])) # angles (position and", "flag_print: flag_print = 1 out_file.write('\\n') out_file.write(' in_out.r_Rwrist_der[{}][{}] = {};\\n'.format(get_string_enum(joint_id_names[i]), j,", "out_file.write(' in_out.r_Lwrist_der[{}][{}] = {};\\n'.format(get_string_enum(joint_id_names[i]), j, cur_jac)) out_file.write(' }\\n\\n') out_file.write(' //", "elif not flag_print: flag_print = 1 out_file.write('\\n') 
out_file.write(' in_out.Rfoot_or_der[{}][{}] =", "(1, nb_bodies): flag_print = 0 for j in range(0,9): cur_jac", "# # 07 13 # # # ###################### # #", "= nb_bodies*[None] # waist Dpt[0] = sp.Matrix([0.0, 0.0, 0.0]) #", "Dpt[9] = sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_20')]) Dpt[10] = sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_22')])", "*\\n') out_file.write(' * Di : position vector from the anchor", "direct, elem): if direct: if axis == 1: return np.array([[elem],", "of the previous body\\n') out_file.write(' * DGi : position vector", "= ') if len(cut_line_1) == 2 and len(cut_line_2) == 2:", "= get_tilde(om_r_elb) om_tilde_l_elb = get_tilde(om_l_elb) x_r = x_r_elb + R_r_elb.T", "= nb_bodies*[None] xp = nb_bodies*[None] x[0] = Rt[0] * Dpt[0]", "main script # rotation axis for each joint before body", "arm 15, 20, 21, 22 # left arm ]) nb_bodies", "by its name def write_symb_matrix(out_file, matrix, start_name, end_name): new_matrix =", "b = int(value.split('_')[1]) return True, a, b except: return False,", "absolute position vector of the COM G_i of body i\\n')", "0.0, 0.0], [0.0, cosine, sine], [0.0, -sine, cosine]]) elif axis", "the relative frame of the current body i # Omi", "in range(1, nb_bodies): for j in range(0, 3): out_file.write(' in_out.r_COM_der[{}][{}]", "xp_r_foot = xp[r_foot_id] xp_l_foot = xp[l_foot_id] om_r_foot = om[r_foot_id] om_l_foot", "joint_id_names[i])) # write symbolic vector and replace symbolic variable by", "Dpt_l_wrist[2] = r_wrist_z # computation om_tilde_r_elb = get_tilde(om_r_elb) om_tilde_l_elb =", "/ c_y_torso;\\n\\n') out_file.write(' // waist orientation angle derivatives [rad/s]\\n') theta_dot_compute(out_file,", "neg_flag: result = -result return result # compute the derivative", "Dg, M) # end of the file write_file_end(file_temp) file_temp.close() #", "= sp.Matrix([0.0, sp.Symbol('DPT_2_6'), 0.0]) Dpt[3] = sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_8')]) Dpt[4]", "sp.Matrix([0.0, sp.Symbol('DPT_2_39'), 0.0]) Dpt[18] = 
sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_41')]) Dpt[19] =", "i in range (1, nb_bodies): new_matrix = sp.zeros(3, 3) #", "out_file.write(' omega_{} = in_out.IMU_Angular_Rate[{}];\\n'.format(i+1, i)) out_file.write('\\n // joint cosines\\n') for", "omega_in[0])) out_file.write(' in_out.{}[2] = inv_c_y_{} * s_y_{} * (s_z_{}*{} +", "previous body to the current body i # (previous body", "1) flag_print = 0 for i in range(0,3): if vector[i]", "the inertial frame) # of the anchor point of body", "derivative) def yaw_pitch_roll_angles(out_file, angle_name, R_matrix, epsilon): if epsilon > 0:", "(position and derivative) of the waist and the torso def", "symbolic computation -- //\\n') # Rj, xj, xgj and xgj", "yaw angles (and derivative) def yaw_pitch_roll_angles(out_file, angle_name, R_matrix, epsilon): if", "= sp.Matrix([sp.Symbol('L_1_18'), 0.0 , sp.Symbol('L_3_18')]) # trunk Dg[13] = sp.Matrix([sp.Symbol('L_1_19'),", "der_elem(cur_term_split[0], Rj, xj, xgj, der_var)*sp.Symbol(cur_term_split[1].strip()) result += der_elem(cur_term_split[1], Rj, xj,", "= cos(in_out.theta_Rfoot[1]);\\n') out_file.write(' c_y_Lfoot = cos(in_out.theta_Lfoot[1]);\\n') out_file.write(' c_z_Rfoot = cos(in_out.theta_Rfoot[2]);\\n')", "xp_l[i])) out_file.write('\\n') out_file.write(' // left wrist jacobian\\n') out_file.write(' if (flag_jacob)\\n", "R_matrix[8])) out_file.write(' in_out.{}[1] = atan2(-{}, sqrt({}*{} + {}*{}));\\n'.format(angle_name, R_matrix[2], R_matrix[0],", "//\\n') out_file.write(' * // 07 13 //\\n') out_file.write(' * //", "= atan2({}, {});\\n'.format(angle_name, R_matrix[1], R_matrix[0])) else: # epsilon = -1", "10, 11, # left leg 0, 13, 14, # trunk", "'M_13', 'M_14', 'M_15', 'M_16', 'M_17', 'M_18', # left leg 'M_19',", "m_tot = ') for i in range(0, nb_bodies): out_file.write('{}'.format(M[i])) if", "1) out_file.write('\\n') out_file.write(' // left foot orientation matrix as angles", "'w') # beginning of the file write_file_beginning(file_temp, joint_id_names) # variables", 
"\\\\brief forward kinematics computation for the COMAN model\\n') out_file.write(' */\\n\\n')", "double {}'.format(cut_line_2[0].strip())) else: out_write.write(', {}'.format(cut_line_2[0].strip())) count += 1 if count", "def write_symb_xgj(nb_bodies, Rj, xj, xgj, xgj_print, x_vector, index): # loop", "get_rotation_matrix(rot_axis[i], 1, sp.Symbol('c{}'.format(i+1)), sp.Symbol('s{}'.format(i+1))) R[i] = Rd[i] * R[parent_body_index[i]] write_symb_Rj(nb_bodies,", "'RightShRoll_id' , 'RightShYaw_id' , 'RightElbPitch_id', # right arm 'LeftShPitch_id' ,", "absolute position\\n') for i in range(0,3): out_file.write(' in_out.r_Rwrist[{}] = {};\\n'.format(i,", "= 1 out_file.write('\\n') out_file.write('{}'.format(xgj_print[i][j][k])) out_file.write(' }\\n') # results out_file.write('\\n //", "enumeration of joints def get_string_enum(cur_string): cur_split = cur_string.split('_') if len(cur_split)", "# sine elif pos_str == 's{}'.format(der_q): result += sp.Symbol('c{}'.format(der_q)) #", ": absolute rotational matrix # Rti : transpose matrix of", "j, cur_jac)) out_file.write(' }\\n\\n') out_file.write(' // left foot absolute position\\n')", "R[r_foot_id], 1) out_file.write('\\n') out_file.write(' // left foot orientation matrix as", "/ c_y_Rfoot;\\n') out_file.write(' inv_c_y_Lfoot = 1.0 / c_y_Lfoot;\\n\\n') out_file.write(' //", "= {};\\n'.format(elem_name, matrix[i,j]).replace('1.0*','') new_matrix[i,j] = sp.Symbol(elem_name) return new_matrix, save_matrix #", "8, 9, 10, 11, # left leg 0, 13, 14,", "def print_all_declaration(in_file, out_write, nb_max_char): count = 0 with open(in_file,'r') as", "range(0,3): out_file.write('{}{}{}'.format(prefix, i+1, j+1)) if i == 2 and j", "out_file.write('\\n') out_file.write(' // right foot jacobian\\n') out_file.write(' if (flag_jacob)\\n {\\n')", "terms term_list = str(symb_var).replace('- ','-').replace('-','+-').split('+') if term_list[0] == '': term_list.pop(0)", "rot_axis, parent_body_index, Dpt, Dg, M): out_file.write('\\n\\n 
// -- symbolic computation", "}\\n') # results out_file.write('\\n // -- Collecting results -- //\\n\\n')", "R[l_foot_id] Dpt_r_foot = sp.zeros(3, 1) Dpt_l_foot = sp.zeros(3, 1) Dpt_r_foot[2]", "Rj, xj, xgj, xgj_print, x_vector, index): # loop on all", "foot jacobian\\n') out_file.write(' if (flag_jacob)\\n {\\n') flag_first = 0 for", "sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_12')]) Dpt[6] = sp.Matrix([0.0, 0.0, 0.0]) # left", "out_file.write(' }\\n\\n') out_file.write(' // right foot orientation matrix as angles", "COM (center of mass) G_i, # expressed in the relative", "for i in range(0, 9): out_file.write(' in_out.Lwrist_or[{}] = {};\\n'.format(i, R_l_elb[i]))", "generate the symbolic output file def gen_symbolic_out(out_file_name, nb_bodies, rot_axis, parent_body_index,", "{};\\n'.format(get_string_enum(joint_id_names[i]), j, cur_jac)) out_file.write(' }\\n\\n') out_file.write(' // right foot contact", "2, 1, 2, # right leg 2, 1, 3, 2,", "c_z_{}*{}) + {};\\n'.format(omega_out, body_part, body_part, body_part, omega_in[1], body_part, omega_in[0], omega_in[2]))", "matrix[i,j]).replace('1.0*','') new_matrix[i,j] = sp.Symbol(elem_name) return new_matrix, save_matrix # write symbolic", "'LeftHipYaw_id' , 'LeftKneePitch_id' , 'LeftFootRoll_id' , 'LeftFootPitch_id' , # left", "s_z_torso = sin(in_out.theta_torso[2]);\\n\\n') out_file.write(' if ((!c_y_waist) || (!c_y_torso))\\n {\\n') out_file.write('", "1) out_file.write('\\n') out_file.write(' // torso orientation matrix as angles [rad]\\n')", "21 //\\n') out_file.write(' * // 18 15 22 //\\n') out_file.write('", "flag_print: flag_print = 1 out_file.write('\\n') out_file.write(' in_out.r_Lfoot_der[{}][{}] = {};\\n'.format(get_string_enum(joint_id_names[i]), j,", "new_vector[i] = vector[i] save_vector[i] = None else: elem_name = '{}{}{}'.format(start_name,", "flag_first: out_file.write(' + {}*{}'.format(M[j], xg[j][i])) else: flag_first = 1 out_file.write('({}*xg{}_{}'.format(M[j],", "j == 2: out_file.write(';\\n') else: 
out_file.write(', ') # print variables", "out_file.write(' * frame of the previous body\\n') out_file.write(' * DGi", "* Ri : absolute rotational matrix\\n') out_file.write(' * Rti :", "nb_bodies*[None] Rd = nb_bodies*[None] Rd[0] = sp.zeros(3, 3) R[0] =", "[sine, cosine, 0.0], [0.0, 0.0, 1.0]]) else: return np.array([]) #", "= sin(in_out.theta_Lfoot[1]);\\n') out_file.write(' s_z_Rfoot = sin(in_out.theta_Rfoot[2]);\\n') out_file.write(' s_z_Lfoot = sin(in_out.theta_Lfoot[2]);\\n\\n')", "'q{}'.format(i+1)) [Rj[index-1][i], Rj_print[index-1][i]] = print_save_symb_matrix(new_matrix, 'R{}_'.format(index), '_d{}'.format(i+1)) # write symbolic", "loop on all the lines for line in f: cut_line_1", "right leg Dpt[1] = sp.Matrix([0.0, sp.Symbol('DPT_2_2'), 0.0]) Dpt[2] = sp.Matrix([0.0,", "sp.Matrix([sp.Symbol('L_1_19'), sp.Symbol('L_2_19'), sp.Symbol('L_3_19')]) Dg[14] = sp.Matrix([sp.Symbol('L_1_20'), sp.Symbol('L_2_20'), sp.Symbol('L_3_20')]) Dg[15] =", "0 for j in range(0, 3): cur_jac = symbolic_jacob_der(Rj, xj,", "G_i,\\n') out_file.write(' * expressed in the relative frame of the", "compute derivative result = 0 # cosine if pos_str ==", "elem_str.split('-') cur_len = len(elem_split) if cur_len == 1: # positive", "nb_bodies): flag_print = 0 for j in range(0, 3): cur_jac" ]
[ "(for example 5d or 12h or 30m).\") parser.add_argument(\"-o\", \"--output\", help=\"Output", "coding: utf-8 -*- from pymisp import PyMISP from keys import", "5d or 12h or 30m).\") parser.add_argument(\"-o\", \"--output\", help=\"Output file\") args", "# Usage for pipe masters: ./last.py -l 5h | jq", "| jq . def init(url, key): return PyMISP(url, key, misp_verifycert,", "python # -*- coding: utf-8 -*- from pymisp import PyMISP", "events from a MISP instance.') parser.add_argument(\"-l\", \"--last\", required=True, help=\"can be", "days, hours, minutes (for example 5d or 12h or 30m).\")", "if out is None: if 'response' in result: print(json.dumps(result['response'])) else:", "'json') def download_last(m, last, out=None): result = m.download_last(last) if out", "parser.parse_args() if args.output is not None and os.path.exists(args.output): print('Output file", "<reponame>0xiso/PyMISP #!/usr/bin/env python # -*- coding: utf-8 -*- from pymisp", "required=True, help=\"can be defined in days, hours, minutes (for example", "else: print('No results for that time period') exit(0) else: with", "result = m.download_last(last) if out is None: if 'response' in", "misp_verifycert import argparse import os import json # Usage for", "import os import json # Usage for pipe masters: ./last.py", "import json # Usage for pipe masters: ./last.py -l 5h", "for pipe masters: ./last.py -l 5h | jq . 
def", "with open(out, 'w') as f: f.write(json.dumps(result['response'])) if __name__ == '__main__':", "from pymisp import PyMISP from keys import misp_url, misp_key, misp_verifycert", "-*- from pymisp import PyMISP from keys import misp_url, misp_key,", "print('Output file already exists, abord.') exit(0) misp = init(misp_url, misp_key)", "file\") args = parser.parse_args() if args.output is not None and", "Usage for pipe masters: ./last.py -l 5h | jq .", "def download_last(m, last, out=None): result = m.download_last(last) if out is", "last, out=None): result = m.download_last(last) if out is None: if", "that time period') exit(0) else: with open(out, 'w') as f:", "f.write(json.dumps(result['response'])) if __name__ == '__main__': parser = argparse.ArgumentParser(description='Download latest events", "args.output is not None and os.path.exists(args.output): print('Output file already exists,", "return PyMISP(url, key, misp_verifycert, 'json') def download_last(m, last, out=None): result", "file already exists, abord.') exit(0) misp = init(misp_url, misp_key) download_last(misp,", "exists, abord.') exit(0) misp = init(misp_url, misp_key) download_last(misp, args.last, args.output)", "pymisp import PyMISP from keys import misp_url, misp_key, misp_verifycert import", "misp_key, misp_verifycert import argparse import os import json # Usage", "5h | jq . 
def init(url, key): return PyMISP(url, key,", "for that time period') exit(0) else: with open(out, 'w') as", "os import json # Usage for pipe masters: ./last.py -l", "argparse.ArgumentParser(description='Download latest events from a MISP instance.') parser.add_argument(\"-l\", \"--last\", required=True,", "\"--last\", required=True, help=\"can be defined in days, hours, minutes (for", "period') exit(0) else: with open(out, 'w') as f: f.write(json.dumps(result['response'])) if", "= parser.parse_args() if args.output is not None and os.path.exists(args.output): print('Output", "\"--output\", help=\"Output file\") args = parser.parse_args() if args.output is not", "if 'response' in result: print(json.dumps(result['response'])) else: print('No results for that", "12h or 30m).\") parser.add_argument(\"-o\", \"--output\", help=\"Output file\") args = parser.parse_args()", "result: print(json.dumps(result['response'])) else: print('No results for that time period') exit(0)", "minutes (for example 5d or 12h or 30m).\") parser.add_argument(\"-o\", \"--output\",", "if __name__ == '__main__': parser = argparse.ArgumentParser(description='Download latest events from", "instance.') parser.add_argument(\"-l\", \"--last\", required=True, help=\"can be defined in days, hours,", "== '__main__': parser = argparse.ArgumentParser(description='Download latest events from a MISP", "already exists, abord.') exit(0) misp = init(misp_url, misp_key) download_last(misp, args.last,", "'__main__': parser = argparse.ArgumentParser(description='Download latest events from a MISP instance.')", "misp_url, misp_key, misp_verifycert import argparse import os import json #", "None and os.path.exists(args.output): print('Output file already exists, abord.') exit(0) misp", "if args.output is not None and os.path.exists(args.output): print('Output file already", "# -*- coding: utf-8 -*- from pymisp import PyMISP from", "'response' in result: print(json.dumps(result['response'])) else: print('No results 
for that time", "help=\"can be defined in days, hours, minutes (for example 5d", "-*- coding: utf-8 -*- from pymisp import PyMISP from keys", "utf-8 -*- from pymisp import PyMISP from keys import misp_url,", "./last.py -l 5h | jq . def init(url, key): return", "-l 5h | jq . def init(url, key): return PyMISP(url,", "= m.download_last(last) if out is None: if 'response' in result:", "parser.add_argument(\"-o\", \"--output\", help=\"Output file\") args = parser.parse_args() if args.output is", "PyMISP(url, key, misp_verifycert, 'json') def download_last(m, last, out=None): result =", "out is None: if 'response' in result: print(json.dumps(result['response'])) else: print('No", "or 12h or 30m).\") parser.add_argument(\"-o\", \"--output\", help=\"Output file\") args =", "masters: ./last.py -l 5h | jq . def init(url, key):", "json # Usage for pipe masters: ./last.py -l 5h |", "defined in days, hours, minutes (for example 5d or 12h", "#!/usr/bin/env python # -*- coding: utf-8 -*- from pymisp import", "parser.add_argument(\"-l\", \"--last\", required=True, help=\"can be defined in days, hours, minutes", "and os.path.exists(args.output): print('Output file already exists, abord.') exit(0) misp =", "be defined in days, hours, minutes (for example 5d or", "time period') exit(0) else: with open(out, 'w') as f: f.write(json.dumps(result['response']))", "PyMISP from keys import misp_url, misp_key, misp_verifycert import argparse import", "in days, hours, minutes (for example 5d or 12h or", "help=\"Output file\") args = parser.parse_args() if args.output is not None", "30m).\") parser.add_argument(\"-o\", \"--output\", help=\"Output file\") args = parser.parse_args() if args.output", "None: if 'response' in result: print(json.dumps(result['response'])) else: print('No results for", "is None: if 'response' in result: print(json.dumps(result['response'])) else: print('No results", "key): return PyMISP(url, key, misp_verifycert, 'json') def download_last(m, last, out=None):", "key, 
misp_verifycert, 'json') def download_last(m, last, out=None): result = m.download_last(last)", "print(json.dumps(result['response'])) else: print('No results for that time period') exit(0) else:", "as f: f.write(json.dumps(result['response'])) if __name__ == '__main__': parser = argparse.ArgumentParser(description='Download", "a MISP instance.') parser.add_argument(\"-l\", \"--last\", required=True, help=\"can be defined in", "from keys import misp_url, misp_key, misp_verifycert import argparse import os", "in result: print(json.dumps(result['response'])) else: print('No results for that time period')", "args = parser.parse_args() if args.output is not None and os.path.exists(args.output):", "example 5d or 12h or 30m).\") parser.add_argument(\"-o\", \"--output\", help=\"Output file\")", "download_last(m, last, out=None): result = m.download_last(last) if out is None:", "pipe masters: ./last.py -l 5h | jq . def init(url,", "__name__ == '__main__': parser = argparse.ArgumentParser(description='Download latest events from a", "open(out, 'w') as f: f.write(json.dumps(result['response'])) if __name__ == '__main__': parser", ". def init(url, key): return PyMISP(url, key, misp_verifycert, 'json') def", "import misp_url, misp_key, misp_verifycert import argparse import os import json", "print('No results for that time period') exit(0) else: with open(out,", "misp_verifycert, 'json') def download_last(m, last, out=None): result = m.download_last(last) if", "jq . 
def init(url, key): return PyMISP(url, key, misp_verifycert, 'json')", "not None and os.path.exists(args.output): print('Output file already exists, abord.') exit(0)", "argparse import os import json # Usage for pipe masters:", "hours, minutes (for example 5d or 12h or 30m).\") parser.add_argument(\"-o\",", "f: f.write(json.dumps(result['response'])) if __name__ == '__main__': parser = argparse.ArgumentParser(description='Download latest", "MISP instance.') parser.add_argument(\"-l\", \"--last\", required=True, help=\"can be defined in days,", "def init(url, key): return PyMISP(url, key, misp_verifycert, 'json') def download_last(m,", "import PyMISP from keys import misp_url, misp_key, misp_verifycert import argparse", "out=None): result = m.download_last(last) if out is None: if 'response'", "m.download_last(last) if out is None: if 'response' in result: print(json.dumps(result['response']))", "= argparse.ArgumentParser(description='Download latest events from a MISP instance.') parser.add_argument(\"-l\", \"--last\",", "from a MISP instance.') parser.add_argument(\"-l\", \"--last\", required=True, help=\"can be defined", "or 30m).\") parser.add_argument(\"-o\", \"--output\", help=\"Output file\") args = parser.parse_args() if", "'w') as f: f.write(json.dumps(result['response'])) if __name__ == '__main__': parser =", "is not None and os.path.exists(args.output): print('Output file already exists, abord.')", "init(url, key): return PyMISP(url, key, misp_verifycert, 'json') def download_last(m, last,", "os.path.exists(args.output): print('Output file already exists, abord.') exit(0) misp = init(misp_url,", "latest events from a MISP instance.') parser.add_argument(\"-l\", \"--last\", required=True, help=\"can", "keys import misp_url, misp_key, misp_verifycert import argparse import os import", "results for that time period') exit(0) else: with open(out, 'w')", "else: with open(out, 'w') as f: f.write(json.dumps(result['response'])) if __name__ ==", "parser = 
argparse.ArgumentParser(description='Download latest events from a MISP instance.') parser.add_argument(\"-l\",", "import argparse import os import json # Usage for pipe", "exit(0) else: with open(out, 'w') as f: f.write(json.dumps(result['response'])) if __name__" ]
[ "from .widget.scene.urls import urlpatterns as scene_urls from .widget.benefit.urls import urlpatterns", "import urlpatterns as special_page_urls from .store.bank_account.urls import urlpatterns as bank_account_urls", "---------------------------------------------- urlpatterns = [ url(r'^$', core_views.index, name='index'), url(r'^categories/', include(category_urls)), url(r'^collections/',", "import urlpatterns as product_urls from .search.urls import urlpatterns as search_urls", "url(r'^categories/', include(category_urls)), url(r'^collections/', include(collection_urls)), url(r'^orders/', include(order_urls)), url(r'^page/', include(page_urls)), url(r'^products/', include(product_urls)),", "social_network_urls from .store.special_page.urls import urlpatterns as special_page_urls from .store.bank_account.urls import", "page_urls from .product.urls import urlpatterns as product_urls from .search.urls import", "url from django.views.generic.base import TemplateView from . import views as", "as slider_urls from .widget.banner.urls import urlpatterns as banner_urls from .widget.scene.urls", "urlpatterns as bank_account_urls from .store.footer_item.urls import urlpatterns as footer_item_urls #", "+ social_network_urls + special_page_urls + bank_account_urls + footer_item_urls)), # Extensions", ".discount.urls import urlpatterns as discount_urls from .menu.urls import urlpatterns as", "from .customer.urls import urlpatterns as customer_urls from .discount.urls import urlpatterns", "from .order.urls import urlpatterns as order_urls from .page.urls import urlpatterns", "# BEGIN :: SoftButterfly Extensions ---------------------------------------- url(r'^brand/', include(brand_urls)), url(r'^slider/', include(slider_urls)),", "include(category_urls)), url(r'^collections/', include(collection_urls)), url(r'^orders/', include(order_urls)), url(r'^page/', include(page_urls)), url(r'^products/', include(product_urls)), url(r'^customers/',", "as social_network_urls from 
.store.special_page.urls import urlpatterns as special_page_urls from .store.bank_account.urls", "urlpatterns as slider_urls from .widget.banner.urls import urlpatterns as banner_urls from", ".staff.urls import urlpatterns as staff_urls from .taxes.urls import urlpatterns as", "url(r'^products/', include(product_urls)), url(r'^customers/', include(customer_urls)), url(r'^staff/', include(staff_urls)), url(r'^discounts/', include(discount_urls)), url(r'^settings/', include(", "name='styleguide'), url(r'^search/', include(search_urls)), url(r'^taxes/', include(taxes_urls)), url(r'^next/', TemplateView.as_view(template_name='dashboard/next.html')), # BEGIN ::", "+ special_page_urls + bank_account_urls + footer_item_urls)), # Extensions url(r'^menu/', include(menu_urls)),", "views as core_views from .category.urls import urlpatterns as category_urls from", "import urlpatterns as order_urls from .page.urls import urlpatterns as page_urls", "as collection_urls from .customer.urls import urlpatterns as customer_urls from .discount.urls", "import urlpatterns as staff_urls from .taxes.urls import urlpatterns as taxes_urls", "---------------------------------------- url(r'^brand/', include(brand_urls)), url(r'^slider/', include(slider_urls)), url(r'^banner/', include(banner_urls)), url(r'^scene/', include(scene_urls)), url(r'^store/',", "include(shipping_urls)), url(r'^style-guide/', core_views.styleguide, name='styleguide'), url(r'^search/', include(search_urls)), url(r'^taxes/', include(taxes_urls)), url(r'^next/', TemplateView.as_view(template_name='dashboard/next.html')),", "category_urls from .collection.urls import urlpatterns as collection_urls from .customer.urls import", "urlpatterns as brand_urls from .widget.slider.urls import urlpatterns as slider_urls from", "name='index'), url(r'^categories/', include(category_urls)), url(r'^collections/', include(collection_urls)), url(r'^orders/', include(order_urls)), url(r'^page/', include(page_urls)), url(r'^products/',", 
":: SoftButterfly Extensions ---------------------------------------------- urlpatterns = [ url(r'^$', core_views.index, name='index'),", "import urlpatterns as scene_urls from .widget.benefit.urls import urlpatterns as benefit_urls", "SoftButterfly Extensions ---------------------------------------- url(r'^brand/', include(brand_urls)), url(r'^slider/', include(slider_urls)), url(r'^banner/', include(banner_urls)), url(r'^scene/',", "product_urls from .search.urls import urlpatterns as search_urls from .shipping.urls import", "as bank_account_urls from .store.footer_item.urls import urlpatterns as footer_item_urls # END", "as special_page_urls from .store.bank_account.urls import urlpatterns as bank_account_urls from .store.footer_item.urls", ".store.bank_account.urls import urlpatterns as bank_account_urls from .store.footer_item.urls import urlpatterns as", "scene_urls from .widget.benefit.urls import urlpatterns as benefit_urls from .store.physical_store.urls import", "import urlpatterns as store_urls from .store.social_network.urls import urlpatterns as social_network_urls", "urlpatterns as customer_urls from .discount.urls import urlpatterns as discount_urls from", ":: SoftButterfly Extensions -------------------------------------------- from .brand.urls import urlpatterns as brand_urls", ".store.physical_store.urls import urlpatterns as store_urls from .store.social_network.urls import urlpatterns as", "url(r'^collections/', include(collection_urls)), url(r'^orders/', include(order_urls)), url(r'^page/', include(page_urls)), url(r'^products/', include(product_urls)), url(r'^customers/', include(customer_urls)),", "# END :: SoftButterfly Extensions ---------------------------------------------- urlpatterns = [ url(r'^$',", ".store.social_network.urls import urlpatterns as social_network_urls from .store.special_page.urls import urlpatterns as", "import urlpatterns as search_urls from .shipping.urls import urlpatterns as shipping_urls", "= [ url(r'^$', 
# Dashboard URL configuration.
#
# Wires each dashboard sub-app's ``urlpatterns`` under its own URL prefix.
# The blocks fenced by "BEGIN/END :: SoftButterfly Extensions" are
# project-specific additions layered on top of the stock dashboard routes.
#
# NOTE(review): reconstructed from a shredded source dump; pattern ordering
# is preserved exactly as seen, since Django resolves URLs first-match-wins.
from django.conf.urls import include, url
from django.views.generic.base import TemplateView

from . import views as core_views
from .category.urls import urlpatterns as category_urls
from .collection.urls import urlpatterns as collection_urls
from .customer.urls import urlpatterns as customer_urls
from .discount.urls import urlpatterns as discount_urls
from .menu.urls import urlpatterns as menu_urls
from .order.urls import urlpatterns as order_urls
from .page.urls import urlpatterns as page_urls
from .product.urls import urlpatterns as product_urls
from .search.urls import urlpatterns as search_urls
from .shipping.urls import urlpatterns as shipping_urls
from .sites.urls import urlpatterns as site_urls
from .staff.urls import urlpatterns as staff_urls
from .taxes.urls import urlpatterns as taxes_urls

# BEGIN :: SoftButterfly Extensions --------------------------------------------
from .brand.urls import urlpatterns as brand_urls
from .widget.slider.urls import urlpatterns as slider_urls
from .widget.banner.urls import urlpatterns as banner_urls
from .widget.scene.urls import urlpatterns as scene_urls
from .widget.benefit.urls import urlpatterns as benefit_urls
from .store.physical_store.urls import urlpatterns as store_urls
from .store.social_network.urls import urlpatterns as social_network_urls
from .store.special_page.urls import urlpatterns as special_page_urls
from .store.bank_account.urls import urlpatterns as bank_account_urls
from .store.footer_item.urls import urlpatterns as footer_item_urls
# END :: SoftButterfly Extensions ----------------------------------------------

urlpatterns = [
    url(r'^$', core_views.index, name='index'),
    url(r'^categories/', include(category_urls)),
    url(r'^collections/', include(collection_urls)),
    url(r'^orders/', include(order_urls)),
    url(r'^page/', include(page_urls)),
    url(r'^products/', include(product_urls)),
    url(r'^customers/', include(customer_urls)),
    url(r'^staff/', include(staff_urls)),
    url(r'^discounts/', include(discount_urls)),
    # Several settings sub-apps share the same ``settings/`` prefix, so their
    # pattern lists are concatenated into a single include.
    url(r'^settings/', include(
        site_urls + social_network_urls + special_page_urls + bank_account_urls
        + footer_item_urls)),
    # Extensions
    url(r'^menu/', include(menu_urls)),
    url(r'^shipping/', include(shipping_urls)),
    url(r'^style-guide/', core_views.styleguide, name='styleguide'),
    url(r'^search/', include(search_urls)),
    url(r'^taxes/', include(taxes_urls)),
    # Static placeholder page rendered directly by TemplateView.
    url(r'^next/', TemplateView.as_view(template_name='dashboard/next.html')),
    # BEGIN :: SoftButterfly Extensions ----------------------------------------
    url(r'^brand/', include(brand_urls)),
    url(r'^slider/', include(slider_urls)),
    url(r'^banner/', include(banner_urls)),
    url(r'^scene/', include(scene_urls)),
    url(r'^store/', include(store_urls)),
    url(r'^benefit/', include(benefit_urls)),
    # END :: SoftButterfly Extensions ------------------------------------------
]
# Meta-training entry point for the FRN (Feature Map Reconstruction Networks)
# few-shot model on the CUB_fewshot_raw dataset.
#
# Flow: parse CLI args -> read shared config.yml for the dataset root ->
# build a meta-training episode dataloader -> construct the FRN model ->
# train and evaluate via the project's Train_Manager.
#
# NOTE(review): reconstructed from a shredded source dump; relies entirely on
# project-local packages (trainers, datasets, models.FRN) reachable via the
# sys.path hack below.
import os
import sys
import torch
import yaml
from functools import partial

# Make the repository root importable (script lives four levels deep).
sys.path.append('../../../../')
from trainers import trainer, frn_train
from datasets import dataloaders
from models.FRN import FRN

args = trainer.train_parser()

# config.yml at the repo root supplies the dataset location.
with open('../../../../config.yml', 'r') as f:
    temp = yaml.safe_load(f)
data_path = os.path.abspath(temp['data_path'])
fewshot_path = os.path.join(data_path, 'CUB_fewshot_raw')

pm = trainer.Path_Manager(fewshot_path=fewshot_path, args=args)

train_way = args.train_way
# shots = [support shots per class, query shots per class] for each episode.
shots = [args.train_shot, args.train_query_shot]

train_loader = dataloaders.meta_train_dataloader(
    data_path=pm.train,
    way=train_way,
    shots=shots,
    transform_type=args.train_transform_type)

model = FRN(
    way=train_way,
    shots=[args.train_shot, args.train_query_shot],
    resnet=args.resnet)

# Bind the dataloader so Train_Manager can call the training step without
# knowing about data plumbing.
train_func = partial(frn_train.default_train, train_loader=train_loader)

tm = trainer.Train_Manager(args, path_manager=pm, train_func=train_func)

tm.train(model)
tm.evaluate(model)