diff --git a/.gitattributes b/.gitattributes index 9cef15336a14ba77bc28ec0f14977fe6b9fedeff..c12353310b14bb699a00799057cd7663d8af776b 100644 --- a/.gitattributes +++ b/.gitattributes @@ -1438,3 +1438,4 @@ parrot/lib/python3.10/site-packages/numpy/_core/tests/__pycache__/test_numeric.c parrot/lib/python3.10/site-packages/numpy/lib/tests/__pycache__/test_function_base.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text vllm/lib/python3.10/site-packages/huggingface_hub/inference/_generated/__pycache__/_async_client.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text vllm/lib/python3.10/site-packages/huggingface_hub/inference/__pycache__/_client.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text +vglm/bin/python filter=lfs diff=lfs merge=lfs -text diff --git a/vglm/bin/python b/vglm/bin/python new file mode 100644 index 0000000000000000000000000000000000000000..503dff6dc130be648af74fc99db885e1fe41da2f --- /dev/null +++ b/vglm/bin/python @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a5893c715ba63b749ab2bb330a28cd17a9e1f8ab65c7b580e714da9cec0caee3 +size 17225608 diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_log2_native.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_log2_native.h new file mode 100644 index 0000000000000000000000000000000000000000..0b64f1804ede7042c313f091a017dc65adda2083 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_log2_native.h @@ -0,0 +1,25 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API void _foreach_log2_out(at::TensorList self, at::TensorList out); +TORCH_API ::std::vector foreach_tensor_log2_slow(at::TensorList self); +TORCH_API void foreach_tensor_log2_slow_(at::TensorList self); +TORCH_API ::std::vector 
foreach_tensor_log2_cuda(at::TensorList self); +TORCH_API void foreach_tensor_log2_cuda_(at::TensorList self); +} // namespace native +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_sqrt_native.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_sqrt_native.h new file mode 100644 index 0000000000000000000000000000000000000000..7a8e1dfa477a81dfec37f07e4b97d66607c51f0d --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_sqrt_native.h @@ -0,0 +1,25 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API void _foreach_sqrt_out(at::TensorList self, at::TensorList out); +TORCH_API ::std::vector foreach_tensor_sqrt_slow(at::TensorList self); +TORCH_API void foreach_tensor_sqrt_slow_(at::TensorList self); +TORCH_API ::std::vector foreach_tensor_sqrt_cuda(at::TensorList self); +TORCH_API void foreach_tensor_sqrt_cuda_(at::TensorList self); +} // namespace native +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/grid_sampler_native.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/grid_sampler_native.h new file mode 100644 index 0000000000000000000000000000000000000000..8bb60ebef9e9d6a1c5e945d5965a5bcdb7777a6f --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/grid_sampler_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor grid_sampler(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners); +} // namespace native +} // 
namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/hstack_ops.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/hstack_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..3d845b6f968d80cd041985feed4d9589175ceb97 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/hstack_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API hstack { + using schema = at::Tensor (at::TensorList); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::hstack") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "hstack(Tensor[] tensors) -> Tensor") + static at::Tensor call(at::TensorList tensors); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors); +}; + +struct TORCH_API hstack_out { + using schema = at::Tensor & (at::TensorList, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::hstack") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "hstack.out(Tensor[] tensors, *, Tensor(a!) 
out) -> Tensor(a!)") + static at::Tensor & call(at::TensorList tensors, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/native_layer_norm_cpu_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/native_layer_norm_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..06499cf517c33e7b8c9b92a1d5588899bb4315e6 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/native_layer_norm_cpu_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cpu { + +TORCH_API ::std::tuple native_layer_norm(const at::Tensor & input, at::IntArrayRef normalized_shape, const c10::optional & weight, const c10::optional & bias, double eps); +TORCH_API ::std::tuple native_layer_norm_symint(const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const c10::optional & weight, const c10::optional & bias, double eps); + +} // namespace cpu +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/renorm_meta.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/renorm_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..e0e1aa603be317bc5167b80d049e0eb54919f96c --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/renorm_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_renorm : public at::impl::MetaBase { + + + void meta(const at::Tensor & self, const at::Scalar & p, int64_t dim, const at::Scalar & maxnorm); +}; + +} // namespace native +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/sinh_cuda_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/sinh_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..3d6f6c423497449d155ae3af0e75439e4a6e65e8 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/sinh_cuda_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any 
types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor sinh(const at::Tensor & self); +TORCH_API at::Tensor & sinh_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & sinh_outf(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & sinh_(at::Tensor & self); + +} // namespace cuda +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/sparse_resize_and_clear_ops.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/sparse_resize_and_clear_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..3d015efd6e68c7cc319766ad493734b940fb64b0 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/sparse_resize_and_clear_ops.h @@ -0,0 +1,50 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API sparse_resize_and_clear_ { + using schema = const at::Tensor & (const at::Tensor &, at::IntArrayRef, int64_t, int64_t); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::sparse_resize_and_clear_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "sparse_resize_and_clear_(Tensor(a!) 
self, int[] size, int sparse_dim, int dense_dim) -> Tensor(a!)") + static const at::Tensor & call(const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim); + static const at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim); +}; + +struct TORCH_API sparse_resize_and_clear_out { + using schema = const at::Tensor & (const at::Tensor &, at::IntArrayRef, int64_t, int64_t, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::sparse_resize_and_clear") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "sparse_resize_and_clear.out(Tensor self, int[] size, int sparse_dim, int dense_dim, *, Tensor(a!) out) -> Tensor(a!)") + static const at::Tensor & call(const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim, const at::Tensor & out); + static const at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim, const at::Tensor & out); +}; + +struct TORCH_API sparse_resize_and_clear { + using schema = at::Tensor (const at::Tensor &, at::IntArrayRef, int64_t, int64_t); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::sparse_resize_and_clear") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "sparse_resize_and_clear(Tensor self, int[] size, int sparse_dim, int dense_dim) -> Tensor") + static at::Tensor call(const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, 
at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim); +}; + +}} // namespace at::_ops diff --git a/vllm/lib/python3.10/site-packages/cupyx/__pycache__/__init__.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..90b3b232a133c03c3654677e094bb39b4f2e0b89 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/__pycache__/__init__.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupyx/__pycache__/_rsqrt.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/__pycache__/_rsqrt.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..66896ff62aed94d7ae84ad7c425da768c8a2f8f6 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/__pycache__/_rsqrt.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupyx/__pycache__/_runtime.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/__pycache__/_runtime.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e1339e8b1d46b61c7090f08a6c19bdf2937f69e1 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/__pycache__/_runtime.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupyx/__pycache__/_scatter.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/__pycache__/_scatter.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c95bcf79c56f5f805c1ecfd8f1d45f89631dae4a Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/__pycache__/_scatter.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupyx/__pycache__/time.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/__pycache__/time.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1aaea7c33cfdb208d15ad22c5ff267a2725b333d Binary files /dev/null and 
b/vllm/lib/python3.10/site-packages/cupyx/__pycache__/time.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/__init__.py b/vllm/lib/python3.10/site-packages/cupyx/scipy/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..307914d291510a25f01b1cc7f364bf866f411b2e --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupyx/scipy/__init__.py @@ -0,0 +1,35 @@ +import sys as _sys + +from cupy._core import ndarray as _ndarray +from cupyx.scipy.sparse._base import spmatrix as _spmatrix + + +try: + import scipy as _scipy + _scipy_available = True +except ImportError: + _scipy_available = False + + +_cupyx_scipy = _sys.modules[__name__] + + +def get_array_module(*args): + """Returns the array module for arguments. + + This function is used to implement CPU/GPU generic code. If at least one of + the arguments is a :class:`cupy.ndarray` object, the :mod:`cupyx.scipy` + module is returned. + + Args: + args: Values to determine whether NumPy or CuPy should be used. + + Returns: + module: :mod:`cupyx.scipy` or :mod:`scipy` is returned based on the + types of the arguments. 
+ + """ + for arg in args: + if isinstance(arg, (_ndarray, _spmatrix)): + return _cupyx_scipy + return _scipy diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/_lib/__init__.py b/vllm/lib/python3.10/site-packages/cupyx/scipy/_lib/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/_lib/__pycache__/__init__.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/scipy/_lib/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7628b03e52082b13ea9f8c8a6d0d43474b6498e6 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/scipy/_lib/__pycache__/__init__.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/_lib/__pycache__/_util.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/scipy/_lib/__pycache__/_util.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f14c114d82f12943454ba15e18833234789a3af8 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/scipy/_lib/__pycache__/_util.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/_lib/_util.py b/vllm/lib/python3.10/site-packages/cupyx/scipy/_lib/_util.py new file mode 100644 index 0000000000000000000000000000000000000000..9693ed5a336a3c25a730fea3986c246bcaaa3ad5 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupyx/scipy/_lib/_util.py @@ -0,0 +1,73 @@ +import math + +import cupy + + +def float_factorial(n): + """Compute the factorial and return as a float + + Returns infinity when result is too large for a double + """ + return float(math.factorial(n)) if n < 171 else cupy.inf + + +def _asarray_validated(a, check_finite=True, + sparse_ok=False, objects_ok=False, mask_ok=False, + as_inexact=False): + """Helper function for SciPy argument validation. 
+ + Many CuPy linear algebra functions do support arbitrary array-like + input arguments. Examples of commonly unsupported inputs include + matrices containing inf/nan, sparse matrix representations, and + matrices with complicated elements. + + Parameters + ---------- + a : array-like + The array-like input + check_finite : bool, optional + By default True. To check whether the input matrices contain + only finite numbers. Disabling may give a performance gain, + but may result in problems (crashes, non-termination) if the + inputs do contain infinites or NaNs + sparse_ok : bool, optional + By default False. True if cupy sparse matrices are allowed + objects_ok : bool, optional + By default False. True if arrays with dype('O') are allowed + mask_ok : bool, optional + By default False. True if masked arrays are allowed. + as_inexact : bool, optional + By default False. True to convert the input array to a + cupy.inexact dtype + + Returns + ------- + ret : cupy.ndarray + The converted validated array + + """ + + if not sparse_ok: + import cupyx.scipy.sparse + if cupyx.scipy.sparse.issparse(a): + msg = ('Sparse matrices are not supported by this function. 
' + 'Perhaps one of the cupyx.scipy.sparse.linalg functions ' + 'would work instead.') + raise ValueError(msg) + + # TODO: remove these comments when CuPy supports masked arrays + # Ref Issue: https://github.com/cupy/cupy/issues/2225 + # if not mask_ok: + # if cupy.ma.isMaskedArray(a): + # raise ValueError('masked arrays are not supported') + + # TODO: remove these comments when CuPy supports 'object' dtype + # if not objects_ok: + # if a.dtype is cupy.dtype('O'): + # raise ValueError('object arrays are not supported') + + if as_inexact: + if not cupy.issubdtype(a, cupy.inexact): + a = a.astype(dtype=cupy.float64) + + return a diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/fftpack/__init__.py b/vllm/lib/python3.10/site-packages/cupyx/scipy/fftpack/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..c328ae29bcc998836471ed58dd49f2a7c72f9ca7 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupyx/scipy/fftpack/__init__.py @@ -0,0 +1,9 @@ +from cupyx.scipy.fftpack._fft import fft # NOQA +from cupyx.scipy.fftpack._fft import fft2 # NOQA +from cupyx.scipy.fftpack._fft import fftn # NOQA +from cupyx.scipy.fftpack._fft import ifft # NOQA +from cupyx.scipy.fftpack._fft import ifft2 # NOQA +from cupyx.scipy.fftpack._fft import ifftn # NOQA +from cupyx.scipy.fftpack._fft import irfft # NOQA +from cupyx.scipy.fftpack._fft import rfft # NOQA +from cupyx.scipy.fftpack._fft import get_fft_plan # NOQA diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/fftpack/__pycache__/__init__.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/scipy/fftpack/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..573c49a287009fecd7b0cca9c34a32c43df4ddf1 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/scipy/fftpack/__pycache__/__init__.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/fftpack/__pycache__/_fft.cpython-310.pyc 
b/vllm/lib/python3.10/site-packages/cupyx/scipy/fftpack/__pycache__/_fft.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d3cf7df5ecccc3ff1b46b65456f22ebb76bcd13d Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/scipy/fftpack/__pycache__/_fft.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/fftpack/_fft.py b/vllm/lib/python3.10/site-packages/cupyx/scipy/fftpack/_fft.py new file mode 100644 index 0000000000000000000000000000000000000000..c6ae48d811d5b8bf4f0a22460531fb518c550dac --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupyx/scipy/fftpack/_fft.py @@ -0,0 +1,508 @@ +from numpy import prod + +import cupy +from cupy.fft import config +from cupy.fft._fft import (_convert_fft_type, _default_fft_func, _fft, + _get_cufft_plan_nd, _get_fftn_out_size, + _output_dtype) +from cupy.fft._cache import get_plan_cache + + +def get_fft_plan(a, shape=None, axes=None, value_type='C2C'): + """ Generate a CUDA FFT plan for transforming up to three axes. + + Args: + a (cupy.ndarray): Array to be transform, assumed to be either C- or + F- contiguous. + shape (None or tuple of ints): Shape of the transformed axes of the + output. If ``shape`` is not given, the lengths of the input along + the axes specified by ``axes`` are used. + axes (None or int or tuple of int): The axes of the array to + transform. If `None`, it is assumed that all axes are transformed. + + Currently, for performing N-D transform these must be a set of up + to three adjacent axes, and must include either the first or the + last axis of the array. + value_type (str): The FFT type to perform. Acceptable values are: + + * 'C2C': complex-to-complex transform (default) + * 'R2C': real-to-complex transform + * 'C2R': complex-to-real transform + + Returns: + a cuFFT plan for either 1D transform (``cupy.cuda.cufft.Plan1d``) or + N-D transform (``cupy.cuda.cufft.PlanNd``). + + .. 
note:: + The returned plan can not only be passed as one of the arguments of + the functions in ``cupyx.scipy.fftpack``, but also be used as a + context manager for both ``cupy.fft`` and ``cupyx.scipy.fftpack`` + functions: + + .. code-block:: python + + x = cupy.random.random(16).reshape(4, 4).astype(complex) + plan = cupyx.scipy.fftpack.get_fft_plan(x) + with plan: + y = cupy.fft.fftn(x) + # alternatively: + y = cupyx.scipy.fftpack.fftn(x) # no explicit plan is given! + # alternatively: + y = cupyx.scipy.fftpack.fftn(x, plan=plan) # pass plan explicitly + + In the first case, no cuFFT plan will be generated automatically, + even if ``cupy.fft.config.enable_nd_planning = True`` is set. + + .. note:: + If this function is called under the context of + :func:`~cupy.fft.config.set_cufft_callbacks`, the generated plan will + have callbacks enabled. + + .. warning:: + This API is a deviation from SciPy's, is currently experimental, and + may be changed in the future version. + """ + from cupy.cuda import cufft + + # check input array + if a.flags.c_contiguous: + order = 'C' + elif a.flags.f_contiguous: + order = 'F' + else: + raise ValueError('Input array a must be contiguous') + + if isinstance(shape, int): + shape = (shape,) + if isinstance(axes, int): + axes = (axes,) + if (shape is not None) and (axes is not None) and len(shape) != len(axes): + raise ValueError('Shape and axes have different lengths.') + + # check axes + # n=1: 1d (need axis1D); n>1: Nd + if axes is None: + n = a.ndim if shape is None else len(shape) + axes = tuple(i for i in range(-n, 0)) + if n == 1: + axis1D = 0 + else: # axes is a tuple + n = len(axes) + if n == 1: + axis1D = axes[0] + if axis1D >= a.ndim or axis1D < -a.ndim: + err = 'The chosen axis ({0}) exceeds the number of '\ + 'dimensions of a ({1})'.format(axis1D, a.ndim) + raise ValueError(err) + elif n > 3: + raise ValueError('Only up to three axes is supported') + + # Note that "shape" here refers to the shape along transformed axes, 
not + # the shape of the output array, and we need to convert it to the latter. + # The result is as if "a=_cook_shape(a); return a.shape" is called. + # Because of this, we need to use (possibly unsorted) axes. + transformed_shape = shape + shape = list(a.shape) + if transformed_shape is not None: + for s, axis in zip(transformed_shape, axes): + if s is not None: + if axis == axes[-1] and value_type == 'C2R': + s = s // 2 + 1 + shape[axis] = s + shape = tuple(shape) + + # check value_type + out_dtype = _output_dtype(a.dtype, value_type) + fft_type = _convert_fft_type(out_dtype, value_type) + # TODO(leofang): figure out if we really have to skip F-order? + if n > 1 and value_type != 'C2C' and a.flags.f_contiguous: + raise ValueError('C2R/R2C PlanNd for F-order arrays is not supported') + + # generate plan + # (load from cache if it exists, otherwise create one but don't cache it) + if n > 1: # ND transform + if cupy.cuda.runtime.is_hip and value_type == 'C2R': + raise RuntimeError("hipFFT's C2R PlanNd is buggy and unsupported") + out_size = _get_fftn_out_size( + shape, transformed_shape, axes[-1], value_type) + # _get_cufft_plan_nd interacts with plan cache and callback + plan = _get_cufft_plan_nd( + shape, fft_type, axes=axes, order=order, out_size=out_size, + to_cache=False) + else: # 1D transform + # prepare plan arguments + if value_type != 'C2R': + out_size = shape[axis1D] + else: + out_size = _get_fftn_out_size( + shape, transformed_shape, axis1D, value_type) + batch = prod(shape) // shape[axis1D] + devices = None if not config.use_multi_gpus else config._devices + + keys = (out_size, fft_type, batch, devices) + mgr = config.get_current_callback_manager() + if mgr is not None: + # to avoid a weird segfault, we generate and cache distinct plans + # for every possible (load_aux, store_aux) pairs; the plans are + # still generated from the same external Python module + load_aux = mgr.cb_load_aux_arr + store_aux = mgr.cb_store_aux_arr + keys += (mgr.cb_load, 
mgr.cb_store, + 0 if load_aux is None else load_aux.data.ptr, + 0 if store_aux is None else store_aux.data.ptr) + cache = get_plan_cache() + cached_plan = cache.get(keys) + if cached_plan is not None: + plan = cached_plan + elif mgr is None: + plan = cufft.Plan1d(out_size, fft_type, batch, devices=devices) + else: # has callback + # TODO(leofang): support multi-GPU callback (devices is ignored) + if devices: + raise NotImplementedError('multi-GPU cuFFT callbacks are not ' + 'yet supported') + plan = mgr.create_plan(('Plan1d', keys[:-3])) + mgr.set_callbacks(plan) + + return plan + + +def fft(x, n=None, axis=-1, overwrite_x=False, plan=None): + """Compute the one-dimensional FFT. + + Args: + x (cupy.ndarray): Array to be transformed. + n (None or int): Length of the transformed axis of the output. If ``n`` + is not given, the length of the input along the axis specified by + ``axis`` is used. + axis (int): Axis over which to compute the FFT. + overwrite_x (bool): If True, the contents of ``x`` can be destroyed. + plan (:class:`cupy.cuda.cufft.Plan1d` or ``None``): a cuFFT plan for + transforming ``x`` over ``axis``, which can be obtained using:: + + plan = cupyx.scipy.fftpack.get_fft_plan(x, axis) + + Note that `plan` is defaulted to None, meaning CuPy will use an + auto-generated plan behind the scene. + + Returns: + cupy.ndarray: + The transformed array which shape is specified by ``n`` and type + will convert to complex if that of the input is another. + + .. note:: + The argument `plan` is currently experimental and the interface may be + changed in the future version. + + .. seealso:: :func:`scipy.fftpack.fft` + """ + from cupy.cuda import cufft + return _fft(x, (n,), (axis,), None, cufft.CUFFT_FORWARD, + overwrite_x=overwrite_x, plan=plan) + + +def ifft(x, n=None, axis=-1, overwrite_x=False, plan=None): + """Compute the one-dimensional inverse FFT. + + Args: + x (cupy.ndarray): Array to be transformed. 
+ n (None or int): Length of the transformed axis of the output. If ``n`` + is not given, the length of the input along the axis specified by + ``axis`` is used. + axis (int): Axis over which to compute the FFT. + overwrite_x (bool): If True, the contents of ``x`` can be destroyed. + plan (:class:`cupy.cuda.cufft.Plan1d` or ``None``): a cuFFT plan for + transforming ``x`` over ``axis``, which can be obtained using:: + + plan = cupyx.scipy.fftpack.get_fft_plan(x, axis) + + Note that `plan` is defaulted to None, meaning CuPy will use an + auto-generated plan behind the scene. + + Returns: + cupy.ndarray: + The transformed array which shape is specified by ``n`` and type + will convert to complex if that of the input is another. + + .. note:: + The argument `plan` is currently experimental and the interface may be + changed in the future version. + + .. seealso:: :func:`scipy.fftpack.ifft` + """ + from cupy.cuda import cufft + return _fft(x, (n,), (axis,), None, cufft.CUFFT_INVERSE, + overwrite_x=overwrite_x, plan=plan) + + +def fft2(x, shape=None, axes=(-2, -1), overwrite_x=False, plan=None): + """Compute the two-dimensional FFT. + + Args: + x (cupy.ndarray): Array to be transformed. + shape (None or tuple of ints): Shape of the transformed axes of the + output. If ``shape`` is not given, the lengths of the input along + the axes specified by ``axes`` are used. + axes (tuple of ints): Axes over which to compute the FFT. + overwrite_x (bool): If True, the contents of ``x`` can be destroyed. + plan (:class:`cupy.cuda.cufft.PlanNd` or ``None``): a cuFFT plan for + transforming ``x`` over ``axes``, which can be obtained using:: + + plan = cupyx.scipy.fftpack.get_fft_plan(x, axes) + + Note that `plan` is defaulted to None, meaning CuPy will either + use an auto-generated plan behind the scene if cupy.fft.config. + enable_nd_planning = True, or use no cuFFT plan if it is set to + False. 
+ + Returns: + cupy.ndarray: + The transformed array which shape is specified by ``shape`` and + type will convert to complex if that of the input is another. + + .. seealso:: :func:`scipy.fftpack.fft2` + + .. note:: + The argument `plan` is currently experimental and the interface may be + changed in the future version. + """ + from cupy.cuda import cufft + + func = _default_fft_func(x, shape, axes, plan) + return func(x, shape, axes, None, cufft.CUFFT_FORWARD, + overwrite_x=overwrite_x, plan=plan) + + +def ifft2(x, shape=None, axes=(-2, -1), overwrite_x=False, plan=None): + """Compute the two-dimensional inverse FFT. + + Args: + x (cupy.ndarray): Array to be transformed. + shape (None or tuple of ints): Shape of the transformed axes of the + output. If ``shape`` is not given, the lengths of the input along + the axes specified by ``axes`` are used. + axes (tuple of ints): Axes over which to compute the FFT. + overwrite_x (bool): If True, the contents of ``x`` can be destroyed. + plan (:class:`cupy.cuda.cufft.PlanNd` or ``None``): a cuFFT plan for + transforming ``x`` over ``axes``, which can be obtained using:: + + plan = cupyx.scipy.fftpack.get_fft_plan(x, axes) + + Note that `plan` is defaulted to None, meaning CuPy will either + use an auto-generated plan behind the scene if cupy.fft.config. + enable_nd_planning = True, or use no cuFFT plan if it is set to + False. + + Returns: + cupy.ndarray: + The transformed array which shape is specified by ``shape`` and + type will convert to complex if that of the input is another. + + .. seealso:: :func:`scipy.fftpack.ifft2` + + .. note:: + The argument `plan` is currently experimental and the interface may be + changed in the future version. 
+ """ + from cupy.cuda import cufft + + func = _default_fft_func(x, shape, axes, plan) + return func(x, shape, axes, None, cufft.CUFFT_INVERSE, + overwrite_x=overwrite_x, plan=plan) + + +def fftn(x, shape=None, axes=None, overwrite_x=False, plan=None): + """Compute the N-dimensional FFT. + + Args: + x (cupy.ndarray): Array to be transformed. + shape (None or tuple of ints): Shape of the transformed axes of the + output. If ``shape`` is not given, the lengths of the input along + the axes specified by ``axes`` are used. + axes (tuple of ints): Axes over which to compute the FFT. + overwrite_x (bool): If True, the contents of ``x`` can be destroyed. + plan (:class:`cupy.cuda.cufft.PlanNd` or ``None``): a cuFFT plan for + transforming ``x`` over ``axes``, which can be obtained using:: + + plan = cupyx.scipy.fftpack.get_fft_plan(x, axes) + + Note that `plan` is defaulted to None, meaning CuPy will either + use an auto-generated plan behind the scene if cupy.fft.config. + enable_nd_planning = True, or use no cuFFT plan if it is set to + False. + + Returns: + cupy.ndarray: + The transformed array which shape is specified by ``shape`` and + type will convert to complex if that of the input is another. + + .. seealso:: :func:`scipy.fftpack.fftn` + + .. note:: + The argument `plan` is currently experimental and the interface may be + changed in the future version. + """ + from cupy.cuda import cufft + + func = _default_fft_func(x, shape, axes, plan) + return func(x, shape, axes, None, cufft.CUFFT_FORWARD, + overwrite_x=overwrite_x, plan=plan) + + +def ifftn(x, shape=None, axes=None, overwrite_x=False, plan=None): + """Compute the N-dimensional inverse FFT. + + Args: + x (cupy.ndarray): Array to be transformed. + shape (None or tuple of ints): Shape of the transformed axes of the + output. If ``shape`` is not given, the lengths of the input along + the axes specified by ``axes`` are used. + axes (tuple of ints): Axes over which to compute the FFT. 
+ overwrite_x (bool): If True, the contents of ``x`` can be destroyed. + plan (:class:`cupy.cuda.cufft.PlanNd` or ``None``): a cuFFT plan for + transforming ``x`` over ``axes``, which can be obtained using:: + + plan = cupyx.scipy.fftpack.get_fft_plan(x, axes) + + Note that `plan` is defaulted to None, meaning CuPy will either + use an auto-generated plan behind the scene if cupy.fft.config. + enable_nd_planning = True, or use no cuFFT plan if it is set to + False. + + Returns: + cupy.ndarray: + The transformed array which shape is specified by ``shape`` and + type will convert to complex if that of the input is another. + + .. seealso:: :func:`scipy.fftpack.ifftn` + + .. note:: + The argument `plan` is currently experimental and the interface may be + changed in the future version. + """ + from cupy.cuda import cufft + + func = _default_fft_func(x, shape, axes, plan) + return func(x, shape, axes, None, cufft.CUFFT_INVERSE, + overwrite_x=overwrite_x, plan=plan) + + +def rfft(x, n=None, axis=-1, overwrite_x=False, plan=None): + """Compute the one-dimensional FFT for real input. + + The returned real array contains + + .. code-block:: python + + [y(0),Re(y(1)),Im(y(1)),...,Re(y(n/2))] # if n is even + [y(0),Re(y(1)),Im(y(1)),...,Re(y(n/2)),Im(y(n/2))] # if n is odd + + Args: + x (cupy.ndarray): Array to be transformed. + n (None or int): Length of the transformed axis of the output. If ``n`` + is not given, the length of the input along the axis specified by + ``axis`` is used. + axis (int): Axis over which to compute the FFT. + overwrite_x (bool): If True, the contents of ``x`` can be destroyed. + plan (:class:`cupy.cuda.cufft.Plan1d` or ``None``): a cuFFT plan for + transforming ``x`` over ``axis``, which can be obtained using:: + + plan = cupyx.scipy.fftpack.get_fft_plan( + x, axes, value_type='R2C') + + Note that `plan` is defaulted to None, meaning CuPy will either + use an auto-generated plan behind the scene if cupy.fft.config. 
+ enable_nd_planning = True, or use no cuFFT plan if it is set to + False. + + Returns: + cupy.ndarray: + The transformed array. + + .. seealso:: :func:`scipy.fftpack.rfft` + + .. note:: + The argument `plan` is currently experimental and the interface may be + changed in the future version. + """ + from cupy.cuda import cufft + + if n is None: + n = x.shape[axis] + + shape = list(x.shape) + shape[axis] = n + f = _fft(x, (n,), (axis,), None, cufft.CUFFT_FORWARD, 'R2C', + overwrite_x=overwrite_x, plan=plan) + z = cupy.empty(shape, f.real.dtype) + + slice_z = [slice(None)] * x.ndim + slice_f = [slice(None)] * x.ndim + + slice_z[axis] = slice(1) + slice_f[axis] = slice(1) + z[tuple(slice_z)] = f[tuple(slice_f)].real + + slice_z[axis] = slice(1, None, 2) + slice_f[axis] = slice(1, None) + z[tuple(slice_z)] = f[tuple(slice_f)].real + + slice_z[axis] = slice(2, None, 2) + slice_f[axis] = slice(1, n - f.shape[axis] + 1) + z[tuple(slice_z)] = f[tuple(slice_f)].imag + + return z + + +def irfft(x, n=None, axis=-1, overwrite_x=False): + """Compute the one-dimensional inverse FFT for real input. + + Args: + x (cupy.ndarray): Array to be transformed. + n (None or int): Length of the transformed axis of the output. If ``n`` + is not given, the length of the input along the axis specified by + ``axis`` is used. + axis (int): Axis over which to compute the FFT. + overwrite_x (bool): If True, the contents of ``x`` can be destroyed. + + Returns: + cupy.ndarray: + The transformed array. + + .. seealso:: :func:`scipy.fftpack.irfft` + + .. note:: + This function does not support a precomputed `plan`. If you need this + capability, please consider using :func:`cupy.fft.irfft` or :func:` + cupyx.scipy.fft.irfft`. 
+ """ + from cupy.cuda import cufft + + if n is None: + n = x.shape[axis] + m = min(n, x.shape[axis]) + + shape = list(x.shape) + shape[axis] = n // 2 + 1 + if x.dtype in (cupy.float16, cupy.float32): + z = cupy.zeros(shape, dtype=cupy.complex64) + else: + z = cupy.zeros(shape, dtype=cupy.complex128) + + slice_x = [slice(None)] * x.ndim + slice_z = [slice(None)] * x.ndim + + slice_x[axis] = slice(1) + slice_z[axis] = slice(1) + z[tuple(slice_z)].real = x[tuple(slice_x)] + + slice_x[axis] = slice(1, m, 2) + slice_z[axis] = slice(1, m // 2 + 1) + z[tuple(slice_z)].real = x[tuple(slice_x)] + + slice_x[axis] = slice(2, m, 2) + slice_z[axis] = slice(1, (m + 1) // 2) + z[tuple(slice_z)].imag = x[tuple(slice_x)] + + return _fft(z, (n,), (axis,), None, cufft.CUFFT_INVERSE, 'C2R', + overwrite_x=overwrite_x) diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/__init__.py b/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..6cb5202571e73748469fef48b0d0df43155c47a0 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/__init__.py @@ -0,0 +1,175 @@ +from cupyx.scipy.signal._signaltools import convolve # NOQA +from cupyx.scipy.signal._signaltools import correlate # NOQA +from cupyx.scipy.signal._signaltools import deconvolve # NOQA +from cupyx.scipy.signal._signaltools import fftconvolve # NOQA +from cupyx.scipy.signal._signaltools import choose_conv_method # NOQA +from cupyx.scipy.signal._signaltools import oaconvolve # NOQA +from cupyx.scipy.signal._signaltools import convolve2d # NOQA +from cupyx.scipy.signal._signaltools import correlate2d # NOQA +from cupyx.scipy.signal._signaltools import correlation_lags # NOQA +from cupyx.scipy.signal._signaltools import wiener # NOQA +from cupyx.scipy.signal._signaltools import order_filter # NOQA +from cupyx.scipy.signal._signaltools import medfilt # NOQA +from cupyx.scipy.signal._signaltools import medfilt2d # NOQA +from 
cupyx.scipy.signal._signaltools import lfilter # NOQA +from cupyx.scipy.signal._signaltools import lfiltic # NOQA +from cupyx.scipy.signal._signaltools import lfilter_zi # NOQA +from cupyx.scipy.signal._signaltools import detrend # NOQA +from cupyx.scipy.signal._signaltools import filtfilt # NOQA +from cupyx.scipy.signal._signaltools import sosfilt # NOQA +from cupyx.scipy.signal._signaltools import sosfilt_zi # NOQA +from cupyx.scipy.signal._signaltools import sosfiltfilt # NOQA +from cupyx.scipy.signal._signaltools import hilbert # NOQA +from cupyx.scipy.signal._signaltools import hilbert2 # NOQA + +from cupyx.scipy.signal._resample import resample # NOQA +from cupyx.scipy.signal._resample import resample_poly # NOQA +from cupyx.scipy.signal._resample import decimate # NOQA + +from cupyx.scipy.signal._polyutils import unique_roots # NOQA +from cupyx.scipy.signal._polyutils import invres # NOQA +from cupyx.scipy.signal._polyutils import invresz # NOQA +from cupyx.scipy.signal._polyutils import residue # NOQA +from cupyx.scipy.signal._polyutils import residuez # NOQA + +from cupyx.scipy.signal._bsplines import sepfir2d # NOQA +from cupyx.scipy.signal._bsplines import cspline1d # NOQA +from cupyx.scipy.signal._bsplines import qspline1d # NOQA +from cupyx.scipy.signal._bsplines import cspline2d # NOQA +from cupyx.scipy.signal._bsplines import qspline2d # NOQA +from cupyx.scipy.signal._bsplines import cspline1d_eval # NOQA +from cupyx.scipy.signal._bsplines import qspline1d_eval # NOQA +from cupyx.scipy.signal._bsplines import spline_filter # NOQA +from cupyx.scipy.signal._bsplines import gauss_spline # NOQA + +from cupyx.scipy.signal._splines import symiirorder1 # NOQA +from cupyx.scipy.signal._splines import symiirorder2 # NOQA + +from cupyx.scipy.signal._savitzky_golay import savgol_coeffs, savgol_filter # NOQA + +from cupyx.scipy.signal._filter_design import gammatone # NOQA +from cupyx.scipy.signal._filter_design import group_delay # NOQA + +from 
cupyx.scipy.signal._fir_filter_design import kaiser_atten # NOQA +from cupyx.scipy.signal._fir_filter_design import kaiser_beta # NOQA +from cupyx.scipy.signal._fir_filter_design import kaiserord # NOQA + +from cupyx.scipy.signal._iir_filter_conversions import BadCoefficients # NOQA +from cupyx.scipy.signal._iir_filter_conversions import normalize # NOQA + +from cupyx.scipy.signal._iir_filter_conversions import bilinear # NOQA +from cupyx.scipy.signal._iir_filter_conversions import lp2lp # NOQA +from cupyx.scipy.signal._iir_filter_conversions import lp2hp # NOQA +from cupyx.scipy.signal._iir_filter_conversions import lp2bp # NOQA +from cupyx.scipy.signal._iir_filter_conversions import lp2bs # NOQA + +from cupyx.scipy.signal._iir_filter_conversions import bilinear_zpk # NOQA +from cupyx.scipy.signal._iir_filter_conversions import lp2lp_zpk # NOQA +from cupyx.scipy.signal._iir_filter_conversions import lp2hp_zpk # NOQA +from cupyx.scipy.signal._iir_filter_conversions import lp2bp_zpk # NOQA +from cupyx.scipy.signal._iir_filter_conversions import lp2bs_zpk # NOQA + +from cupyx.scipy.signal._iir_filter_conversions import zpk2tf # NOQA +from cupyx.scipy.signal._iir_filter_conversions import zpk2sos # NOQA +from cupyx.scipy.signal._iir_filter_conversions import zpk2ss # NOQA +from cupyx.scipy.signal._iir_filter_conversions import tf2zpk # NOQA +from cupyx.scipy.signal._iir_filter_conversions import tf2sos # NOQA +from cupyx.scipy.signal._iir_filter_conversions import tf2ss # NOQA +from cupyx.scipy.signal._iir_filter_conversions import ss2tf # NOQA +from cupyx.scipy.signal._iir_filter_conversions import ss2zpk # NOQA +from cupyx.scipy.signal._iir_filter_conversions import sos2tf # NOQA +from cupyx.scipy.signal._iir_filter_conversions import sos2zpk # NOQA + +from cupyx.scipy.signal._iir_filter_conversions import band_stop_obj # NOQA +from cupyx.scipy.signal.windows._windows import get_window # NOQA + +from cupyx.scipy.signal._iir_filter_conversions import buttap # NOQA 
+from cupyx.scipy.signal._iir_filter_conversions import cheb1ap # NOQA +from cupyx.scipy.signal._iir_filter_conversions import cheb2ap # NOQA +from cupyx.scipy.signal._iir_filter_conversions import ellipap # NOQA + +from cupyx.scipy.signal._iir_filter_conversions import buttord # NOQA +from cupyx.scipy.signal._iir_filter_conversions import cheb1ord # NOQA +from cupyx.scipy.signal._iir_filter_conversions import cheb2ord # NOQA +from cupyx.scipy.signal._iir_filter_conversions import ellipord # NOQA + +from cupyx.scipy.signal._iir_filter_design import iirfilter # NOQA +from cupyx.scipy.signal._iir_filter_design import butter # NOQA +from cupyx.scipy.signal._iir_filter_design import cheby1 # NOQA +from cupyx.scipy.signal._iir_filter_design import cheby2 # NOQA +from cupyx.scipy.signal._iir_filter_design import ellip # NOQA +from cupyx.scipy.signal._iir_filter_design import iirdesign # NOQA +from cupyx.scipy.signal._iir_filter_design import iircomb # NOQA +from cupyx.scipy.signal._iir_filter_design import iirnotch # NOQA +from cupyx.scipy.signal._iir_filter_design import iirpeak # NOQA + +from cupyx.scipy.signal._fir_filter_design import firwin # NOQA +from cupyx.scipy.signal._fir_filter_design import firwin2 # NOQA +from cupyx.scipy.signal._fir_filter_design import firls # NOQA +from cupyx.scipy.signal._fir_filter_design import minimum_phase # NOQA + +from cupyx.scipy.signal._filter_design import findfreqs # NOQA +from cupyx.scipy.signal._filter_design import freqs # NOQA +from cupyx.scipy.signal._filter_design import freqs_zpk # NOQA + +from cupyx.scipy.signal._filter_design import freqz # NOQA +from cupyx.scipy.signal._filter_design import freqz_zpk # NOQA +from cupyx.scipy.signal._filter_design import sosfreqz # NOQA + +from cupyx.scipy.signal._waveforms import chirp # NOQA +from cupyx.scipy.signal._waveforms import gausspulse # NOQA +from cupyx.scipy.signal._waveforms import sawtooth # NOQA +from cupyx.scipy.signal._waveforms import square # NOQA +from 
cupyx.scipy.signal._waveforms import unit_impulse # NOQA +from cupyx.scipy.signal._max_len_seq import max_len_seq # NOQA + +from cupyx.scipy.signal._czt import * # NOQA + +from cupyx.scipy.signal._wavelets import morlet # NOQA +from cupyx.scipy.signal._wavelets import qmf # NOQA +from cupyx.scipy.signal._wavelets import ricker # NOQA +from cupyx.scipy.signal._wavelets import morlet2 # NOQA +from cupyx.scipy.signal._wavelets import cwt # NOQA + +from cupyx.scipy.signal._lti_conversion import abcd_normalize # NOQA + +from cupyx.scipy.signal._upfirdn import upfirdn # NOQA + +from cupyx.scipy.signal._peak_finding import find_peaks # NOQA +from cupyx.scipy.signal._peak_finding import peak_prominences # NOQA +from cupyx.scipy.signal._peak_finding import peak_widths # NOQA + +from cupyx.scipy.signal._ltisys import lti # NOQA +from cupyx.scipy.signal._ltisys import lsim # NOQA +from cupyx.scipy.signal._ltisys import impulse # NOQA +from cupyx.scipy.signal._ltisys import step # NOQA +from cupyx.scipy.signal._ltisys import freqresp # NOQA +from cupyx.scipy.signal._ltisys import bode # NOQA + +from cupyx.scipy.signal._ltisys import dlti # NOQA +from cupyx.scipy.signal._ltisys import dlsim # NOQA +from cupyx.scipy.signal._ltisys import dstep # NOQA +from cupyx.scipy.signal._ltisys import dimpulse # NOQA +from cupyx.scipy.signal._ltisys import dbode # NOQA +from cupyx.scipy.signal._ltisys import dfreqresp # NOQA +from cupyx.scipy.signal._ltisys import StateSpace # NOQA +from cupyx.scipy.signal._ltisys import TransferFunction # NOQA +from cupyx.scipy.signal._ltisys import ZerosPolesGain # NOQA +from cupyx.scipy.signal._ltisys import cont2discrete # NOQA +from cupyx.scipy.signal._ltisys import place_poles # NOQA + +from cupyx.scipy.signal._spectral import lombscargle # NOQA +from cupyx.scipy.signal._spectral import periodogram # NOQA +from cupyx.scipy.signal._spectral import welch # NOQA +from cupyx.scipy.signal._spectral import csd # NOQA +from cupyx.scipy.signal._spectral 
import check_COLA # NOQA +from cupyx.scipy.signal._spectral import check_NOLA # NOQA +from cupyx.scipy.signal._spectral import stft # NOQA +from cupyx.scipy.signal._spectral import istft # NOQA +from cupyx.scipy.signal._spectral import spectrogram # NOQA +from cupyx.scipy.signal._spectral import vectorstrength # NOQA +from cupyx.scipy.signal._spectral import coherence # NOQA + +from cupyx.scipy.signal._peak_finding import argrelextrema # NOQA +from cupyx.scipy.signal._peak_finding import argrelmin # NOQA +from cupyx.scipy.signal._peak_finding import argrelmax # NOQA diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/__pycache__/_czt.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/__pycache__/_czt.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..16954a472f040579f683a5bc28d272dffaeb4c96 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/__pycache__/_czt.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/__pycache__/_filter_design.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/__pycache__/_filter_design.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0444088b5007357792e9f3483c3bf2969986308c Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/__pycache__/_filter_design.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/__pycache__/_iir_filter_conversions.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/__pycache__/_iir_filter_conversions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f0b95b3ee8df52134d9cebe0f933225815ad5116 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/__pycache__/_iir_filter_conversions.cpython-310.pyc differ diff --git 
a/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/__pycache__/_iir_filter_design.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/__pycache__/_iir_filter_design.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a61f1acf7306d2c92cfa3aa4fa733802c95336d1 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/__pycache__/_iir_filter_design.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/__pycache__/_iir_utils.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/__pycache__/_iir_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fe6c909d9e174a7643aea217a04e7aa6ad735236 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/__pycache__/_iir_utils.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/__pycache__/_lti_conversion.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/__pycache__/_lti_conversion.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7068d171fa11ec10057a61c16bbfa69a2f991c49 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/__pycache__/_lti_conversion.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/__pycache__/_ltisys.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/__pycache__/_ltisys.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5b1aba1bda60f30483d0e66e0c9e9fcb5a982068 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/__pycache__/_ltisys.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/__pycache__/_max_len_seq.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/__pycache__/_max_len_seq.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..70c20a7a6e2909d454bb0c16f5e562f88db8d269 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/__pycache__/_max_len_seq.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/__pycache__/_resample.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/__pycache__/_resample.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..34a7305459912823bb92684c5775ed6732e77ed6 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/__pycache__/_resample.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/__pycache__/_signaltools.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/__pycache__/_signaltools.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6e6d506cd70762b4127bbe79f4ae15837292072c Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/__pycache__/_signaltools.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/__pycache__/_signaltools_core.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/__pycache__/_signaltools_core.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..67767cc1542d5b295fadf382d62151aa5a16e27b Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/__pycache__/_signaltools_core.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/__pycache__/_spectral_impl.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/__pycache__/_spectral_impl.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..667904f64cc736ef5078039cc77ed113ce0372b1 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/__pycache__/_spectral_impl.cpython-310.pyc differ 
diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/__pycache__/_splines.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/__pycache__/_splines.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ac7c669e18ef7239a693549181f4ce900a83e0e2 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/__pycache__/_splines.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/__pycache__/_upfirdn.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/__pycache__/_upfirdn.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a0f6132245894f102d3b69e702a19f1475853956 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/__pycache__/_upfirdn.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/_arraytools.py b/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/_arraytools.py new file mode 100644 index 0000000000000000000000000000000000000000..f0f0366de187f61c7fb4b0400d670c6614ba3af6 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/_arraytools.py @@ -0,0 +1,272 @@ +""" +Functions for acting on a axis of an array. +""" +import cupy + + +def axis_slice(a, start=None, stop=None, step=None, axis=-1): + """Take a slice along axis 'axis' from 'a'. + + Parameters + ---------- + a : cupy.ndarray + The array to be sliced. + start, stop, step : int or None + The slice parameters. + axis : int, optional + The axis of `a` to be sliced. + + Examples + -------- + >>> a = array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) + >>> axis_slice(a, start=0, stop=1, axis=1) + array([[1], + [4], + [7]]) + >>> axis_slice(a, start=1, axis=0) + array([[4, 5, 6], + [7, 8, 9]]) + + Notes + ----- + The keyword arguments start, stop and step are used by calling + slice(start, stop, step). 
This implies axis_slice() does not + handle its arguments the exactly the same as indexing. To select + a single index k, for example, use + axis_slice(a, start=k, stop=k+1) + In this case, the length of the axis 'axis' in the result will + be 1; the trivial dimension is not removed. (Use cupy.squeeze() + to remove trivial axes.) + """ + a_slice = [slice(None)] * a.ndim + a_slice[axis] = slice(start, stop, step) + b = a[tuple(a_slice)] + return b + + +def axis_assign(a, b, start=None, stop=None, step=None, axis=-1): + """Take a slice along axis 'axis' from 'a' and set it to 'b' in-place. + + Parameters + ---------- + a : numpy.ndarray + The array to be sliced. + b : cupy.ndarray + The array to be assigned. + start, stop, step : int or None + The slice parameters. + axis : int, optional + The axis of `a` to be sliced. + + Examples + -------- + >>> a = array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) + >>> b1 = array([[-1], [-4], [-7]]) + >>> axis_assign(a, b1, start=0, stop=1, axis=1) + array([[-1, 2, 3], + [-4, 5, 6], + [-7, 8, 9]]) + + Notes + ----- + The keyword arguments start, stop and step are used by calling + slice(start, stop, step). This implies axis_assign() does not + handle its arguments the exactly the same as indexing. To assign + a single index k, for example, use + axis_assign(a, start=k, stop=k+1) + In this case, the length of the axis 'axis' in the result will + be 1; the trivial dimension is not removed. (Use numpy.squeeze() + to remove trivial axes.) + + This function works in-place and will modify the values contained in `a` + """ + a_slice = [slice(None)] * a.ndim + a_slice[axis] = slice(start, stop, step) + a[tuple(a_slice)] = b + return a + + +def axis_reverse(a, axis=-1): + """Reverse the 1-D slices of `a` along axis `axis`. + + Returns axis_slice(a, step=-1, axis=axis). 
+ """ + return axis_slice(a, step=-1, axis=axis) + + +def odd_ext(x, n, axis=-1): + """ + Odd extension at the boundaries of an array + + Generate a new ndarray by making an odd extension of `x` along an axis. + + Parameters + ---------- + x : ndarray + The array to be extended. + n : int + The number of elements by which to extend `x` at each end of the axis. + axis : int, optional + The axis along which to extend `x`. Default is -1. + + Examples + -------- + >>> from cupyx.scipy.signal._arraytools import odd_ext + >>> a = cupy.array([[1, 2, 3, 4, 5], [0, 1, 4, 9, 16]]) + >>> odd_ext(a, 2) + array([[-1, 0, 1, 2, 3, 4, 5, 6, 7], + [-4, -1, 0, 1, 4, 9, 16, 23, 28]]) + """ + if n < 1: + return x + if n > x.shape[axis] - 1: + raise ValueError(("The extension length n (%d) is too big. " + + "It must not exceed x.shape[axis]-1, which is %d.") + % (n, x.shape[axis] - 1)) + left_end = axis_slice(x, start=0, stop=1, axis=axis) + left_ext = axis_slice(x, start=n, stop=0, step=-1, axis=axis) + right_end = axis_slice(x, start=-1, axis=axis) + right_ext = axis_slice(x, start=-2, stop=-(n + 2), step=-1, axis=axis) + ext = cupy.concatenate((2 * left_end - left_ext, x, + 2 * right_end - right_ext), axis=axis) + return ext + + +def even_ext(x, n, axis=-1): + """ + Even extension at the boundaries of an array + + Generate a new ndarray by making an even extension of `x` along an axis. + + Parameters + ---------- + x : ndarray + The array to be extended. + n : int + The number of elements by which to extend `x` at each end of the axis. + axis : int, optional + The axis along which to extend `x`. Default is -1. + + Examples + -------- + >>> from cupyx.scipy.signal._arraytools import even_ext + >>> a = cupy.array([[1, 2, 3, 4, 5], [0, 1, 4, 9, 16]]) + >>> even_ext(a, 2) + array([[ 3, 2, 1, 2, 3, 4, 5, 4, 3], + [ 4, 1, 0, 1, 4, 9, 16, 9, 4]]) + + """ + if n < 1: + return x + if n > x.shape[axis] - 1: + raise ValueError(("The extension length n (%d) is too big. 
" + + "It must not exceed x.shape[axis]-1, which is %d.") + % (n, x.shape[axis] - 1)) + left_ext = axis_slice(x, start=n, stop=0, step=-1, axis=axis) + right_ext = axis_slice(x, start=-2, stop=-(n + 2), step=-1, axis=axis) + ext = cupy.concatenate((left_ext, x, right_ext), axis=axis) + return ext + + +def const_ext(x, n, axis=-1): + """ + Constant extension at the boundaries of an array + + Generate a new ndarray that is a constant extension of `x` along an axis. + The extension repeats the values at the first and last element of + the axis. + + Parameters + ---------- + x : ndarray + The array to be extended. + n : int + The number of elements by which to extend `x` at each end of the axis. + axis : int, optional + The axis along which to extend `x`. Default is -1. + + Examples + -------- + >>> from cupyx.scipy.signal._arraytools import const_ext + >>> a = cupy.array([[1, 2, 3, 4, 5], [0, 1, 4, 9, 16]]) + >>> const_ext(a, 2) + array([[ 1, 1, 1, 2, 3, 4, 5, 5, 5], + [ 0, 0, 0, 1, 4, 9, 16, 16, 16]]) + """ + if n < 1: + return x + left_end = axis_slice(x, start=0, stop=1, axis=axis) + ones_shape = [1] * x.ndim + ones_shape[axis] = n + ones = cupy.ones(ones_shape, dtype=x.dtype) + left_ext = ones * left_end + right_end = axis_slice(x, start=-1, axis=axis) + right_ext = ones * right_end + ext = cupy.concatenate((left_ext, x, right_ext), axis=axis) + return ext + + +def zero_ext(x, n, axis=-1): + """ + Zero padding at the boundaries of an array + + Generate a new ndarray that is a zero-padded extension of `x` along + an axis. + + Parameters + ---------- + x : ndarray + The array to be extended. + n : int + The number of elements by which to extend `x` at each end of the + axis. + axis : int, optional + The axis along which to extend `x`. Default is -1. 
+ + Examples + -------- + >>> from cupyx.scipy.signal._arraytools import zero_ext + >>> a = cupy.array([[1, 2, 3, 4, 5], [0, 1, 4, 9, 16]]) + >>> zero_ext(a, 2) + array([[ 0, 0, 1, 2, 3, 4, 5, 0, 0], + [ 0, 0, 0, 1, 4, 9, 16, 0, 0]]) + """ + if n < 1: + return x + zeros_shape = list(x.shape) + zeros_shape[axis] = n + zeros = cupy.zeros(zeros_shape, dtype=x.dtype) + ext = cupy.concatenate((zeros, x, zeros), axis=axis) + return ext + + +def _as_strided(x, shape=None, strides=None): + """ + Create a view into the array with the given shape and strides. + .. warning:: This function has to be used with extreme care, see notes. + + Parameters + ---------- + x : ndarray + Array to create a new. + shape : sequence of int, optional + The shape of the new array. Defaults to ``x.shape``. + strides : sequence of int, optional + The strides of the new array. Defaults to ``x.strides``. + + Returns + ------- + view : ndarray + + Notes + ----- + ``as_strided`` creates a view into the array given the exact strides + and shape. This means it manipulates the internal data structure of + ndarray and, if done incorrectly, the array elements can point to + invalid memory and can corrupt results or crash your program. 
+ """ + shape = x.shape if shape is None else tuple(shape) + strides = x.strides if strides is None else tuple(strides) + + return cupy.ndarray( + shape=shape, dtype=x.dtype, memptr=x.data, strides=strides) diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/_bsplines.py b/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/_bsplines.py new file mode 100644 index 0000000000000000000000000000000000000000..ce5546f6c7e96f7e44b3d15766e09a71ec36c1f7 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/_bsplines.py @@ -0,0 +1,596 @@ + +""" +Signal processing B-Splines + +Some of the functions defined here were ported directly from CuSignal under +terms of the MIT license, under the following notice: + +Copyright (c) 2019-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + +Permission is hereby granted, free of charge, to any person obtaining a +copy of this software and associated documentation files (the "Software"), +to deal in the Software without restriction, including without limitation +the rights to use, copy, modify, merge, publish, distribute, sublicense, +and/or sell copies of the Software, and to permit persons to whom the +Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. 
+""" + +import cupy +import cupyx.scipy.ndimage + +from cupyx.scipy.signal._iir_utils import apply_iir_sos +from cupyx.scipy.signal._splines import _symiirorder1_nd, _symiirorder2_nd +from cupyx.scipy.interpolate._bspline import BSpline + +import numpy as np + + +def sepfir2d(input, hrow, hcol): + """Convolve with a 2-D separable FIR filter. + + Convolve the rank-2 input array with the separable filter defined by the + rank-1 arrays hrow, and hcol. Mirror symmetric boundary conditions are + assumed. This function can be used to find an image given its B-spline + representation. + + The arguments `hrow` and `hcol` must be 1-dimensional and of off length. + + Args: + input (cupy.ndarray): The input signal + hrow (cupy.ndarray): Row direction filter + hcol (cupy.ndarray): Column direction filter + + Returns: + cupy.ndarray: The filtered signal + + .. seealso:: :func:`scipy.signal.sepfir2d` + """ + if any(x.ndim != 1 or x.size % 2 == 0 for x in (hrow, hcol)): + raise ValueError('hrow and hcol must be 1 dimensional and odd length') + dtype = input.dtype + if dtype.kind == 'c': + dtype = cupy.complex64 if dtype == cupy.complex64 else cupy.complex128 + elif dtype == cupy.float32 or dtype.itemsize <= 2: + dtype = cupy.float32 + else: + dtype = cupy.float64 + input = input.astype(dtype, copy=False) + hrow = hrow.astype(dtype, copy=False) + hcol = hcol.astype(dtype, copy=False) + filters = (hcol[::-1].conj(), hrow[::-1].conj()) + return cupyx.scipy.ndimage._filters._run_1d_correlates( + input, (0, 1), lambda i: filters[i], None, 'reflect', 0) + + +def _quadratic(x): + x = abs(cupy.asarray(x, dtype=float)) + b = BSpline.basis_element( + cupy.asarray([-1.5, -0.5, 0.5, 1.5]), extrapolate=False) + out = b(x) + out[(x < -1.5) | (x > 1.5)] = 0 + return out + + +def _cubic(x): + x = cupy.asarray(x, dtype=float) + b = BSpline.basis_element( + cupy.asarray([-2, -1, 0, 1, 2]), extrapolate=False) + out = b(x) + out[(x < -2) | (x > 2)] = 0 + return out + + +@cupy.fuse() +def 
_coeff_smooth(lam): + xi = 1 - 96 * lam + 24 * lam * cupy.sqrt(3 + 144 * lam) + omeg = cupy.arctan2(cupy.sqrt(144 * lam - 1), cupy.sqrt(xi)) + rho = (24 * lam - 1 - cupy.sqrt(xi)) / (24 * lam) + rho = rho * cupy.sqrt( + (48 * lam + 24 * lam * cupy.sqrt(3 + 144 * lam)) / xi) + return rho, omeg + + +@cupy.fuse() +def _hc(k, cs, rho, omega): + return (cs / cupy.sin(omega) * (rho ** k) * cupy.sin(omega * (k + 1)) * + cupy.greater(k, -1)) + + +@cupy.fuse() +def _hs(k, cs, rho, omega): + c0 = (cs * cs * (1 + rho * rho) / (1 - rho * rho) / + (1 - 2 * rho * rho * cupy.cos(2 * omega) + rho ** 4)) + gamma = (1 - rho * rho) / (1 + rho * rho) / cupy.tan(omega) + ak = cupy.abs(k) + return c0 * rho ** ak * ( + cupy.cos(omega * ak) + gamma * cupy.sin(omega * ak)) + + +def _cubic_smooth_coeff(signal, lamb): + rho, omega = _coeff_smooth(lamb) + cs = 1 - 2 * rho * cupy.cos(omega) + rho * rho + K = len(signal) + yp = cupy.zeros((K,), signal.dtype.char) + k = cupy.arange(K) + + state_0 = (_hc(0, cs, rho, omega) * signal[0] + + cupy.sum(_hc(k + 1, cs, rho, omega) * signal)) + state_1 = (_hc(0, cs, rho, omega) * signal[0] + + _hc(1, cs, rho, omega) * signal[1] + + cupy.sum(_hc(k + 2, cs, rho, omega) * signal)) + + zi = cupy.r_[0, 0, state_0, state_1] + zi = cupy.atleast_2d(zi) + + coef = cupy.r_[cs, 0, 0, 1, -2 * rho * cupy.cos(omega), rho * rho] + coef = cupy.atleast_2d(coef) + + # Forward pass: + # + # yp[0] = (_hc(0, cs, rho, omega) * signal[0] + + # cupy.sum(_hc(k + 1, cs, rho, omega) * signal)) + # yp[1] = (_hc(0, cs, rho, omega) * signal[0] + + # _hc(1, cs, rho, omega) * signal[1] + + # cupy.sum(_hc(k + 2, cs, rho, omega) * signal)) + # for n in range(2, K): + # yp[n] = (cs * signal[n] + 2 * rho * cupy.cos(omega) * yp[n - 1] - + # rho * rho * yp[n - 2]) + + yp, _ = apply_iir_sos(signal[2:], coef, zi=zi, dtype=signal.dtype) + yp = cupy.r_[state_0, state_1, yp] + + # Reverse pass: + # + # y[K - 1] = cupy.sum((_hs(k, cs, rho, omega) + + # _hs(k + 1, cs, rho, omega)) * signal[::-1]) + 
# y[K - 2] = cupy.sum((_hs(k - 1, cs, rho, omega) + + # _hs(k + 2, cs, rho, omega)) * signal[::-1]) + # for n in range(K - 3, -1, -1): + # y[n] = (cs * yp[n] + 2 * rho * cupy.cos(omega) * y[n + 1] - + # rho * rho * y[n + 2]) + + state_0 = cupy.sum((_hs(k, cs, rho, omega) + + _hs(k + 1, cs, rho, omega)) * signal[::-1]) + state_1 = cupy.sum((_hs(k - 1, cs, rho, omega) + + _hs(k + 2, cs, rho, omega)) * signal[::-1]) + + zi = cupy.r_[0, 0, state_0, state_1] + zi = cupy.atleast_2d(zi) + + y, _ = apply_iir_sos(yp[-3::-1], coef, zi=zi, dtype=signal.dtype) + y = cupy.r_[y[::-1], state_1, state_0] + return y + + +def _cubic_coeff(signal): + zi = -2 + cupy.sqrt(3) + K = len(signal) + powers = zi ** cupy.arange(K) + + if K == 1: + yplus = signal[0] + zi * cupy.sum(powers * signal) + output = zi / (zi - 1) * yplus + return cupy.atleast_1d(output) + + state = cupy.r_[0, 0, 0, cupy.sum(powers * signal)] + state = cupy.atleast_2d(state) + coef = cupy.r_[1, 0, 0, 1, -zi, 0] + coef = cupy.atleast_2d(coef) + + # yplus[0] = signal[0] + zi * sum(powers * signal) + # for k in range(1, K): + # yplus[k] = signal[k] + zi * yplus[k - 1] + yplus, _ = apply_iir_sos(signal, coef, zi=state, apply_fir=False, + dtype=signal.dtype) + + out_last = zi / (zi - 1) * yplus[K - 1] + state = cupy.r_[0, 0, 0, out_last] + state = cupy.atleast_2d(state) + + coef = cupy.r_[-zi, 0, 0, 1, -zi, 0] + coef = cupy.atleast_2d(coef) + + # output[K - 1] = zi / (zi - 1) * yplus[K - 1] + # for k in range(K - 2, -1, -1): + # output[k] = zi * (output[k + 1] - yplus[k]) + output, _ = apply_iir_sos( + yplus[-2::-1], coef, zi=state, dtype=signal.dtype) + output = cupy.r_[output[::-1], out_last] + return output * 6.0 + + +def _quadratic_coeff(signal): + zi = -3 + 2 * cupy.sqrt(2.0) + K = len(signal) + powers = zi ** cupy.arange(K) + + if K == 1: + yplus = signal[0] + zi * cupy.sum(powers * signal) + output = zi / (zi - 1) * yplus + return cupy.atleast_1d(output) + + state = cupy.r_[0, 0, 0, cupy.sum(powers * signal)] + 
state = cupy.atleast_2d(state) + coef = cupy.r_[1, 0, 0, 1, -zi, 0] + coef = cupy.atleast_2d(coef) + + # yplus[0] = signal[0] + zi * cupy.sum(powers * signal) + # for k in range(1, K): + # yplus[k] = signal[k] + zi * yplus[k - 1] + yplus, _ = apply_iir_sos(signal, coef, zi=state, apply_fir=False, + dtype=signal.dtype) + + out_last = zi / (zi - 1) * yplus[K - 1] + state = cupy.r_[0, 0, 0, out_last] + state = cupy.atleast_2d(state) + + coef = cupy.r_[-zi, 0, 0, 1, -zi, 0] + coef = cupy.atleast_2d(coef) + + # output[K - 1] = zi / (zi - 1) * yplus[K - 1] + # for k in range(K - 2, -1, -1): + # output[k] = zi * (output[k + 1] - yplus[k]) + output, _ = apply_iir_sos( + yplus[-2::-1], coef, zi=state, dtype=signal.dtype) + output = cupy.r_[output[::-1], out_last] + return output * 8.0 + + +def compute_root_from_lambda(lamb): + tmp = np.sqrt(3 + 144 * lamb) + xi = 1 - 96 * lamb + 24 * lamb * tmp + omega = np.arctan(np.sqrt((144 * lamb - 1.0) / xi)) + tmp2 = np.sqrt(xi) + r = ((24 * lamb - 1 - tmp2) / (24 * lamb) * + np.sqrt((48*lamb + 24 * lamb * tmp)) / tmp2) + return r, omega + + +def cspline1d(signal, lamb=0.0): + """ + Compute cubic spline coefficients for rank-1 array. + + Find the cubic spline coefficients for a 1-D signal assuming + mirror-symmetric boundary conditions. To obtain the signal back from the + spline representation mirror-symmetric-convolve these coefficients with a + length 3 FIR window [1.0, 4.0, 1.0]/ 6.0 . + + Parameters + ---------- + signal : ndarray + A rank-1 array representing samples of a signal. + lamb : float, optional + Smoothing coefficient, default is 0.0. + + Returns + ------- + c : ndarray + Cubic spline coefficients. + + See Also + -------- + cspline1d_eval : Evaluate a cubic spline at the new set of points. + + """ + if lamb != 0.0: + return _cubic_smooth_coeff(signal, lamb) + else: + return _cubic_coeff(signal) + + +def qspline1d(signal, lamb=0.0): + """Compute quadratic spline coefficients for rank-1 array. 
+ + Parameters + ---------- + signal : ndarray + A rank-1 array representing samples of a signal. + lamb : float, optional + Smoothing coefficient (must be zero for now). + + Returns + ------- + c : ndarray + Quadratic spline coefficients. + + See Also + -------- + qspline1d_eval : Evaluate a quadratic spline at the new set of points. + + Notes + ----- + Find the quadratic spline coefficients for a 1-D signal assuming + mirror-symmetric boundary conditions. To obtain the signal back from the + spline representation mirror-symmetric-convolve these coefficients with a + length 3 FIR window [1.0, 6.0, 1.0]/ 8.0 . + + """ + if lamb != 0.0: + raise ValueError("Smoothing quadratic splines not supported yet.") + else: + return _quadratic_coeff(signal) + + +def cspline1d_eval(cj, newx, dx=1.0, x0=0): + """Evaluate a cubic spline at the new set of points. + + `dx` is the old sample-spacing while `x0` was the old origin. In + other-words the old-sample points (knot-points) for which the `cj` + represent spline coefficients were at equally-spaced points of: + + oldx = x0 + j*dx j=0...N-1, with N=len(cj) + + Edges are handled using mirror-symmetric boundary conditions. + + Parameters + ---------- + cj : ndarray + cublic spline coefficients + newx : ndarray + New set of points. + dx : float, optional + Old sample-spacing, the default value is 1.0. + x0 : int, optional + Old origin, the default value is 0. + + Returns + ------- + res : ndarray + Evaluated a cubic spline points. + + See Also + -------- + cspline1d : Compute cubic spline coefficients for rank-1 array. 
+ + """ + newx = (cupy.asarray(newx) - x0) / float(dx) + res = cupy.zeros_like(newx, dtype=cj.dtype) + if res.size == 0: + return res + N = len(cj) + cond1 = newx < 0 + cond2 = newx > (N - 1) + cond3 = ~(cond1 | cond2) + # handle general mirror-symmetry + res[cond1] = cspline1d_eval(cj, -newx[cond1]) + res[cond2] = cspline1d_eval(cj, 2 * (N - 1) - newx[cond2]) + newx = newx[cond3] + if newx.size == 0: + return res + result = cupy.zeros_like(newx, dtype=cj.dtype) + jlower = cupy.floor(newx - 2).astype(int) + 1 + for i in range(4): + thisj = jlower + i + indj = thisj.clip(0, N - 1) # handle edge cases + result += cj[indj] * _cubic(newx - thisj) + res[cond3] = result + return res + + +def qspline1d_eval(cj, newx, dx=1.0, x0=0): + """Evaluate a quadratic spline at the new set of points. + + Parameters + ---------- + cj : ndarray + Quadratic spline coefficients + newx : ndarray + New set of points. + dx : float, optional + Old sample-spacing, the default value is 1.0. + x0 : int, optional + Old origin, the default value is 0. + + Returns + ------- + res : ndarray + Evaluated a quadratic spline points. + + See Also + -------- + qspline1d : Compute quadratic spline coefficients for rank-1 array. + + Notes + ----- + `dx` is the old sample-spacing while `x0` was the old origin. In + other-words the old-sample points (knot-points) for which the `cj` + represent spline coefficients were at equally-spaced points of:: + + oldx = x0 + j*dx j=0...N-1, with N=len(cj) + + Edges are handled using mirror-symmetric boundary conditions. 
+ + """ + newx = (cupy.asarray(newx) - x0) / dx + res = cupy.zeros_like(newx) + if res.size == 0: + return res + N = len(cj) + cond1 = newx < 0 + cond2 = newx > (N - 1) + cond3 = ~(cond1 | cond2) + # handle general mirror-symmetry + res[cond1] = qspline1d_eval(cj, -newx[cond1]) + res[cond2] = qspline1d_eval(cj, 2 * (N - 1) - newx[cond2]) + newx = newx[cond3] + if newx.size == 0: + return res + result = cupy.zeros_like(newx) + jlower = cupy.floor(newx - 1.5).astype(int) + 1 + for i in range(3): + thisj = jlower + i + indj = thisj.clip(0, N - 1) # handle edge cases + result += cj[indj] * _quadratic(newx - thisj) + res[cond3] = result + return res + + +def cspline2d(signal, lamb=0.0, precision=-1.0): + """ + Coefficients for 2-D cubic (3rd order) B-spline. + + Return the third-order B-spline coefficients over a regularly spaced + input grid for the two-dimensional input image. + + Parameters + ---------- + input : ndarray + The input signal. + lamb : float + Specifies the amount of smoothing in the transfer function. + precision : float + Specifies the precision for computing the infinite sum needed to apply + mirror-symmetric boundary conditions. + + Returns + ------- + output : ndarray + The filtered signal. + """ + if lamb <= 1 / 144.0: + # Normal cubic spline + r = -2 + np.sqrt(3.0) + out = _symiirorder1_nd(signal, -r * 6.0, r, precision=precision, + axis=-1) + out = _symiirorder1_nd(out, -r * 6.0, r, precision=precision, + axis=0) + return out + + r, omega = compute_root_from_lambda(lamb) + out = _symiirorder2_nd(signal, r, omega, precision=precision, axis=-1) + out = _symiirorder2_nd(out, r, omega, precision=precision, axis=0) + return out + + +def qspline2d(signal, lamb=0.0, precision=-1.0): + """ + Coefficients for 2-D quadratic (2nd order) B-spline. + + Return the second-order B-spline coefficients over a regularly spaced + input grid for the two-dimensional input image. + + Parameters + ---------- + input : ndarray + The input signal. 
+ lamb : float + Specifies the amount of smoothing in the transfer function. + precision : float + Specifies the precision for computing the infinite sum needed to apply + mirror-symmetric boundary conditions. + + Returns + ------- + output : ndarray + The filtered signal. + """ + + if lamb > 0: + raise ValueError('lambda must be negative or zero') + + # normal quadratic spline + r = -3 + 2 * np.sqrt(2.0) + + out = _symiirorder1_nd(signal, -r * 8.0, r, precision=precision, axis=-1) + out = _symiirorder1_nd(out, -r * 8.0, r, precision=precision, axis=0) + return out + + +def spline_filter(Iin, lmbda=5.0): + """Smoothing spline (cubic) filtering of a rank-2 array. + + Filter an input data set, `Iin`, using a (cubic) smoothing spline of + fall-off `lmbda`. + + Parameters + ---------- + Iin : array_like + input data set + lmbda : float, optional + spline smooghing fall-off value, default is `5.0`. + + Returns + ------- + res : ndarray + filtered input data + + """ + intype = Iin.dtype.char + hcol = cupy.asarray([1.0, 4.0, 1.0], 'f') / 6.0 + if intype in ['F', 'D']: + Iin = Iin.astype('F') + ckr = cspline2d(Iin.real, lmbda) + cki = cspline2d(Iin.imag, lmbda) + outr = sepfir2d(ckr, hcol, hcol) + outi = sepfir2d(cki, hcol, hcol) + out = (outr + 1j * outi).astype(intype) + elif intype in ['f', 'd']: + ckr = cspline2d(Iin, lmbda) + out = sepfir2d(ckr, hcol, hcol) + out = out.astype(intype) + else: + raise TypeError("Invalid data type for Iin") + return out + + +_gauss_spline_kernel = cupy.ElementwiseKernel( + "T x, int32 n", + "T output", + """ + output = 1 / sqrt( 2.0 * M_PI * signsq ) * exp( -( x * x ) * r_signsq ); + """, + "_gauss_spline_kernel", + options=("-std=c++11",), + loop_prep="const double signsq { ( n + 1 ) / 12.0 }; \ + const double r_signsq { 0.5 / signsq };", +) + + +def gauss_spline(x, n): + r"""Gaussian approximation to B-spline basis function of order n. + + Parameters + ---------- + x : array_like + a knot vector + n : int + The order of the spline. 
Must be nonnegative, i.e. n >= 0 + + Returns + ------- + res : ndarray + B-spline basis function values approximated by a zero-mean Gaussian + function. + + Notes + ----- + The B-spline basis function can be approximated well by a zero-mean + Gaussian function with standard-deviation equal to :math:`\sigma=(n+1)/12` + for large `n` : + + .. math:: \frac{1}{\sqrt {2\pi\sigma^2}}exp(-\frac{x^2}{2\sigma}) + + See [1]_, [2]_ for more information. + + References + ---------- + .. [1] Bouma H., Vilanova A., Bescos J.O., ter Haar Romeny B.M., Gerritsen + F.A. (2007) Fast and Accurate Gaussian Derivatives Based on B-Splines. + In: Sgallari F., Murli A., Paragios N. (eds) Scale Space and Variational + Methods in Computer Vision. SSVM 2007. Lecture Notes in Computer + Science, vol 4485. Springer, Berlin, Heidelberg + .. [2] http://folk.uio.no/inf3330/scripting/doc/python/SciPy/tutorial/old/node24.html + """ # NOQA + x = cupy.asarray(x) + return _gauss_spline_kernel(x, n) diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/_czt.py b/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/_czt.py new file mode 100644 index 0000000000000000000000000000000000000000..786e624d32561a5dd29b2f4d9c09d7e112173e2d --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/_czt.py @@ -0,0 +1,451 @@ +# This program is public domain +# Authors: Paul Kienzle, Nadav Horesh +# +# Adapted from scipy 1.10.1. +# +""" +Chirp z-transform. + +We provide two interfaces to the chirp z-transform: an object interface +which precalculates part of the transform and can be applied efficiently +to many different data sets, and a functional interface which is applied +only to the given data set. + +Transforms +---------- + +CZT : callable (x, axis=-1) -> array + Define a chirp z-transform that can be applied to different signals. +ZoomFFT : callable (x, axis=-1) -> array + Define a Fourier transform on a range of frequencies. 
+ +Functions +--------- + +czt : array + Compute the chirp z-transform for a signal. +zoom_fft : array + Compute the Fourier transform on a range of frequencies. +""" + +import cmath +import numbers +import cupy +from numpy import pi +from cupyx.scipy.fft import fft, ifft, next_fast_len + +__all__ = ['czt', 'zoom_fft', 'CZT', 'ZoomFFT', 'czt_points'] + + +def _validate_sizes(n, m): + if n < 1 or not isinstance(n, numbers.Integral): + raise ValueError('Invalid number of CZT data ' + f'points ({n}) specified. ' + 'n must be positive and integer type.') + + if m is None: + m = n + elif m < 1 or not isinstance(m, numbers.Integral): + raise ValueError('Invalid number of CZT output ' + f'points ({m}) specified. ' + 'm must be positive and integer type.') + + return m + + +def czt_points(m, w=None, a=1+0j): + """ + Return the points at which the chirp z-transform is computed. + + Parameters + ---------- + m : int + The number of points desired. + w : complex, optional + The ratio between points in each step. + Defaults to equally spaced points around the entire unit circle. + a : complex, optional + The starting point in the complex plane. Default is 1+0j. + + Returns + ------- + out : ndarray + The points in the Z plane at which `CZT` samples the z-transform, + when called with arguments `m`, `w`, and `a`, as complex numbers. + + See Also + -------- + CZT : Class that creates a callable chirp z-transform function. + czt : Convenience function for quickly calculating CZT. + scipy.signal.czt_points + + """ + m = _validate_sizes(1, m) + + k = cupy.arange(m) + + a = 1.0 * a # at least float + + if w is None: + # Nothing specified, default to FFT + return a * cupy.exp(2j * pi * k / m) + else: + # w specified + w = 1.0 * w # at least float + return a * w**-k + + +class CZT: + """ + Create a callable chirp z-transform function. + + Transform to compute the frequency response around a spiral. 
+ Objects of this class are callables which can compute the + chirp z-transform on their inputs. This object precalculates the constant + chirps used in the given transform. + + Parameters + ---------- + n : int + The size of the signal. + m : int, optional + The number of output points desired. Default is `n`. + w : complex, optional + The ratio between points in each step. This must be precise or the + accumulated error will degrade the tail of the output sequence. + Defaults to equally spaced points around the entire unit circle. + a : complex, optional + The starting point in the complex plane. Default is 1+0j. + + Returns + ------- + f : CZT + Callable object ``f(x, axis=-1)`` for computing the chirp z-transform + on `x`. + + See Also + -------- + czt : Convenience function for quickly calculating CZT. + ZoomFFT : Class that creates a callable partial FFT function. + scipy.signal.CZT + + Notes + ----- + The defaults are chosen such that ``f(x)`` is equivalent to + ``fft.fft(x)`` and, if ``m > len(x)``, that ``f(x, m)`` is equivalent to + ``fft.fft(x, m)``. + + If `w` does not lie on the unit circle, then the transform will be + around a spiral with exponentially-increasing radius. Regardless, + angle will increase linearly. + + For transforms that do lie on the unit circle, accuracy is better when + using `ZoomFFT`, since any numerical error in `w` is + accumulated for long data lengths, drifting away from the unit circle. + + The chirp z-transform can be faster than an equivalent FFT with + zero padding. Try it with your own array sizes to see. + + However, the chirp z-transform is considerably less precise than the + equivalent zero-padded FFT. + + As this CZT is implemented using the Bluestein algorithm [1]_, it can + compute large prime-length Fourier transforms in O(N log N) time, rather + than the O(N**2) time required by the direct DFT calculation. + (`scipy.fft` also uses Bluestein's algorithm'.) 
+ + (The name "chirp z-transform" comes from the use of a chirp in the + Bluestein algorithm [2]_. It does not decompose signals into chirps, like + other transforms with "chirp" in the name.) + + References + ---------- + .. [1] Leo I. Bluestein, "A linear filtering approach to the computation + of the discrete Fourier transform," Northeast Electronics Research + and Engineering Meeting Record 10, 218-219 (1968). + .. [2] Rabiner, Schafer, and Rader, "The chirp z-transform algorithm and + its application," Bell Syst. Tech. J. 48, 1249-1292 (1969). + + """ + + def __init__(self, n, m=None, w=None, a=1+0j): + m = _validate_sizes(n, m) + + k = cupy.arange(max(m, n), dtype=cupy.min_scalar_type(-max(m, n)**2)) + + if w is None: + # Nothing specified, default to FFT-like + w = cmath.exp(-2j*pi/m) + wk2 = cupy.exp(-(1j * pi * ((k**2) % (2*m))) / m) + else: + # w specified + wk2 = w**(k**2/2.) + + a = 1.0 * a # at least float + + self.w, self.a = w, a + self.m, self.n = m, n + + nfft = next_fast_len(n + m - 1) + self._Awk2 = a**-k[:n] * wk2[:n] + self._nfft = nfft + self._Fwk2 = fft(1/cupy.hstack((wk2[n-1:0:-1], wk2[:m])), nfft) + self._wk2 = wk2[:m] + self._yidx = slice(n-1, n+m-1) + + def __call__(self, x, *, axis=-1): + """ + Calculate the chirp z-transform of a signal. + + Parameters + ---------- + x : array + The signal to transform. + axis : int, optional + Axis over which to compute the FFT. If not given, the last axis is + used. + + Returns + ------- + out : ndarray + An array of the same dimensions as `x`, but with the length of the + transformed axis set to `m`. 
+ """ + x = cupy.asarray(x) + if x.shape[axis] != self.n: + raise ValueError(f"CZT defined for length {self.n}, not " + f"{x.shape[axis]}") + # Calculate transpose coordinates, to allow operation on any given axis + trnsp = list(range(x.ndim)) + trnsp[axis], trnsp[-1] = trnsp[-1], trnsp[axis] + x = x.transpose(*trnsp) + y = ifft(self._Fwk2 * fft(x*self._Awk2, self._nfft)) + y = y[..., self._yidx] * self._wk2 + return y.transpose(*trnsp) + + def points(self): + """ + Return the points at which the chirp z-transform is computed. + """ + return czt_points(self.m, self.w, self.a) + + +class ZoomFFT(CZT): + """ + Create a callable zoom FFT transform function. + + This is a specialization of the chirp z-transform (`CZT`) for a set of + equally-spaced frequencies around the unit circle, used to calculate a + section of the FFT more efficiently than calculating the entire FFT and + truncating. [1]_ + + Parameters + ---------- + n : int + The size of the signal. + fn : array_like + A length-2 sequence [`f1`, `f2`] giving the frequency range, or a + scalar, for which the range [0, `fn`] is assumed. + m : int, optional + The number of points to evaluate. Default is `n`. + fs : float, optional + The sampling frequency. If ``fs=10`` represented 10 kHz, for example, + then `f1` and `f2` would also be given in kHz. + The default sampling frequency is 2, so `f1` and `f2` should be + in the range [0, 1] to keep the transform below the Nyquist + frequency. + endpoint : bool, optional + If True, `f2` is the last sample. Otherwise, it is not included. + Default is False. + + Returns + ------- + f : ZoomFFT + Callable object ``f(x, axis=-1)`` for computing the zoom FFT on `x`. + + See Also + -------- + zoom_fft : Convenience function for calculating a zoom FFT. + scipy.signal.ZoomFFT + + Notes + ----- + The defaults are chosen such that ``f(x, 2)`` is equivalent to + ``fft.fft(x)`` and, if ``m > len(x)``, that ``f(x, 2, m)`` is equivalent to + ``fft.fft(x, m)``. 
+ + Sampling frequency is 1/dt, the time step between samples in the + signal `x`. The unit circle corresponds to frequencies from 0 up + to the sampling frequency. The default sampling frequency of 2 + means that `f1`, `f2` values up to the Nyquist frequency are in the + range [0, 1). For `f1`, `f2` values expressed in radians, a sampling + frequency of 2*pi should be used. + + Remember that a zoom FFT can only interpolate the points of the existing + FFT. It cannot help to resolve two separate nearby frequencies. + Frequency resolution can only be increased by increasing acquisition + time. + + These functions are implemented using Bluestein's algorithm (as is + `scipy.fft`). [2]_ + + References + ---------- + .. [1] Steve Alan Shilling, "A study of the chirp z-transform and its + applications", pg 29 (1970) + https://krex.k-state.edu/dspace/bitstream/handle/2097/7844/LD2668R41972S43.pdf + .. [2] Leo I. Bluestein, "A linear filtering approach to the computation + of the discrete Fourier transform," Northeast Electronics Research + and Engineering Meeting Record 10, 218-219 (1968). 
+ """ + + def __init__(self, n, fn, m=None, *, fs=2, endpoint=False): + m = _validate_sizes(n, m) + + k = cupy.arange(max(m, n), dtype=cupy.min_scalar_type(-max(m, n)**2)) + + fn = cupy.asarray(fn) + if cupy.size(fn) == 2: + f1, f2 = fn + elif cupy.size(fn) == 1: + f1, f2 = 0.0, fn + else: + raise ValueError('fn must be a scalar or 2-length sequence') + + self.f1, self.f2, self.fs = f1, f2, fs + + if endpoint: + scale = ((f2 - f1) * m) / (fs * (m - 1)) + else: + scale = (f2 - f1) / fs + a = cmath.exp(2j * pi * f1/fs) + wk2 = cupy.exp(-(1j * pi * scale * k**2) / m) + + self.w = cmath.exp(-2j*pi/m * scale) + self.a = a + self.m, self.n = m, n + + ak = cupy.exp(-2j * pi * f1/fs * k[:n]) + self._Awk2 = ak * wk2[:n] + + nfft = next_fast_len(n + m - 1) + self._nfft = nfft + self._Fwk2 = fft(1/cupy.hstack((wk2[n-1:0:-1], wk2[:m])), nfft) + self._wk2 = wk2[:m] + self._yidx = slice(n-1, n+m-1) + + +def czt(x, m=None, w=None, a=1+0j, *, axis=-1): + """ + Compute the frequency response around a spiral in the Z plane. + + Parameters + ---------- + x : array + The signal to transform. + m : int, optional + The number of output points desired. Default is the length of the + input data. + w : complex, optional + The ratio between points in each step. This must be precise or the + accumulated error will degrade the tail of the output sequence. + Defaults to equally spaced points around the entire unit circle. + a : complex, optional + The starting point in the complex plane. Default is 1+0j. + axis : int, optional + Axis over which to compute the FFT. If not given, the last axis is + used. + + Returns + ------- + out : ndarray + An array of the same dimensions as `x`, but with the length of the + transformed axis set to `m`. + + See Also + -------- + CZT : Class that creates a callable chirp z-transform function. + zoom_fft : Convenience function for partial FFT calculations. 
+ scipy.signal.czt + + Notes + ----- + The defaults are chosen such that ``signal.czt(x)`` is equivalent to + ``fft.fft(x)`` and, if ``m > len(x)``, that ``signal.czt(x, m)`` is + equivalent to ``fft.fft(x, m)``. + + If the transform needs to be repeated, use `CZT` to construct a + specialized transform function which can be reused without + recomputing constants. + + An example application is in system identification, repeatedly evaluating + small slices of the z-transform of a system, around where a pole is + expected to exist, to refine the estimate of the pole's true location. [1]_ + + References + ---------- + .. [1] Steve Alan Shilling, "A study of the chirp z-transform and its + applications", pg 20 (1970) + https://krex.k-state.edu/dspace/bitstream/handle/2097/7844/LD2668R41972S43.pdf + + """ + x = cupy.asarray(x) + transform = CZT(x.shape[axis], m=m, w=w, a=a) + return transform(x, axis=axis) + + +def zoom_fft(x, fn, m=None, *, fs=2, endpoint=False, axis=-1): + """ + Compute the DFT of `x` only for frequencies in range `fn`. + + Parameters + ---------- + x : array + The signal to transform. + fn : array_like + A length-2 sequence [`f1`, `f2`] giving the frequency range, or a + scalar, for which the range [0, `fn`] is assumed. + m : int, optional + The number of points to evaluate. The default is the length of `x`. + fs : float, optional + The sampling frequency. If ``fs=10`` represented 10 kHz, for example, + then `f1` and `f2` would also be given in kHz. + The default sampling frequency is 2, so `f1` and `f2` should be + in the range [0, 1] to keep the transform below the Nyquist + frequency. + endpoint : bool, optional + If True, `f2` is the last sample. Otherwise, it is not included. + Default is False. + axis : int, optional + Axis over which to compute the FFT. If not given, the last axis is + used. + + Returns + ------- + out : ndarray + The transformed signal. 
The Fourier transform will be calculated + at the points f1, f1+df, f1+2df, ..., f2, where df=(f2-f1)/m. + + See Also + -------- + ZoomFFT : Class that creates a callable partial FFT function. + scipy.signal.zoom_fft + + Notes + ----- + The defaults are chosen such that ``signal.zoom_fft(x, 2)`` is equivalent + to ``fft.fft(x)`` and, if ``m > len(x)``, that ``signal.zoom_fft(x, 2, m)`` + is equivalent to ``fft.fft(x, m)``. + + To graph the magnitude of the resulting transform, use:: + + plot(linspace(f1, f2, m, endpoint=False), + abs(zoom_fft(x, [f1, f2], m))) + + If the transform needs to be repeated, use `ZoomFFT` to construct + a specialized transform function which can be reused without + recomputing constants. + """ + x = cupy.asarray(x) + transform = ZoomFFT(x.shape[axis], fn, m=m, fs=fs, endpoint=endpoint) + return transform(x, axis=axis) diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/_filter_design.py b/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/_filter_design.py new file mode 100644 index 0000000000000000000000000000000000000000..3808995a46e1dfbe72b32160f2a5c70ad646d4b1 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/_filter_design.py @@ -0,0 +1,811 @@ +import operator +from math import pi +import warnings + +import cupy +from cupy.polynomial.polynomial import ( + polyval as npp_polyval, polyvalfromroots as npp_polyvalfromroots) +import cupyx.scipy.fft as sp_fft +from cupyx import jit +from cupyx.scipy._lib._util import float_factorial +from cupyx.scipy.signal._polyutils import roots + +EPSILON = 2e-16 + + +def _try_convert_to_int(x): + """Return an integer for ``5`` and ``array(5)``, fail if not an + integer scalar. 
+ + NB: would be easier if ``operator.index(cupy.array(5))`` worked + (numpy.array(5) does) + """ + if isinstance(x, cupy.ndarray): + if x.ndim == 0: + value = x.item() + else: + return x, False + else: + value = x + try: + return operator.index(value), True + except TypeError: + return value, False + + +def findfreqs(num, den, N, kind='ba'): + """ + Find array of frequencies for computing the response of an analog filter. + + Parameters + ---------- + num, den : array_like, 1-D + The polynomial coefficients of the numerator and denominator of the + transfer function of the filter or LTI system, where the coefficients + are ordered from highest to lowest degree. Or, the roots of the + transfer function numerator and denominator (i.e., zeroes and poles). + N : int + The length of the array to be computed. + kind : str {'ba', 'zp'}, optional + Specifies whether the numerator and denominator are specified by their + polynomial coefficients ('ba'), or their roots ('zp'). + + Returns + ------- + w : (N,) ndarray + A 1-D array of frequencies, logarithmically spaced. + + Warning + ------- + This function may synchronize the device. 
+ + See Also + -------- + scipy.signal.find_freqs + + Examples + -------- + Find a set of nine frequencies that span the "interesting part" of the + frequency response for the filter with the transfer function + + H(s) = s / (s^2 + 8s + 25) + + >>> from scipy import signal + >>> signal.findfreqs([1, 0], [1, 8, 25], N=9) + array([ 1.00000000e-02, 3.16227766e-02, 1.00000000e-01, + 3.16227766e-01, 1.00000000e+00, 3.16227766e+00, + 1.00000000e+01, 3.16227766e+01, 1.00000000e+02]) + """ + if kind == 'ba': + ep = cupy.atleast_1d(roots(den)) + 0j + tz = cupy.atleast_1d(roots(num)) + 0j + elif kind == 'zp': + ep = cupy.atleast_1d(den) + 0j + tz = cupy.atleast_1d(num) + 0j + else: + raise ValueError("input must be one of {'ba', 'zp'}") + + if len(ep) == 0: + ep = cupy.atleast_1d(-1000) + 0j + + ez = cupy.r_[ + cupy.compress(ep.imag >= 0, ep, axis=-1), + cupy.compress((abs(tz) < 1e5) & (tz.imag >= 0), tz, axis=-1)] + + integ = cupy.abs(ez) < 1e-10 + hfreq = cupy.around(cupy.log10(cupy.max(3 * cupy.abs(ez.real + integ) + + 1.5 * ez.imag)) + 0.5) + lfreq = cupy.around(cupy.log10(0.1 * cupy.min(cupy.abs((ez + integ).real) + + 2 * ez.imag)) - 0.5) + w = cupy.logspace(lfreq, hfreq, N) + return w + + +def freqs(b, a, worN=200, plot=None): + """ + Compute frequency response of analog filter. + + Given the M-order numerator `b` and N-order denominator `a` of an analog + filter, compute its frequency response:: + + b[0]*(jw)**M + b[1]*(jw)**(M-1) + ... + b[M] + H(w) = ---------------------------------------------- + a[0]*(jw)**N + a[1]*(jw)**(N-1) + ... + a[N] + + Parameters + ---------- + b : array_like + Numerator of a linear filter. + a : array_like + Denominator of a linear filter. + worN : {None, int, array_like}, optional + If None, then compute at 200 frequencies around the interesting parts + of the response curve (determined by pole-zero locations). If a single + integer, then compute at that many frequencies. 
Otherwise, compute the + response at the angular frequencies (e.g., rad/s) given in `worN`. + plot : callable, optional + A callable that takes two arguments. If given, the return parameters + `w` and `h` are passed to plot. Useful for plotting the frequency + response inside `freqs`. + + Returns + ------- + w : ndarray + The angular frequencies at which `h` was computed. + h : ndarray + The frequency response. + + See Also + -------- + scipy.signal.freqs + freqz : Compute the frequency response of a digital filter. + + """ + if worN is None: + # For backwards compatibility + w = findfreqs(b, a, 200) + + else: + N, _is_int = _try_convert_to_int(worN) + if _is_int: + w = findfreqs(b, a, N) + else: + w = cupy.atleast_1d(worN) + + s = 1j * w + h = cupy.polyval(b, s) / cupy.polyval(a, s) + + if plot is not None: + plot(w, h) + + return w, h + + +def freqs_zpk(z, p, k, worN=200): + """ + Compute frequency response of analog filter. + + Given the zeros `z`, poles `p`, and gain `k` of a filter, compute its + frequency response:: + + (jw-z[0]) * (jw-z[1]) * ... * (jw-z[-1]) + H(w) = k * ---------------------------------------- + (jw-p[0]) * (jw-p[1]) * ... * (jw-p[-1]) + + Parameters + ---------- + z : array_like + Zeroes of a linear filter + p : array_like + Poles of a linear filter + k : scalar + Gain of a linear filter + worN : {None, int, array_like}, optional + If None, then compute at 200 frequencies around the interesting parts + of the response curve (determined by pole-zero locations). If a single + integer, then compute at that many frequencies. Otherwise, compute the + response at the angular frequencies (e.g., rad/s) given in `worN`. + + Returns + ------- + w : ndarray + The angular frequencies at which `h` was computed. + h : ndarray + The frequency response. 
+ + See Also + -------- + scipy.signal.freqs_zpk + + """ + k = cupy.asarray(k) + if k.size > 1: + raise ValueError('k must be a single scalar gain') + + if worN is None: + # For backwards compatibility + w = findfreqs(z, p, 200, kind='zp') + else: + N, _is_int = _try_convert_to_int(worN) + if _is_int: + w = findfreqs(z, p, worN, kind='zp') + else: + w = worN + + w = cupy.atleast_1d(w) + s = 1j * w + num = npp_polyvalfromroots(s, z) + den = npp_polyvalfromroots(s, p) + h = k * num/den + return w, h + + +def _is_int_type(x): + """ + Check if input is of a scalar integer type (so ``5`` and ``array(5)`` will + pass, while ``5.0`` and ``array([5])`` will fail. + """ + if cupy.ndim(x) != 0: + # Older versions of NumPy did not raise for np.array([1]).__index__() + # This is safe to remove when support for those versions is dropped + return False + try: + operator.index(x) + except TypeError: + return False + else: + return True + + +def group_delay(system, w=512, whole=False, fs=2 * cupy.pi): + r"""Compute the group delay of a digital filter. + + The group delay measures by how many samples amplitude envelopes of + various spectral components of a signal are delayed by a filter. + It is formally defined as the derivative of continuous (unwrapped) phase:: + + d jw + D(w) = - -- arg H(e) + dw + + Parameters + ---------- + system : tuple of array_like (b, a) + Numerator and denominator coefficients of a filter transfer function. + w : {None, int, array_like}, optional + If a single integer, then compute at that many frequencies (default is + N=512). + + If an array_like, compute the delay at the frequencies given. These + are in the same units as `fs`. + whole : bool, optional + Normally, frequencies are computed from 0 to the Nyquist frequency, + fs/2 (upper-half of unit-circle). If `whole` is True, compute + frequencies from 0 to fs. Ignored if w is array_like. + fs : float, optional + The sampling frequency of the digital system. 
Defaults to 2*pi + radians/sample (so w is from 0 to pi). + + Returns + ------- + w : ndarray + The frequencies at which group delay was computed, in the same units + as `fs`. By default, `w` is normalized to the range [0, pi) + (radians/sample). + gd : ndarray + The group delay. + + See Also + -------- + freqz : Frequency response of a digital filter + + Notes + ----- + The similar function in MATLAB is called `grpdelay`. + + If the transfer function :math:`H(z)` has zeros or poles on the unit + circle, the group delay at corresponding frequencies is undefined. + When such a case arises the warning is raised and the group delay + is set to 0 at those frequencies. + + For the details of numerical computation of the group delay refer to [1]_. + + References + ---------- + .. [1] Richard G. Lyons, "Understanding Digital Signal Processing, + 3rd edition", p. 830. + + """ + if w is None: + # For backwards compatibility + w = 512 + + if _is_int_type(w): + if whole: + w = cupy.linspace(0, 2 * cupy.pi, w, endpoint=False) + else: + w = cupy.linspace(0, cupy.pi, w, endpoint=False) + else: + w = cupy.atleast_1d(w) + w = 2 * cupy.pi * w / fs + + b, a = map(cupy.atleast_1d, system) + c = cupy.convolve(b, a[::-1]) + cr = c * cupy.arange(c.size) + z = cupy.exp(-1j * w) + num = cupy.polyval(cr[::-1], z) + den = cupy.polyval(c[::-1], z) + gd = cupy.real(num / den) - a.size + 1 + singular = ~cupy.isfinite(gd) + gd[singular] = 0 + + w = w * fs / (2 * cupy.pi) + return w, gd + + +def freqz(b, a=1, worN=512, whole=False, plot=None, fs=2*pi, + include_nyquist=False): + """ + Compute the frequency response of a digital filter. + + Given the M-order numerator `b` and N-order denominator `a` of a digital + filter, compute its frequency response:: + + jw -jw -jwM + jw B(e ) b[0] + b[1]e + ... + b[M]e + H(e ) = ------ = ----------------------------------- + jw -jw -jwN + A(e ) a[0] + a[1]e + ... + a[N]e + + Parameters + ---------- + b : array_like + Numerator of a linear filter. 
If `b` has dimension greater than 1, + it is assumed that the coefficients are stored in the first dimension, + and ``b.shape[1:]``, ``a.shape[1:]``, and the shape of the frequencies + array must be compatible for broadcasting. + a : array_like + Denominator of a linear filter. If `b` has dimension greater than 1, + it is assumed that the coefficients are stored in the first dimension, + and ``b.shape[1:]``, ``a.shape[1:]``, and the shape of the frequencies + array must be compatible for broadcasting. + worN : {None, int, array_like}, optional + If a single integer, then compute at that many frequencies (default is + N=512). This is a convenient alternative to:: + + cupy.linspace(0, fs if whole else fs/2, N, + endpoint=include_nyquist) + + Using a number that is fast for FFT computations can result in + faster computations (see Notes). + + If an array_like, compute the response at the frequencies given. + These are in the same units as `fs`. + whole : bool, optional + Normally, frequencies are computed from 0 to the Nyquist frequency, + fs/2 (upper-half of unit-circle). If `whole` is True, compute + frequencies from 0 to fs. Ignored if worN is array_like. + plot : callable + A callable that takes two arguments. If given, the return parameters + `w` and `h` are passed to plot. Useful for plotting the frequency + response inside `freqz`. + fs : float, optional + The sampling frequency of the digital system. Defaults to 2*pi + radians/sample (so w is from 0 to pi). + include_nyquist : bool, optional + If `whole` is False and `worN` is an integer, setting `include_nyquist` + to True will include the last frequency (Nyquist frequency) and is + otherwise ignored. + + Returns + ------- + w : ndarray + The frequencies at which `h` was computed, in the same units as `fs`. + By default, `w` is normalized to the range [0, pi) (radians/sample). + h : ndarray + The frequency response, as complex numbers. 
+ + See Also + -------- + freqz_zpk + sosfreqz + scipy.signal.freqz + + + Notes + ----- + Using Matplotlib's :func:`matplotlib.pyplot.plot` function as the callable + for `plot` produces unexpected results, as this plots the real part of the + complex transfer function, not the magnitude. + Try ``lambda w, h: plot(w, cupy.abs(h))``. + + A direct computation via (R)FFT is used to compute the frequency response + when the following conditions are met: + + 1. An integer value is given for `worN`. + 2. `worN` is fast to compute via FFT (i.e., + `next_fast_len(worN) ` equals `worN`). + 3. The denominator coefficients are a single value (``a.shape[0] == 1``). + 4. `worN` is at least as long as the numerator coefficients + (``worN >= b.shape[0]``). + 5. If ``b.ndim > 1``, then ``b.shape[-1] == 1``. + + For long FIR filters, the FFT approach can have lower error and be much + faster than the equivalent direct polynomial calculation. + """ + b = cupy.atleast_1d(b) + a = cupy.atleast_1d(a) + + if worN is None: + # For backwards compatibility + worN = 512 + + h = None + + N, _is_int = _try_convert_to_int(worN) + if _is_int: + if N < 0: + raise ValueError(f'worN must be nonnegative, got {N}') + lastpoint = 2 * pi if whole else pi + + # if include_nyquist is true and whole is false, w should + # include end point + w = cupy.linspace( + 0, lastpoint, N, endpoint=include_nyquist and not whole) + + use_fft = (a.size == 1 and + N >= b.shape[0] and + sp_fft.next_fast_len(N) == N and + (b.ndim == 1 or (b.shape[-1] == 1)) + ) + + if use_fft: + # if N is fast, 2 * N will be fast, too, so no need to check + n_fft = N if whole else N * 2 + if cupy.isrealobj(b) and cupy.isrealobj(a): + fft_func = sp_fft.rfft + else: + fft_func = sp_fft.fft + + h = fft_func(b, n=n_fft, axis=0)[:N] + h /= a + if fft_func is sp_fft.rfft and whole: + # exclude DC and maybe Nyquist (no need to use axis_reverse + # here because we can build reversal with the truncation) + stop = -1 if n_fft % 2 == 1 else -2 + 
h_flip = slice(stop, 0, -1) + h = cupy.concatenate((h, h[h_flip].conj())) + if b.ndim > 1: + # Last axis of h has length 1, so drop it. + h = h[..., 0] + # Move the first axis of h to the end. + h = cupy.moveaxis(h, 0, -1) + else: + w = cupy.atleast_1d(worN) + w = 2 * pi * w / fs + + if h is None: # still need to compute using freqs w + zm1 = cupy.exp(-1j * w) + h = (npp_polyval(zm1, b, tensor=False) / + npp_polyval(zm1, a, tensor=False)) + + w = w * fs / (2 * pi) + + if plot is not None: + plot(w, h) + + return w, h + + +def freqz_zpk(z, p, k, worN=512, whole=False, fs=2*pi): + r""" + Compute the frequency response of a digital filter in ZPK form. + + Given the Zeros, Poles and Gain of a digital filter, compute its frequency + response: + + :math:`H(z)=k \prod_i (z - Z[i]) / \prod_j (z - P[j])` + + where :math:`k` is the `gain`, :math:`Z` are the `zeros` and :math:`P` are + the `poles`. + + Parameters + ---------- + z : array_like + Zeroes of a linear filter + p : array_like + Poles of a linear filter + k : scalar + Gain of a linear filter + worN : {None, int, array_like}, optional + If a single integer, then compute at that many frequencies (default is + N=512). + + If an array_like, compute the response at the frequencies given. + These are in the same units as `fs`. + whole : bool, optional + Normally, frequencies are computed from 0 to the Nyquist frequency, + fs/2 (upper-half of unit-circle). If `whole` is True, compute + frequencies from 0 to fs. Ignored if w is array_like. + fs : float, optional + The sampling frequency of the digital system. Defaults to 2*pi + radians/sample (so w is from 0 to pi). + + Returns + ------- + w : ndarray + The frequencies at which `h` was computed, in the same units as `fs`. + By default, `w` is normalized to the range [0, pi) (radians/sample). + h : ndarray + The frequency response, as complex numbers. 
+ + See Also + -------- + freqs : Compute the frequency response of an analog filter in TF form + freqs_zpk : Compute the frequency response of an analog filter in ZPK form + freqz : Compute the frequency response of a digital filter in TF form + scipy.signal.freqz_zpk + + """ + z, p = map(cupy.atleast_1d, (z, p)) + + if whole: + lastpoint = 2 * pi + else: + lastpoint = pi + + if worN is None: + # For backwards compatibility + w = cupy.linspace(0, lastpoint, 512, endpoint=False) + else: + N, _is_int = _try_convert_to_int(worN) + if _is_int: + w = cupy.linspace(0, lastpoint, N, endpoint=False) + else: + w = cupy.atleast_1d(worN) + w = 2 * pi * w / fs + + zm1 = cupy.exp(1j * w) + h = k * npp_polyvalfromroots(zm1, z) / npp_polyvalfromroots(zm1, p) + + w = w * fs / (2 * pi) + + return w, h + + +def _validate_sos(sos): + """Helper to validate a SOS input""" + sos = cupy.atleast_2d(sos) + if sos.ndim != 2: + raise ValueError('sos array must be 2D') + n_sections, m = sos.shape + if m != 6: + raise ValueError('sos array must be shape (n_sections, 6)') + if ((sos[:, 3] - 1) > 1e-15).any(): + raise ValueError('sos[:, 3] should be all ones') + return sos, n_sections + + +def sosfreqz(sos, worN=512, whole=False, fs=2*pi): + r""" + Compute the frequency response of a digital filter in SOS format. + + Given `sos`, an array with shape (n, 6) of second order sections of + a digital filter, compute the frequency response of the system function:: + + B0(z) B1(z) B{n-1}(z) + H(z) = ----- * ----- * ... * --------- + A0(z) A1(z) A{n-1}(z) + + for z = exp(omega*1j), where B{k}(z) and A{k}(z) are numerator and + denominator of the transfer function of the k-th second order section. + + Parameters + ---------- + sos : array_like + Array of second-order filter coefficients, must have shape + ``(n_sections, 6)``. Each row corresponds to a second-order + section, with the first three columns providing the numerator + coefficients and the last three providing the denominator + coefficients. 
+ worN : {None, int, array_like}, optional + If a single integer, then compute at that many frequencies (default is + N=512). Using a number that is fast for FFT computations can result + in faster computations (see Notes of `freqz`). + + If an array_like, compute the response at the frequencies given (must + be 1-D). These are in the same units as `fs`. + whole : bool, optional + Normally, frequencies are computed from 0 to the Nyquist frequency, + fs/2 (upper-half of unit-circle). If `whole` is True, compute + frequencies from 0 to fs. + fs : float, optional + The sampling frequency of the digital system. Defaults to 2*pi + radians/sample (so w is from 0 to pi). + + .. versionadded:: 1.2.0 + + Returns + ------- + w : ndarray + The frequencies at which `h` was computed, in the same units as `fs`. + By default, `w` is normalized to the range [0, pi) (radians/sample). + h : ndarray + The frequency response, as complex numbers. + + See Also + -------- + freqz, sosfilt + scipy.signal.sosfreqz + """ + sos, n_sections = _validate_sos(sos) + if n_sections == 0: + raise ValueError('Cannot compute frequencies with no sections') + h = 1. 
+ for row in sos: + w, rowh = freqz(row[:3], row[3:], worN=worN, whole=whole, fs=fs) + h *= rowh + return w, h + + +def _hz_to_erb(hz): + """ + Utility for converting from frequency (Hz) to the + Equivalent Rectangular Bandwidth (ERB) scale + ERB = frequency / EarQ + minBW + """ + EarQ = 9.26449 + minBW = 24.7 + return hz / EarQ + minBW + + +@jit.rawkernel() +def _gammatone_iir_kernel(fs, freq, b, a): + tid = jit.blockIdx.x * jit.blockDim.x + jit.threadIdx.x + + EarQ = 9.26449 + minBW = 24.7 + erb = freq / EarQ + minBW + + T = 1./fs + bw = 2 * cupy.pi * 1.019 * erb + fr = 2 * freq * cupy.pi * T + bwT = bw * T + + # Calculate the gain to normalize the volume at the center frequency + g1 = -2 * cupy.exp(2j * fr) * T + g2 = 2 * cupy.exp(-(bwT) + 1j * fr) * T + g3 = cupy.sqrt(3 + 2 ** (3 / 2)) * cupy.sin(fr) + g4 = cupy.sqrt(3 - 2 ** (3 / 2)) * cupy.sin(fr) + g5 = cupy.exp(2j * fr) + + g = g1 + g2 * (cupy.cos(fr) - g4) + g *= (g1 + g2 * (cupy.cos(fr) + g4)) + g *= (g1 + g2 * (cupy.cos(fr) - g3)) + g *= (g1 + g2 * (cupy.cos(fr) + g3)) + g /= ((-2 / cupy.exp(2 * bwT) - 2 * g5 + 2 * (1 + g5) / + cupy.exp(bwT)) ** 4) + g_act = cupy.abs(g) + + # Calculate the numerator coefficients + if tid == 0: + b[tid] = (T ** 4) / g_act + a[tid] = 1 + elif tid == 1: + b[tid] = -4 * T ** 4 * cupy.cos(fr) / cupy.exp(bw * T) / g_act + a[tid] = -8 * cupy.cos(fr) / cupy.exp(bw * T) + elif tid == 2: + b[tid] = 6 * T ** 4 * cupy.cos(2 * fr) / cupy.exp(2 * bw * T) / g_act + a[tid] = 4 * (4 + 3 * cupy.cos(2 * fr)) / cupy.exp(2 * bw * T) + elif tid == 3: + b[tid] = -4 * T ** 4 * cupy.cos(3 * fr) / cupy.exp(3 * bw * T) / g_act + a[tid] = -8 * (6 * cupy.cos(fr) + cupy.cos(3 * fr)) + a[tid] /= cupy.exp(3 * bw * T) + elif tid == 4: + b[tid] = T ** 4 * cupy.cos(4 * fr) / cupy.exp(4 * bw * T) / g_act + a[tid] = 2 * (18 + 16 * cupy.cos(2 * fr) + cupy.cos(4 * fr)) + a[tid] /= cupy.exp(4 * bw * T) + elif tid == 5: + a[tid] = -8 * (6 * cupy.cos(fr) + cupy.cos(3 * fr)) + a[tid] /= cupy.exp(5 * bw * T) + 
elif tid == 6: + a[tid] = 4 * (4 + 3 * cupy.cos(2 * fr)) / cupy.exp(6 * bw * T) + elif tid == 7: + a[tid] = -8 * cupy.cos(fr) / cupy.exp(7 * bw * T) + elif tid == 8: + a[tid] = cupy.exp(-8 * bw * T) + + +def gammatone(freq, ftype, order=None, numtaps=None, fs=None): + """ + Gammatone filter design. + + This function computes the coefficients of an FIR or IIR gammatone + digital filter [1]_. + + Parameters + ---------- + freq : float + Center frequency of the filter (expressed in the same units + as `fs`). + ftype : {'fir', 'iir'} + The type of filter the function generates. If 'fir', the function + will generate an Nth order FIR gammatone filter. If 'iir', the + function will generate an 8th order digital IIR filter, modeled as + as 4th order gammatone filter. + order : int, optional + The order of the filter. Only used when ``ftype='fir'``. + Default is 4 to model the human auditory system. Must be between + 0 and 24. + numtaps : int, optional + Length of the filter. Only used when ``ftype='fir'``. + Default is ``fs*0.015`` if `fs` is greater than 1000, + 15 if `fs` is less than or equal to 1000. + fs : float, optional + The sampling frequency of the signal. `freq` must be between + 0 and ``fs/2``. Default is 2. + + Returns + ------- + b, a : ndarray, ndarray + Numerator (``b``) and denominator (``a``) polynomials of the filter. + + Raises + ------ + ValueError + If `freq` is less than or equal to 0 or greater than or equal to + ``fs/2``, if `ftype` is not 'fir' or 'iir', if `order` is less than + or equal to 0 or greater than 24 when ``ftype='fir'`` + + See Also + -------- + firwin + iirfilter + + References + ---------- + .. [1] Slaney, Malcolm, "An Efficient Implementation of the + Patterson-Holdsworth Auditory Filter Bank", Apple Computer + Technical Report 35, 1993, pp.3-8, 34-39. 
+ """ + # Converts freq to float + freq = float(freq) + + # Set sampling rate if not passed + if fs is None: + fs = 2 + fs = float(fs) + + # Check for invalid cutoff frequency or filter type + ftype = ftype.lower() + filter_types = ['fir', 'iir'] + if not 0 < freq < fs / 2: + raise ValueError("The frequency must be between 0 and {}" + " (nyquist), but given {}.".format(fs / 2, freq)) + if ftype not in filter_types: + raise ValueError('ftype must be either fir or iir.') + + # Calculate FIR gammatone filter + if ftype == 'fir': + # Set order and numtaps if not passed + if order is None: + order = 4 + order = operator.index(order) + + if numtaps is None: + numtaps = max(int(fs * 0.015), 15) + numtaps = operator.index(numtaps) + + # Check for invalid order + if not 0 < order <= 24: + raise ValueError("Invalid order: order must be > 0 and <= 24.") + + # Gammatone impulse response settings + t = cupy.arange(numtaps) / fs + bw = 1.019 * _hz_to_erb(freq) + + # Calculate the FIR gammatone filter + b = (t ** (order - 1)) * cupy.exp(-2 * cupy.pi * bw * t) + b *= cupy.cos(2 * cupy.pi * freq * t) + + # Scale the FIR filter so the frequency response is 1 at cutoff + scale_factor = 2 * (2 * cupy.pi * bw) ** (order) + scale_factor /= float_factorial(order - 1) + scale_factor /= fs + b *= scale_factor + a = [1.0] + + # Calculate IIR gammatone filter + elif ftype == 'iir': + # Raise warning if order and/or numtaps is passed + if order is not None: + warnings.warn('order is not used for IIR gammatone filter.') + if numtaps is not None: + warnings.warn('numtaps is not used for IIR gammatone filter.') + + # Create empty filter coefficient lists + b = cupy.empty(5) + a = cupy.empty(9) + _gammatone_iir_kernel((9,), (1,), (fs, freq, b, a)) + + return b, a diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/_fir_filter_design.py b/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/_fir_filter_design.py new file mode 100644 index 
0000000000000000000000000000000000000000..06302075f6bfe293f5c29879cf9f1ff78b253032 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/_fir_filter_design.py @@ -0,0 +1,918 @@ +"""Functions for FIR filter design.""" +import math + +from cupy.fft import fft, ifft +from cupy.linalg import solve, lstsq, LinAlgError +from cupyx.scipy.linalg import toeplitz, hankel +import cupyx +from cupyx.scipy.signal.windows import get_window + +import cupy +import numpy + + +__all__ = ["firls", "minimum_phase"] + + +def kaiser_beta(a): + """Compute the Kaiser parameter `beta`, given the attenuation `a`. + + Parameters + ---------- + a : float + The desired attenuation in the stopband and maximum ripple in + the passband, in dB. This should be a *positive* number. + + Returns + ------- + beta : float + The `beta` parameter to be used in the formula for a Kaiser window. + + References + ---------- + Oppenheim, Schafer, "Discrete-Time Signal Processing", p.475-476. + + See Also + -------- + scipy.signal.kaiser_beta + + """ + if a > 50: + beta = 0.1102 * (a - 8.7) + elif a > 21: + beta = 0.5842 * (a - 21) ** 0.4 + 0.07886 * (a - 21) + else: + beta = 0.0 + return beta + + +def kaiser_atten(numtaps, width): + """Compute the attenuation of a Kaiser FIR filter. + + Given the number of taps `N` and the transition width `width`, compute the + attenuation `a` in dB, given by Kaiser's formula: + + a = 2.285 * (N - 1) * pi * width + 7.95 + + Parameters + ---------- + numtaps : int + The number of taps in the FIR filter. + width : float + The desired width of the transition region between passband and + stopband (or, in general, at any discontinuity) for the filter, + expressed as a fraction of the Nyquist frequency. + + Returns + ------- + a : float + The attenuation of the ripple, in dB. 
+ + See Also + -------- + scipy.signal.kaiser_atten + """ + a = 2.285 * (numtaps - 1) * cupy.pi * width + 7.95 + return a + + +def kaiserord(ripple, width): + """ + Determine the filter window parameters for the Kaiser window method. + + The parameters returned by this function are generally used to create + a finite impulse response filter using the window method, with either + `firwin` or `firwin2`. + + Parameters + ---------- + ripple : float + Upper bound for the deviation (in dB) of the magnitude of the + filter's frequency response from that of the desired filter (not + including frequencies in any transition intervals). That is, if w + is the frequency expressed as a fraction of the Nyquist frequency, + A(w) is the actual frequency response of the filter and D(w) is the + desired frequency response, the design requirement is that:: + + abs(A(w) - D(w))) < 10**(-ripple/20) + + for 0 <= w <= 1 and w not in a transition interval. + width : float + Width of transition region, normalized so that 1 corresponds to pi + radians / sample. That is, the frequency is expressed as a fraction + of the Nyquist frequency. + + Returns + ------- + numtaps : int + The length of the Kaiser window. + beta : float + The beta parameter for the Kaiser window. + + See Also + -------- + scipy.signal.kaiserord + + + """ + A = abs(ripple) # in case somebody is confused as to what's meant + if A < 8: + # Formula for N is not valid in this range. + raise ValueError("Requested maximum ripple attenuation %f is too " + "small for the Kaiser formula." % A) + beta = kaiser_beta(A) + + # Kaiser's formula (as given in Oppenheim and Schafer) is for the filter + # order, so we have to add 1 to get the number of taps. 
+ numtaps = (A - 7.95) / 2.285 / (cupy.pi * width) + 1 + + return int(numpy.ceil(numtaps)), beta + + +_firwin_kernel = cupy.ElementwiseKernel( + "float64 win, int32 numtaps, raw float64 bands, int32 steps, bool scale", + "float64 h, float64 hc", + """ + const double m { static_cast( i ) - alpha ? + static_cast( i ) - alpha : 1.0e-20 }; + + double temp {}; + double left {}; + double right {}; + + for ( int s = 0; s < steps; s++ ) { + left = bands[s * 2 + 0] ? bands[s * 2 + 0] : 1.0e-20; + right = bands[s * 2 + 1] ? bands[s * 2 + 1] : 1.0e-20; + + temp += right * ( sin( right * m * M_PI ) / ( right * m * M_PI ) ); + temp -= left * ( sin( left * m * M_PI ) / ( left * m * M_PI ) ); + } + + temp *= win; + h = temp; + + double scale_frequency {}; + + if ( scale ) { + left = bands[0]; + right = bands[1]; + + if ( left == 0 ) { + scale_frequency = 0.0; + } else if ( right == 1 ) { + scale_frequency = 1.0; + } else { + scale_frequency = 0.5 * ( left + right ); + } + double c { cos( M_PI * m * scale_frequency ) }; + hc = temp * c; + } + """, + "_firwin_kernel", + options=("-std=c++11",), + loop_prep="const double alpha { 0.5 * ( numtaps - 1 ) };", +) + + +# Scipy <= 1.12 has a deprecated `nyq` argument (nyq = fs/2). +# Remove it here, to be forward-looking. +def firwin( + numtaps, + cutoff, + width=None, + window="hamming", + pass_zero=True, + scale=True, + fs=2, +): + """ + FIR filter design using the window method. + + This function computes the coefficients of a finite impulse response + filter. The filter will have linear phase; it will be Type I if + `numtaps` is odd and Type II if `numtaps` is even. + + Type II filters always have zero response at the Nyquist frequency, so a + ValueError exception is raised if firwin is called with `numtaps` even and + having a passband whose right end is at the Nyquist frequency. + + Parameters + ---------- + numtaps : int + Length of the filter (number of coefficients, i.e. the filter + order + 1). 
`numtaps` must be odd if a passband includes the + Nyquist frequency. + cutoff : float or 1D array_like + Cutoff frequency of filter (expressed in the same units as `fs`) + OR an array of cutoff frequencies (that is, band edges). In the + latter case, the frequencies in `cutoff` should be positive and + monotonically increasing between 0 and `fs/2`. The values 0 and + `fs/2` must not be included in `cutoff`. + width : float or None, optional + If `width` is not None, then assume it is the approximate width + of the transition region (expressed in the same units as `fs`) + for use in Kaiser FIR filter design. In this case, the `window` + argument is ignored. + window : string or tuple of string and parameter values, optional + Desired window to use. See `cusignal.get_window` for a list + of windows and required parameters. + pass_zero : {True, False, 'bandpass', 'lowpass', 'highpass', 'bandstop'}, + optional + If True, the gain at the frequency 0 (i.e. the "DC gain") is 1. + If False, the DC gain is 0. Can also be a string argument for the + desired filter type (equivalent to ``btype`` in IIR design functions). + scale : bool, optional + Set to True to scale the coefficients so that the frequency + response is exactly unity at a certain frequency. + That frequency is either: + + - 0 (DC) if the first passband starts at 0 (i.e. pass_zero + is True) + - `fs/2` (the Nyquist frequency) if the first passband ends at + `fs/2` (i.e the filter is a single band highpass filter); + center of first passband otherwise + fs : float, optional + The sampling frequency of the signal. Each frequency in `cutoff` + must be between 0 and ``fs/2``. Default is 2. + + Returns + ------- + h : (numtaps,) ndarray + Coefficients of length `numtaps` FIR filter. 
+ + Raises + ------ + ValueError + If any value in `cutoff` is less than or equal to 0 or greater + than or equal to ``fs/2``, if the values in `cutoff` are not strictly + monotonically increasing, or if `numtaps` is even but a passband + includes the Nyquist frequency. + + See Also + -------- + firwin2 + firls + minimum_phase + remez + + Examples + -------- + Low-pass from 0 to f: + + >>> import cusignal + >>> numtaps = 3 + >>> f = 0.1 + >>> cusignal.firwin(numtaps, f) + array([ 0.06799017, 0.86401967, 0.06799017]) + + Use a specific window function: + + >>> cusignal.firwin(numtaps, f, window='nuttall') + array([ 3.56607041e-04, 9.99286786e-01, 3.56607041e-04]) + + High-pass ('stop' from 0 to f): + + >>> cusignal.firwin(numtaps, f, pass_zero=False) + array([-0.00859313, 0.98281375, -0.00859313]) + + Band-pass: + + >>> f1, f2 = 0.1, 0.2 + >>> cusignal.firwin(numtaps, [f1, f2], pass_zero=False) + array([ 0.06301614, 0.88770441, 0.06301614]) + + Band-stop: + + >>> cusignal.firwin(numtaps, [f1, f2]) + array([-0.00801395, 1.0160279 , -0.00801395]) + + Multi-band (passbands are [0, f1], [f2, f3] and [f4, 1]): + + >>> f3, f4 = 0.3, 0.4 + >>> cusignal.firwin(numtaps, [f1, f2, f3, f4]) + array([-0.01376344, 1.02752689, -0.01376344]) + + Multi-band (passbands are [f1, f2] and [f3,f4]): + + >>> cusignal.firwin(numtaps, [f1, f2, f3, f4], pass_zero=False) + array([ 0.04890915, 0.91284326, 0.04890915]) + + """ + + nyq = 0.5 * fs + + cutoff = cupy.atleast_1d(cutoff) / float(nyq) + + # Check for invalid input. + if cutoff.ndim > 1: + raise ValueError( + "The cutoff argument must be at most " "one-dimensional.") + if cutoff.size == 0: + raise ValueError("At least one cutoff frequency must be given.") + if cutoff.min() <= 0 or cutoff.max() >= 1: + raise ValueError( + "Invalid cutoff frequency: frequencies must be " + "greater than 0 and less than nyq." 
+ ) + if cupy.any(cupy.diff(cutoff) <= 0): + raise ValueError( + "Invalid cutoff frequencies: the frequencies " + "must be strictly increasing." + ) + + if width is not None: + # A width was given. Find the beta parameter of the Kaiser window + # and set `window`. This overrides the value of `window` passed in. + atten = kaiser_atten(numtaps, float(width) / nyq) + beta = kaiser_beta(atten) + window = ("kaiser", beta) + + if isinstance(pass_zero, str): + if pass_zero in ("bandstop", "lowpass"): + if pass_zero == "lowpass": + if cutoff.size != 1: + raise ValueError( + "cutoff must have one element if " + 'pass_zero=="lowpass", got %s' % (cutoff.shape,) + ) + elif cutoff.size <= 1: + raise ValueError( + "cutoff must have at least two elements if " + 'pass_zero=="bandstop", got %s' % (cutoff.shape,) + ) + pass_zero = True + elif pass_zero in ("bandpass", "highpass"): + if pass_zero == "highpass": + if cutoff.size != 1: + raise ValueError( + "cutoff must have one element if " + 'pass_zero=="highpass", got %s' % (cutoff.shape,) + ) + elif cutoff.size <= 1: + raise ValueError( + "cutoff must have at least two elements if " + 'pass_zero=="bandpass", got %s' % (cutoff.shape,) + ) + pass_zero = False + else: + raise ValueError( + 'pass_zero must be True, False, "bandpass", ' + '"lowpass", "highpass", or "bandstop", got ' + "{}".format(pass_zero) + ) + + pass_nyquist = bool(cutoff.size & 1) ^ pass_zero + + if pass_nyquist and numtaps % 2 == 0: + raise ValueError( + "A filter with an even number of coefficients must " + "have zero response at the Nyquist rate." + ) + + # Insert 0 and/or 1 at the ends of cutoff so that the length of cutoff + # is even, and each pair in cutoff corresponds to passband. + cutoff = cupy.hstack(([0.0] * pass_zero, cutoff, [1.0] * pass_nyquist)) + + # `bands` is a 2D array; each row gives the left and right edges of + # a passband. 
+ bands = cutoff.reshape(-1, 2) + + win = get_window(window, numtaps, fftbins=False) + h, hc = _firwin_kernel(win, numtaps, bands, bands.shape[0], scale) + if scale: + s = cupy.sum(hc) + h /= s + + # Build up the coefficients. + alpha = 0.5 * (numtaps - 1) + m = cupy.arange(0, numtaps) - alpha + h = 0 + for left, right in bands: + h += right * cupy.sinc(right * m) + h -= left * cupy.sinc(left * m) + + h *= win + + # Now handle scaling if desired. + if scale: + # Get the first passband. + left, right = bands[0] + if left == 0: + scale_frequency = 0.0 + elif right == 1: + scale_frequency = 1.0 + else: + scale_frequency = 0.5 * (left + right) + c = cupy.cos(cupy.pi * m * scale_frequency) + s = cupy.sum(h * c) + h /= s + + return h + + +def firwin2( + numtaps, + freq, + gain, + nfreqs=None, + window="hamming", + nyq=None, + antisymmetric=False, + fs=2.0, +): + """ + FIR filter design using the window method. + + From the given frequencies `freq` and corresponding gains `gain`, + this function constructs an FIR filter with linear phase and + (approximately) the given frequency response. + + Parameters + ---------- + numtaps : int + The number of taps in the FIR filter. `numtaps` must be less than + `nfreqs`. + freq : array_like, 1-D + The frequency sampling points. Typically 0.0 to 1.0 with 1.0 being + Nyquist. The Nyquist frequency is half `fs`. + The values in `freq` must be nondecreasing. A value can be repeated + once to implement a discontinuity. The first value in `freq` must + be 0, and the last value must be ``fs/2``. Values 0 and ``fs/2`` must + not be repeated. + gain : array_like + The filter gains at the frequency sampling points. Certain + constraints to gain values, depending on the filter type, are applied, + see Notes for details. + nfreqs : int, optional + The size of the interpolation mesh used to construct the filter. + For most efficient behavior, this should be a power of 2 plus 1 + (e.g, 129, 257, etc). 
 The default is one more than the smallest
        power of 2 that is not less than `numtaps`. `nfreqs` must be greater
        than `numtaps`.
    window : string or (string, float) or float, or None, optional
        Window function to use. Default is "hamming". See
        `scipy.signal.get_window` for the complete list of possible values.
        If None, no window function is applied.
    antisymmetric : bool, optional
        Whether resulting impulse response is symmetric/antisymmetric.
        See Notes for more details.
    fs : float, optional
        The sampling frequency of the signal. Each frequency in `cutoff`
        must be between 0 and ``fs/2``. Default is 2.

    Returns
    -------
    taps : ndarray
        The filter coefficients of the FIR filter, as a 1-D array of length
        `numtaps`.

    See Also
    --------
    scipy.signal.firwin2
    firls
    firwin
    minimum_phase
    remez

    Notes
    -----
    From the given set of frequencies and gains, the desired response is
    constructed in the frequency domain. The inverse FFT is applied to the
    desired response to create the associated convolution kernel, and the
    first `numtaps` coefficients of this kernel, scaled by `window`, are
    returned.
    The FIR filter will have linear phase. The type of filter is determined by
    the value of 'numtaps` and `antisymmetric` flag.
    There are four possible combinations:

    - odd `numtaps`, `antisymmetric` is False, type I filter is produced
    - even `numtaps`, `antisymmetric` is False, type II filter is produced
    - odd `numtaps`, `antisymmetric` is True, type III filter is produced
    - even `numtaps`, `antisymmetric` is True, type IV filter is produced

    Magnitude response of all but type I filters are subjects to following
    constraints:

    - type II -- zero at the Nyquist frequency
    - type III -- zero at zero and Nyquist frequencies
    - type IV -- zero at zero frequency
    """
    # NOTE(review): the deprecated `nyq` parameter is accepted for
    # backward compatibility but never read -- the Nyquist rate is always
    # derived from `fs` here.
    nyq = 0.5 * fs

    # --- Input validation: lengths, ordering, and endpoint constraints. ---
    if len(freq) != len(gain):
        raise ValueError("freq and gain must be of same length.")

    if nfreqs is not None and numtaps >= nfreqs:
        raise ValueError(
            (
                "ntaps must be less than nfreqs, but firwin2 was "
                "called with ntaps=%d and nfreqs=%s"
            )
            % (numtaps, nfreqs)
        )

    if freq[0] != 0 or freq[-1] != nyq:
        raise ValueError("freq must start with 0 and end with fs/2.")
    d = cupy.diff(freq)
    if (d < 0).any():
        raise ValueError("The values in freq must be nondecreasing.")
    # d2 == 0 detects a value appearing three times in a row
    # (two consecutive zero gaps).
    d2 = d[:-1] + d[1:]
    if (d2 == 0).any():
        raise ValueError("A value in freq must not occur more than twice.")
    if freq[1] == 0:
        raise ValueError("Value 0 must not be repeated in freq")
    if freq[-2] == nyq:
        raise ValueError("Value fs/2 must not be repeated in freq")

    # Determine the linear-phase filter type (I-IV) from parity of numtaps
    # and the antisymmetric flag; see Notes in the docstring.
    if antisymmetric:
        if numtaps % 2 == 0:
            ftype = 4
        else:
            ftype = 3
    else:
        if numtaps % 2 == 0:
            ftype = 2
        else:
            ftype = 1

    # Enforce the structural zero-gain constraints of types II-IV.
    if ftype == 2 and gain[-1] != 0.0:
        raise ValueError(
            "A Type II filter must have zero gain at the " "Nyquist frequency."
        )
    elif ftype == 3 and (gain[0] != 0.0 or gain[-1] != 0.0):
        raise ValueError(
            "A Type III filter must have zero gain at zero "
            "and Nyquist frequencies."
        )
    elif ftype == 4 and gain[0] != 0.0:
        raise ValueError(
            "A Type IV filter must have zero gain at zero " "frequency.")

    if nfreqs is None:
        nfreqs = 1 + 2 ** int(math.ceil(math.log(numtaps, 2)))

    if (d == 0).any():
        # Tweak any repeated values in freq so that interp works.
        freq = cupy.array(freq, copy=True)
        eps = cupy.finfo(float).eps * nyq
        for k in range(len(freq) - 1):
            if freq[k] == freq[k + 1]:
                freq[k] = freq[k] - eps
                freq[k + 1] = freq[k + 1] + eps
        # Check if freq is strictly increasing after tweak
        d = cupy.diff(freq)
        if (d <= 0).any():
            raise ValueError(
                "freq cannot contain numbers that are too close "
                "(within eps * (fs/2): "
                "{}) to a repeated value".format(eps)
            )

    # Linearly interpolate the desired response on a uniform mesh `x`.
    x = cupy.linspace(0.0, nyq, nfreqs)
    fx = cupy.interp(x, freq, gain)

    # Adjust the phases of the coefficients so that the first `ntaps` of the
    # inverse FFT are the desired filter coefficients.
    shift = cupy.exp(-(numtaps - 1) / 2.0 * 1.0j * math.pi * x / nyq)
    if ftype > 2:
        # Types III/IV are antisymmetric: add a 90-degree phase shift.
        shift *= 1j

    fx2 = fx * shift

    # Use irfft to compute the inverse FFT.
    out_full = cupy.fft.irfft(fx2)

    if window is not None:
        # Create the window to apply to the filter coefficients.
        wind = get_window(window, numtaps, fftbins=False)
    else:
        wind = 1

    # Keep only the first `numtaps` coefficients in `out`, and multiply by
    # the window.
    out = out_full[:numtaps] * wind

    if ftype == 3:
        # Type III has a structural zero at the center tap.
        out[out.size // 2] = 0.0

    return out


# Scipy <= 1.12 has a deprecated `nyq` argument (nyq = fs/2).
# Remove it here, to be forward-looking.
def firls(numtaps, bands, desired, weight=None, fs=2):
    """
    FIR filter design using least-squares error minimization.
    Calculate the filter coefficients for the linear-phase finite
    impulse response (FIR) filter which has the best approximation
    to the desired frequency response described by `bands` and
    `desired` in the least squares sense (i.e., the integral of the
    weighted mean-squared error within the specified bands is
    minimized).

    Parameters
    ----------
    numtaps : int
        The number of taps in the FIR filter. `numtaps` must be odd.
    bands : array_like
        A monotonic nondecreasing sequence containing the band edges in
        Hz. All elements must be non-negative and less than or equal to
        the Nyquist frequency given by `fs`/2. The bands are specified as
        frequency pairs, thus, if using a 1D array, its length must be
        even, e.g., `cupy.array([0, 1, 2, 3, 4, 5])`. Alternatively, the
        bands can be specified as an nx2 sized 2D array, where n is the
        number of bands, e.g, `cupy.array([[0, 1], [2, 3], [4, 5]])`.
        All elements of `bands` must be monotonically nondecreasing, have
        width > 0, and must not overlap. (This is not checked by the routine).
    desired : array_like
        A sequence the same size as `bands` containing the desired gain
        at the start and end point of each band.
        All elements must be non-negative (this is not checked by the routine).
    weight : array_like, optional
        A relative weighting to give to each band region when solving
        the least squares problem. `weight` has to be half the size of
        `bands`.
        All elements must be non-negative (this is not checked by the routine).
    fs : float, optional
        The sampling frequency of the signal. Each frequency in `bands`
        must be between 0 and ``fs/2`` (inclusive). Default is 2.

    Returns
    -------
    coeffs : ndarray
        Coefficients of the optimal (in a least squares sense) FIR filter.

    See Also
    --------
    firwin
    firwin2
    minimum_phase
    remez
    scipy.signal.firls
    """
    nyq = 0.5 * fs

    numtaps = int(numtaps)
    if numtaps % 2 == 0 or numtaps < 1:
        raise ValueError("numtaps must be odd and >= 1")
    # M is the half-order; only M+1 unique coefficients are solved for,
    # the rest follow from linear-phase symmetry.
    M = (numtaps-1) // 2

    # normalize bands 0->1 and make it 2 columns
    nyq = float(nyq)
    if nyq <= 0:
        raise ValueError('nyq must be positive, got %s <= 0.' % nyq)
    bands = cupy.asarray(bands).flatten() / nyq
    if len(bands) % 2 != 0:
        raise ValueError("bands must contain frequency pairs.")
    if (bands < 0).any() or (bands > 1).any():
        raise ValueError("bands must be between 0 and 1 relative to Nyquist")
    bands.shape = (-1, 2)

    # check remaining params
    desired = cupy.asarray(desired).flatten()
    if bands.size != desired.size:
        raise ValueError("desired must have one entry per frequency, got %s "
                         "gains for %s frequencies."
                         % (desired.size, bands.size))
    desired.shape = (-1, 2)
    # NOTE(review): the monotonicity / overlap / non-negativity checks that
    # scipy.signal.firls performs are intentionally disabled below (the
    # docstring says "not checked by the routine").
    # if (cupy.diff(bands) <= 0).any() or (cupy.diff(bands[:, 0]) < 0).any():
    #     raise ValueError("bands must be monotonically nondecreasing and have"
    #                      " width > 0.")
    # if (bands[:-1, 1] > bands[1:, 0]).any():
    #     raise ValueError("bands must not overlap.")
    # if (desired < 0).any():
    #     raise ValueError("desired must be non-negative.")
    if weight is None:
        weight = cupy.ones(len(desired))
    weight = cupy.asarray(weight).flatten()
    if len(weight) != len(desired):
        raise ValueError("weight must be the same size as the number of "
                         "band pairs ({}).".format(len(bands)))
    # if (weight < 0).any():
    #     raise ValueError("weight must be non-negative.")

    # Set up the linear matrix equation to be solved, Qa = b

    # We can express Q(k,n) = 0.5 Q1(k,n) + 0.5 Q2(k,n)
    # where Q1(k,n)=q(k-n) and Q2(k,n)=q(k+n), i.e. a Toeplitz plus Hankel.

    # We omit the factor of 0.5 above, instead adding it during coefficient
    # calculation.

    # We also omit the 1/π from both Q and b equations, as they cancel
    # during solving.

    # We have that:
    #     q(n) = 1/π ∫W(ω)cos(nω)dω (over 0->π)
    # Using our normalization ω=πf and with a constant weight W over each
    # interval f1->f2 we get:
    #     q(n) = W∫cos(πnf)df (0->1) = Wf sin(πnf)/πnf
    # integrated over each f1->f2 pair (i.e., value at f2 - value at f1).
    n = cupy.arange(numtaps)[:, cupy.newaxis, cupy.newaxis]
    q = cupy.dot(cupy.diff(cupy.sinc(bands * n) *
                           bands, axis=2)[:, :, 0], weight)

    # Now we assemble our sum of Toeplitz and Hankel
    Q1 = toeplitz(q[:M+1])
    Q2 = hankel(q[:M+1], q[M:])
    Q = Q1 + Q2

    # Now for b(n) we have that:
    #     b(n) = 1/π ∫ W(ω)D(ω)cos(nω)dω (over 0->π)
    # Using our normalization ω=πf and with a constant weight W over each
    # interval and a linear term for D(ω) we get (over each f1->f2 interval):
    #     b(n) = W ∫ (mf+c)cos(πnf)df
    #          = f(mf+c)sin(πnf)/πnf + mf**2 cos(nπf)/(πnf)**2
    # integrated over each f1->f2 pair (i.e., value at f2 - value at f1).
    n = n[:M + 1]  # only need this many coefficients here
    # Choose m and c such that we are at the start and end weights
    m = (cupy.diff(desired, axis=1) / cupy.diff(bands, axis=1))
    c = desired[:, [0]] - bands[:, [0]] * m
    b = bands * (m*bands + c) * cupy.sinc(bands * n)
    # Use L'Hospital's rule here for cos(nπf)/(πnf)**2 @ n=0
    b[0] -= m * bands * bands / 2.
    b[1:] += m * cupy.cos(n[1:] * cupy.pi * bands) / (cupy.pi * n[1:]) ** 2
    b = cupy.diff(b, axis=2)[:, :, 0] @ weight

    # Now we can solve the equation : XXX CuPy failure modes (?)
    with cupyx.errstate(linalg="raise"):
        try:
            a = solve(Q, b)
        except LinAlgError:
            # in case Q is rank deficient
            a = lstsq(Q, b, rcond=None)[0]

    # XXX: scipy.signal does this:
    # try:  # try the fast way
    #     with warnings.catch_warnings(record=True) as w:
    #         warnings.simplefilter('always')
    #         a = solve(Q, b)
    #     for ww in w:
    #         if (ww.category == LinAlgWarning and
    #                 str(ww.message).startswith('Ill-conditioned matrix')):
    #             raise LinAlgError(str(ww.message))
    # except LinAlgError:  # in case Q is rank deficient
    #     a = lstsq(Q, b)[0]

    # make coefficients symmetric (linear phase)
    coeffs = cupy.hstack((a[:0:-1], 2 * a[0], a[1:]))
    return coeffs


def _dhtm(mag):
    """Compute the modified 1-D discrete Hilbert transform

    Parameters
    ----------
    mag : ndarray
        The magnitude spectrum. Should be 1-D with an even length, and
        preferably a fast length for FFT/IFFT.

    Returns
    -------
    recon : ndarray
        Real reconstruction with minimum-phase spectrum matching `mag`.
    """
    # Adapted based on code by Niranjan Damera-Venkata,
    # Brian L. Evans and Shawn R. McCaslin (see refs for `minimum_phase`)
    sig = cupy.zeros(len(mag))
    # Leave Nyquist and DC at 0, knowing np.abs(fftfreq(N)[midpt]) == 0.5
    midpt = len(mag) // 2
    sig[1:midpt] = 1
    sig[midpt + 1:] = -1
    # eventually if we want to support complex filters, we will need a
    # cupy.abs() on the mag inside the log, and should remove the .real
    recon = ifft(mag * cupy.exp(fft(sig * ifft(cupy.log(mag))))).real
    return recon


def minimum_phase(h, method='homomorphic', n_fft=None):
    """Convert a linear-phase FIR filter to minimum phase

    Parameters
    ----------
    h : array
        Linear-phase FIR filter coefficients.
    method : {'hilbert', 'homomorphic'}
        The method to use:

            'homomorphic' (default)
                This method [4]_ [5]_ works best with filters with an
                odd number of taps, and the resulting minimum phase filter
                will have a magnitude response that approximates the square
                root of the original filter's magnitude response.
            'hilbert'
                This method [1]_ is designed to be used with equiripple
                filters (e.g., from `remez`) with unity or zero gain
                regions.

    n_fft : int
        The number of points to use for the FFT. Should be at least a
        few times larger than the signal length (see Notes).

    Returns
    -------
    h_minimum : array
        The minimum-phase version of the filter, with length
        ``(length(h) + 1) // 2``.

    See Also
    --------
    scipy.signal.minimum_phase

    Notes
    -----
    Both the Hilbert [1]_ or homomorphic [4]_ [5]_ methods require selection
    of an FFT length to estimate the complex cepstrum of the filter.

    In the case of the Hilbert method, the deviation from the ideal
    spectrum ``epsilon`` is related to the number of stopband zeros
    ``n_stop`` and FFT length ``n_fft`` as::

        epsilon = 2. * n_stop / n_fft

    For example, with 100 stopband zeros and a FFT length of 2048,
    ``epsilon = 0.0976``. If we conservatively assume that the number of
    stopband zeros is one less than the filter length, we can take the FFT
    length to be the next power of 2 that satisfies ``epsilon=0.01`` as::

        n_fft = 2 ** int(np.ceil(np.log2(2 * (len(h) - 1) / 0.01)))

    This gives reasonable results for both the Hilbert and homomorphic
    methods, and gives the value used when ``n_fft=None``.

    Alternative implementations exist for creating minimum-phase filters,
    including zero inversion [2]_ and spectral factorization [3]_ [4]_ [5]_.
    For more information, see:

        http://dspguru.com/dsp/howtos/how-to-design-minimum-phase-fir-filters

    References
    ----------
    .. [1] N. Damera-Venkata and B. L. Evans, "Optimal design of real and
           complex minimum phase digital FIR filters," Acoustics, Speech,
           and Signal Processing, 1999. Proceedings., 1999 IEEE International
           Conference on, Phoenix, AZ, 1999, pp. 1145-1148 vol.3.
           DOI:10.1109/ICASSP.1999.756179
    .. [2] X. Chen and T. W. Parks, "Design of optimal minimum phase FIR
           filters by direct factorization," Signal Processing,
           vol. 10, no. 4, pp. 369-383, Jun. 1986.
    .. [3] T. Saramaki, "Finite Impulse Response Filter Design," in
           Handbook for Digital Signal Processing, chapter 4,
           New York: Wiley-Interscience, 1993.
    .. [4] J. S. Lim, Advanced Topics in Signal Processing.
           Englewood Cliffs, N.J.: Prentice Hall, 1988.
    .. [5] A. V. Oppenheim, R. W. Schafer, and J. R. Buck,
           "Discrete-Time Signal Processing," 2nd edition.
           Upper Saddle River, N.J.: Prentice Hall, 1999.

    """  # noqa
    if cupy.iscomplexobj(h):
        raise ValueError('Complex filters not supported')
    # NOTE(review): this condition rejects h.size == 2 even though the
    # message says "at least 2 samples" -- the effective minimum is 3.
    if h.ndim != 1 or h.size <= 2:
        raise ValueError('h must be 1-D and at least 2 samples long')
    n_half = len(h) // 2
    if not cupy.allclose(h[-n_half:][::-1], h[:n_half]):
        import warnings
        warnings.warn('h does not appear to by symmetric, conversion may '
                      'fail', RuntimeWarning)
    if not isinstance(method, str) or method not in \
            ('homomorphic', 'hilbert',):
        raise ValueError('method must be "homomorphic" or "hilbert", got %r'
                         % (method,))
    if n_fft is None:
        # Default FFT length: next power of 2 satisfying epsilon = 0.01
        # (see Notes).
        n_fft = 2 ** int(cupy.ceil(cupy.log2(2 * (len(h) - 1) / 0.01)))
    n_fft = int(n_fft)
    if n_fft < len(h):
        raise ValueError('n_fft must be at least len(h)==%s' % len(h))
    if method == 'hilbert':
        # Compensate the linear-phase delay, then rescale the response into
        # [0, 1] before taking the square root.
        w = cupy.arange(n_fft) * (2 * cupy.pi / n_fft * n_half)
        H = cupy.real(fft(h, n_fft) * cupy.exp(1j * w))
        dp = max(H) - 1
        ds = 0 - min(H)
        S = 4. / (cupy.sqrt(1 + dp + ds) + cupy.sqrt(1 - dp + ds)) ** 2
        H += ds
        H *= S
        H = cupy.sqrt(H, out=H)
        H += 1e-10  # ensure that the log does not explode
        h_minimum = _dhtm(H)
    else:  # method == 'homomorphic'
        # zero-pad; calculate the DFT
        h_temp = cupy.abs(fft(h, n_fft))
        # take 0.25*log(|H|**2) = 0.5*log(|H|)
        h_temp += 1e-7 * h_temp[h_temp > 0].min()  # don't let log blow up
        cupy.log(h_temp, out=h_temp)
        h_temp *= 0.5
        # IDFT
        h_temp = ifft(h_temp).real
        # multiply pointwise by the homomorphic filter
        # lmin[n] = 2u[n] - d[n]
        win = cupy.zeros(n_fft)
        win[0] = 1
        stop = (len(h) + 1) // 2
        win[1:stop] = 2
        if len(h) % 2:
            win[stop] = 1
        h_temp *= win
        h_temp = ifft(cupy.exp(fft(h_temp)))
        h_minimum = h_temp.real
    n_out = n_half + len(h) % 2
    return h_minimum[:n_out]
diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/_iir_filter_conversions.py b/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/_iir_filter_conversions.py
new file mode 100644
index 0000000000000000000000000000000000000000..82a0e0af5e28b21279d200d3f5aeab147ea773e5
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/_iir_filter_conversions.py
@@ -0,0 +1,2364 @@
""" IIR filter conversion utilities.
+ +Split off _filter_design.py +""" +import warnings +import math +from math import pi, prod + +import cupy +from cupyx.scipy.special import binom as comb +import cupyx.scipy.special as special +from cupyx.scipy.signal import _optimize + +from cupyx.scipy.signal._polyutils import roots, poly +from cupyx.scipy.signal._lti_conversion import abcd_normalize + + +class BadCoefficients(UserWarning): + """Warning about badly conditioned filter coefficients""" + pass + + +def _trim_zeros(filt, trim='fb'): + # https://github.com/numpy/numpy/blob/v1.24.0/numpy/lib/function_base.py#L1800-L1850 + + first = 0 + if 'f' in trim: + for i in filt: + if i != 0.: + break + else: + first = first + 1 + + last = len(filt) + if 'b' in trim: + for i in filt[::-1]: + if i != 0.: + break + else: + last = last - 1 + return filt[first:last] + + +def _align_nums(nums): + """Aligns the shapes of multiple numerators. + + Given an array of numerator coefficient arrays [[a_1, a_2,..., + a_n],..., [b_1, b_2,..., b_m]], this function pads shorter numerator + arrays with zero's so that all numerators have the same length. Such + alignment is necessary for functions like 'tf2ss', which needs the + alignment when dealing with SIMO transfer functions. + + Parameters + ---------- + nums: array_like + Numerator or list of numerators. Not necessarily with same length. + + Returns + ------- + nums: array + The numerator. If `nums` input was a list of numerators then a 2-D + array with padded zeros for shorter numerators is returned. Otherwise + returns ``np.asarray(nums)``. + """ + try: + # The statement can throw a ValueError if one + # of the numerators is a single digit and another + # is array-like e.g. 
if nums = [5, [1, 2, 3]] + nums = cupy.asarray(nums) + return nums + + except ValueError: + nums = [cupy.atleast_1d(num) for num in nums] + max_width = max(num.size for num in nums) + + # pre-allocate + aligned_nums = cupy.zeros((len(nums), max_width)) + + # Create numerators with padded zeros + for index, num in enumerate(nums): + aligned_nums[index, -num.size:] = num + + return aligned_nums + + +def _polycoeffs_from_zeros(zeros, tol=10): + # a clone of numpy.poly, simplified + dtyp = (cupy.complex128 + if cupy.issubdtype(zeros.dtype, cupy.complexfloating) + else cupy.float64) + a = cupy.ones(1, dtype=dtyp) + for z in zeros: + a = cupy.convolve(a, cupy.r_[1, -z], mode='full') + + # Use real output if possible. + if dtyp == cupy.complex128: + mask = cupy.abs(a.imag) < tol * cupy.finfo(a.dtype).eps + a.imag[mask] = 0.0 + if mask.shape[0] == a.shape[0]: + # all imag parts were fp noise + a = a.real.copy() + else: + # if all cmplx roots are complex conj, the coefficients are real + pos_roots = z[z.imag > 0] + neg_roots = z[z.imag < 0] + if pos_roots.shape[0] == neg_roots.shape[0]: + neg_roots = neg_roots.copy() + neg_roots.sort() + pos_roots = pos_roots.copy() + pos_roots.sort() + if (neg_roots == pos_roots.conj()).all(): + a = a.real.copy() + return a + + +def _nearest_real_complex_idx(fro, to, which): + """Get the next closest real or complex element based on distance""" + assert which in ('real', 'complex', 'any') + order = cupy.argsort(cupy.abs(fro - to)) + if which == 'any': + return order[0] + else: + mask = cupy.isreal(fro[order]) + if which == 'complex': + mask = ~mask + return order[cupy.nonzero(mask)[0][0]] + + +def _single_zpksos(z, p, k): + """Create one second-order section from up to two zeros and poles""" + sos = cupy.zeros(6) + b, a = zpk2tf(cupy.asarray(z), cupy.asarray(p), k) + sos[3-len(b):3] = b + sos[6-len(a):6] = a + return sos + + +def zpk2sos(z, p, k, pairing=None, *, analog=False): + """Return second-order sections from zeros, poles, and gain 
 of a system

    Parameters
    ----------
    z : array_like
        Zeros of the transfer function.
    p : array_like
        Poles of the transfer function.
    k : float
        System gain.
    pairing : {None, 'nearest', 'keep_odd', 'minimal'}, optional
        The method to use to combine pairs of poles and zeros into sections.
        If analog is False and pairing is None, pairing is set to 'nearest';
        if analog is True, pairing must be 'minimal', and is set to that if
        it is None.
    analog : bool, optional
        If True, system is analog, otherwise discrete.

    Returns
    -------
    sos : ndarray
        Array of second-order filter coefficients, with shape
        ``(n_sections, 6)``. See `sosfilt` for the SOS filter format
        specification.

    See Also
    --------
    sosfilt
    scipy.signal.zpk2sos

    """
    if pairing is None:
        pairing = 'minimal' if analog else 'nearest'

    valid_pairings = ['nearest', 'keep_odd', 'minimal']
    if pairing not in valid_pairings:
        raise ValueError('pairing must be one of %s, not %s'
                         % (valid_pairings, pairing))

    if analog and pairing != 'minimal':
        raise ValueError('for analog zpk2sos conversion, '
                         'pairing must be "minimal"')

    # Degenerate case: pure gain, single section.
    if len(z) == len(p) == 0:
        if not analog:
            return cupy.array([[k, 0., 0., 1., 0., 0.]])
        else:
            return cupy.array([[0., 0., k, 0., 0., 1.]])

    if pairing != 'minimal':
        # ensure we have the same number of poles and zeros, and make copies
        p = cupy.concatenate((p, cupy.zeros(max(len(z) - len(p), 0))))
        z = cupy.concatenate((z, cupy.zeros(max(len(p) - len(z), 0))))
        n_sections = (max(len(p), len(z)) + 1) // 2

        if len(p) % 2 == 1 and pairing == 'nearest':
            # Pad to an even count so every section gets a pole/zero pair.
            p = cupy.concatenate((p, cupy.zeros(1)))
            z = cupy.concatenate((z, cupy.zeros(1)))
        assert len(p) == len(z)
    else:
        if len(p) < len(z):
            raise ValueError('for analog zpk2sos conversion, '
                             'must have len(p)>=len(z)')

        n_sections = (len(p) + 1) // 2

    # Ensure we have complex conjugate pairs
    # (note that _cplxreal only gives us one element of each complex pair):
    z = cupy.concatenate(_cplxreal(z))
    p = cupy.concatenate(_cplxreal(p))
    if not cupy.isreal(k):
        raise ValueError('k must be real')
    k = k.real

    if not analog:
        # digital: "worst" is the closest to the unit circle
        def idx_worst(p):
            return cupy.argmin(cupy.abs(1 - cupy.abs(p)))
    else:
        # analog: "worst" is the closest to the imaginary axis
        def idx_worst(p):
            return cupy.argmin(cupy.abs(cupy.real(p)))

    sos = cupy.zeros((n_sections, 6))

    # Construct the system, reversing order so the "worst" are last
    # (greedy pairing: each iteration consumes one or two poles and the
    # zeros matched to them).
    for si in range(n_sections-1, -1, -1):
        # Select the next "worst" pole
        p1_idx = idx_worst(p)
        p1 = p[p1_idx]
        p = cupy.delete(p, p1_idx)

        # Pair that pole with a zero

        if cupy.isreal(p1) and cupy.isreal(p).sum() == 0:
            # Special case (1): last remaining real pole
            if pairing != 'minimal':
                z1_idx = _nearest_real_complex_idx(z, p1, 'real')
                z1 = z[z1_idx]
                z = cupy.delete(z, z1_idx)
                sos[si] = _single_zpksos(cupy.r_[z1, 0], cupy.r_[p1, 0], 1)
            elif len(z) > 0:
                z1_idx = _nearest_real_complex_idx(z, p1, 'real')
                z1 = z[z1_idx]
                z = cupy.delete(z, z1_idx)
                sos[si] = _single_zpksos([z1], [p1], 1)
            else:
                sos[si] = _single_zpksos([], [p1], 1)

        elif (len(p) + 1 == len(z)
                and not cupy.isreal(p1)
                and cupy.isreal(p).sum() == 1
                and cupy.isreal(z).sum() == 1):

            # Special case (2): there's one real pole and one real zero
            # left, and an equal number of poles and zeros to pair up.
            # We *must* pair with a complex zero

            z1_idx = _nearest_real_complex_idx(z, p1, 'complex')
            z1 = z[z1_idx]
            z = cupy.delete(z, z1_idx)
            sos[si] = _single_zpksos(
                cupy.r_[z1, z1.conj()], cupy.r_[p1, p1.conj()], 1)

        else:
            # General case: pick a second pole (real partner for a real p1,
            # the conjugate for a complex p1), then the closest zero(s).
            if cupy.isreal(p1):
                prealidx = cupy.flatnonzero(cupy.isreal(p))
                p2_idx = prealidx[idx_worst(p[prealidx])]
                p2 = p[p2_idx]
                p = cupy.delete(p, p2_idx)
            else:
                p2 = p1.conj()

            # find closest zero
            if len(z) > 0:
                z1_idx = _nearest_real_complex_idx(z, p1, 'any')
                z1 = z[z1_idx]
                z = cupy.delete(z, z1_idx)

                if not cupy.isreal(z1):
                    sos[si] = _single_zpksos(
                        cupy.r_[z1, z1.conj()], cupy.r_[p1, p2], 1)
                else:
                    if len(z) > 0:
                        z2_idx = _nearest_real_complex_idx(z, p1, 'real')
                        z2 = z[z2_idx]
                        assert cupy.isreal(z2)
                        z = cupy.delete(z, z2_idx)
                        sos[si] = _single_zpksos(cupy.r_[z1, z2], [p1, p2], 1)
                    else:
                        sos[si] = _single_zpksos([z1], [p1, p2], 1)
            else:
                # no more zeros
                sos[si] = _single_zpksos([], [p1, p2], 1)

    assert len(p) == len(z) == 0  # we've consumed all poles and zeros
    del p, z

    # put gain in first sos
    sos[0][:3] *= k
    return sos


def _cplxreal(z, tol=None):
    """
    Split into complex and real parts, combining conjugate pairs.

    The 1-D input vector `z` is split up into its complex (zc) and real (zr)
    elements. Every complex element must be part of a complex-conjugate pair,
    which are combined into a single number (with positive imaginary part) in
    the output. Two complex numbers are considered a conjugate pair if their
    real and imaginary parts differ in magnitude by less than ``tol * abs(z)``.

    Parameters
    ----------
    z : array_like
        Vector of complex numbers to be sorted and split
    tol : float, optional
        Relative tolerance for testing realness and conjugate equality.
+ Default is ``100 * spacing(1)`` of `z`'s data type (i.e., 2e-14 for + float64) + + Returns + ------- + zc : ndarray + Complex elements of `z`, with each pair represented by a single value + having positive imaginary part, sorted first by real part, and then + by magnitude of imaginary part. The pairs are averaged when combined + to reduce error. + zr : ndarray + Real elements of `z` (those having imaginary part less than + `tol` times their magnitude), sorted by value. + + Raises + ------ + ValueError + If there are any complex numbers in `z` for which a conjugate + cannot be found. + + See Also + -------- + scipy.signal.cmplxreal + + Examples + -------- + >>> a = [4, 3, 1, 2-2j, 2+2j, 2-1j, 2+1j, 2-1j, 2+1j, 1+1j, 1-1j] + >>> zc, zr = _cplxreal(a) + >>> print(zc) + [ 1.+1.j 2.+1.j 2.+1.j 2.+2.j] + >>> print(zr) + [ 1. 3. 4.] + """ + + z = cupy.atleast_1d(z) + if z.size == 0: + return z, z + elif z.ndim != 1: + raise ValueError('_cplxreal only accepts 1-D input') + + if tol is None: + # Get tolerance from dtype of input + tol = 100 * cupy.finfo((1.0 * z).dtype).eps + + # Sort by real part, magnitude of imaginary part (speed up further sorting) + z = z[cupy.lexsort(cupy.array([abs(z.imag), z.real]))] + + # Split reals from conjugate pairs + real_indices = abs(z.imag) <= tol * abs(z) + zr = z[real_indices].real + + if len(zr) == len(z): + # Input is entirely real + return cupy.array([]), zr + + # Split positive and negative halves of conjugates + z = z[~real_indices] + zp = z[z.imag > 0] + zn = z[z.imag < 0] + + if len(zp) != len(zn): + raise ValueError('Array contains complex value with no matching ' + 'conjugate.') + + # Find runs of (approximately) the same real part + same_real = cupy.diff(zp.real) <= tol * abs(zp[:-1]) + diffs = cupy.diff(cupy.r_[0, same_real, 0]) + run_starts = cupy.nonzero(diffs > 0)[0] + run_stops = cupy.nonzero(diffs < 0)[0] + + # Sort each run by their imaginary parts + for i in range(len(run_starts)): + start = run_starts[i] + stop = 
run_stops[i] + 1 + for chunk in (zp[start:stop], zn[start:stop]): + chunk[...] = chunk[cupy.lexsort(cupy.array([abs(chunk.imag)]))] + + # Check that negatives match positives + if any(abs(zp - zn.conj()) > tol * abs(zn)): + raise ValueError('Array contains complex value with no matching ' + 'conjugate.') + + # Average out numerical inaccuracy in real vs imag parts of pairs + zc = (zp + zn.conj()) / 2 + + return zc, zr + + +def normalize(b, a): + """Normalize numerator/denominator of a continuous-time transfer function. + + If values of `b` are too close to 0, they are removed. In that case, a + BadCoefficients warning is emitted. + + Parameters + ---------- + b: array_like + Numerator of the transfer function. Can be a 2-D array to normalize + multiple transfer functions. + a: array_like + Denominator of the transfer function. At most 1-D. + + Returns + ------- + num: array + The numerator of the normalized transfer function. At least a 1-D + array. A 2-D array if the input `num` is a 2-D array. + den: 1-D array + The denominator of the normalized transfer function. + + Notes + ----- + Coefficients for both the numerator and denominator should be specified in + descending exponent order (e.g., ``s^2 + 3s + 5`` would be represented as + ``[1, 3, 5]``). + + See Also + -------- + scipy.signal.normalize + + """ + num, den = b, a + + den = cupy.atleast_1d(den) + num = cupy.atleast_2d(_align_nums(num)) + + if den.ndim != 1: + raise ValueError("Denominator polynomial must be rank-1 array.") + if num.ndim > 2: + raise ValueError("Numerator polynomial must be rank-1 or" + " rank-2 array.") + if cupy.all(den == 0): + raise ValueError("Denominator must have at least on nonzero element.") + + # Trim leading zeros in denominator, leave at least one. 
+ den = _trim_zeros(den, 'f') + + # Normalize transfer function + num, den = num / den[0], den / den[0] + + # Count numerator columns that are all zero + leading_zeros = 0 + for col in num.T: + if cupy.allclose(col, 0, atol=1e-14): + leading_zeros += 1 + else: + break + + # Trim leading zeros of numerator + if leading_zeros > 0: + warnings.warn("Badly conditioned filter coefficients (numerator): the " + "results may be meaningless", BadCoefficients) + # Make sure at least one column remains + if leading_zeros == num.shape[1]: + leading_zeros -= 1 + num = num[:, leading_zeros:] + + # Squeeze first dimension if singular + if num.shape[0] == 1: + num = num[0, :] + + return num, den + + +def _relative_degree(z, p): + """ + Return relative degree of transfer function from zeros and poles + """ + degree = len(p) - len(z) + if degree < 0: + raise ValueError("Improper transfer function. " + "Must have at least as many poles as zeros.") + else: + return degree + + +def bilinear_zpk(z, p, k, fs): + r""" + Return a digital IIR filter from an analog one using a bilinear transform. + + Transform a set of poles and zeros from the analog s-plane to the digital + z-plane using Tustin's method, which substitutes ``2*fs*(z-1) / (z+1)`` for + ``s``, maintaining the shape of the frequency response. + + Parameters + ---------- + z : array_like + Zeros of the analog filter transfer function. + p : array_like + Poles of the analog filter transfer function. + k : float + System gain of the analog filter transfer function. + fs : float + Sample rate, as ordinary frequency (e.g., hertz). No prewarping is + done in this function. + + Returns + ------- + z : ndarray + Zeros of the transformed digital filter transfer function. + p : ndarray + Poles of the transformed digital filter transfer function. + k : float + System gain of the transformed digital filter. 
+ + See Also + -------- + lp2lp_zpk, lp2hp_zpk, lp2bp_zpk, lp2bs_zpk + bilinear + scipy.signal.bilinear_zpk + + """ + z = cupy.atleast_1d(z) + p = cupy.atleast_1d(p) + + degree = _relative_degree(z, p) + + fs2 = 2.0 * fs + + # Bilinear transform the poles and zeros + z_z = (fs2 + z) / (fs2 - z) + p_z = (fs2 + p) / (fs2 - p) + + # Any zeros that were at infinity get moved to the Nyquist frequency + z_z = cupy.append(z_z, -cupy.ones(degree)) + + # Compensate for gain change + k_z = k * (cupy.prod(fs2 - z) / cupy.prod(fs2 - p)).real + + return z_z, p_z, k_z + + +def lp2lp_zpk(z, p, k, wo=1.0): + r""" + Transform a lowpass filter prototype to a different frequency. + + Return an analog low-pass filter with cutoff frequency `wo` + from an analog low-pass filter prototype with unity cutoff frequency, + using zeros, poles, and gain ('zpk') representation. + + Parameters + ---------- + z : array_like + Zeros of the analog filter transfer function. + p : array_like + Poles of the analog filter transfer function. + k : float + System gain of the analog filter transfer function. + wo : float + Desired cutoff, as angular frequency (e.g., rad/s). + Defaults to no change. + + Returns + ------- + z : ndarray + Zeros of the transformed low-pass filter transfer function. + p : ndarray + Poles of the transformed low-pass filter transfer function. + k : float + System gain of the transformed low-pass filter. + + See Also + -------- + lp2hp_zpk, lp2bp_zpk, lp2bs_zpk, bilinear + lp2lp + scipy.signal.lp2lp_zpk + + """ + z = cupy.atleast_1d(z) + p = cupy.atleast_1d(p) + wo = float(wo) # Avoid int wraparound + + degree = _relative_degree(z, p) + + # Scale all points radially from origin to shift cutoff frequency + z_lp = wo * z + p_lp = wo * p + + # Each shifted pole decreases gain by wo, each shifted zero increases it. 
+ # Cancel out the net change to keep overall gain the same + k_lp = k * wo**degree + + return z_lp, p_lp, k_lp + + +def lp2hp_zpk(z, p, k, wo=1.0): + r""" + Transform a lowpass filter prototype to a highpass filter. + + Return an analog high-pass filter with cutoff frequency `wo` + from an analog low-pass filter prototype with unity cutoff frequency, + using zeros, poles, and gain ('zpk') representation. + + Parameters + ---------- + z : array_like + Zeros of the analog filter transfer function. + p : array_like + Poles of the analog filter transfer function. + k : float + System gain of the analog filter transfer function. + wo : float + Desired cutoff, as angular frequency (e.g., rad/s). + Defaults to no change. + + Returns + ------- + z : ndarray + Zeros of the transformed high-pass filter transfer function. + p : ndarray + Poles of the transformed high-pass filter transfer function. + k : float + System gain of the transformed high-pass filter. + + See Also + -------- + lp2lp_zpk, lp2bp_zpk, lp2bs_zpk, bilinear + lp2hp + scipy.signal.lp2hp_zpk + + Notes + ----- + This is derived from the s-plane substitution + + .. math:: s \rightarrow \frac{\omega_0}{s} + + This maintains symmetry of the lowpass and highpass responses on a + logarithmic scale. + + """ + z = cupy.atleast_1d(z) + p = cupy.atleast_1d(p) + wo = float(wo) + + degree = _relative_degree(z, p) + + # Invert positions radially about unit circle to convert LPF to HPF + # Scale all points radially from origin to shift cutoff frequency + z_hp = wo / z + p_hp = wo / p + + # If lowpass had zeros at infinity, inverting moves them to origin. + z_hp = cupy.append(z_hp, cupy.zeros(degree)) + + # Cancel out gain change caused by inversion + k_hp = k * cupy.real(cupy.prod(-z) / cupy.prod(-p)) + + return z_hp, p_hp, k_hp + + +def lp2bp_zpk(z, p, k, wo=1.0, bw=1.0): + r""" + Transform a lowpass filter prototype to a bandpass filter. 
+ + Return an analog band-pass filter with center frequency `wo` and + bandwidth `bw` from an analog low-pass filter prototype with unity + cutoff frequency, using zeros, poles, and gain ('zpk') representation. + + Parameters + ---------- + z : array_like + Zeros of the analog filter transfer function. + p : array_like + Poles of the analog filter transfer function. + k : float + System gain of the analog filter transfer function. + wo : float + Desired passband center, as angular frequency (e.g., rad/s). + Defaults to no change. + bw : float + Desired passband width, as angular frequency (e.g., rad/s). + Defaults to 1. + + Returns + ------- + z : ndarray + Zeros of the transformed band-pass filter transfer function. + p : ndarray + Poles of the transformed band-pass filter transfer function. + k : float + System gain of the transformed band-pass filter. + + See Also + -------- + lp2lp_zpk, lp2hp_zpk, lp2bs_zpk, bilinear + lp2bp + scipy.signal.lp2bp_zpk + + Notes + ----- + This is derived from the s-plane substitution + + .. math:: s \rightarrow \frac{s^2 + {\omega_0}^2}{s \cdot \mathrm{BW}} + + This is the "wideband" transformation, producing a passband with + geometric (log frequency) symmetry about `wo`. 
+ + """ + z = cupy.atleast_1d(z) + p = cupy.atleast_1d(p) + wo = float(wo) + bw = float(bw) + + degree = _relative_degree(z, p) + + # Scale poles and zeros to desired bandwidth + z_lp = z * bw/2 + p_lp = p * bw/2 + + # Square root needs to produce complex result, not NaN + z_lp = z_lp.astype(complex) + p_lp = p_lp.astype(complex) + + # Duplicate poles and zeros and shift from baseband to +wo and -wo + z_bp = cupy.concatenate((z_lp + cupy.sqrt(z_lp**2 - wo**2), + z_lp - cupy.sqrt(z_lp**2 - wo**2))) + p_bp = cupy.concatenate((p_lp + cupy.sqrt(p_lp**2 - wo**2), + p_lp - cupy.sqrt(p_lp**2 - wo**2))) + + # Move degree zeros to origin, leaving degree zeros at infinity for BPF + z_bp = cupy.append(z_bp, cupy.zeros(degree)) + + # Cancel out gain change from frequency scaling + k_bp = k * bw**degree + + return z_bp, p_bp, k_bp + + +def lp2bs_zpk(z, p, k, wo=1.0, bw=1.0): + r""" + Transform a lowpass filter prototype to a bandstop filter. + + Return an analog band-stop filter with center frequency `wo` and + stopband width `bw` from an analog low-pass filter prototype with unity + cutoff frequency, using zeros, poles, and gain ('zpk') representation. + + Parameters + ---------- + z : array_like + Zeros of the analog filter transfer function. + p : array_like + Poles of the analog filter transfer function. + k : float + System gain of the analog filter transfer function. + wo : float + Desired stopband center, as angular frequency (e.g., rad/s). + Defaults to no change. + bw : float + Desired stopband width, as angular frequency (e.g., rad/s). + Defaults to 1. + + Returns + ------- + z : ndarray + Zeros of the transformed band-stop filter transfer function. + p : ndarray + Poles of the transformed band-stop filter transfer function. + k : float + System gain of the transformed band-stop filter. + + See Also + -------- + lp2lp_zpk, lp2hp_zpk, lp2bp_zpk, bilinear + lp2bs + scipy.signal.lp2bs_zpk + + Notes + ----- + This is derived from the s-plane substitution + + .. 
math:: s \rightarrow \frac{s \cdot \mathrm{BW}}{s^2 + {\omega_0}^2} + + This is the "wideband" transformation, producing a stopband with + geometric (log frequency) symmetry about `wo`. + + """ + z = cupy.atleast_1d(z) + p = cupy.atleast_1d(p) + wo = float(wo) + bw = float(bw) + + degree = _relative_degree(z, p) + + # Invert to a highpass filter with desired bandwidth + z_hp = (bw/2) / z + p_hp = (bw/2) / p + + # Square root needs to produce complex result, not NaN + z_hp = z_hp.astype(complex) + p_hp = p_hp.astype(complex) + + # Duplicate poles and zeros and shift from baseband to +wo and -wo + z_bs = cupy.concatenate((z_hp + cupy.sqrt(z_hp**2 - wo**2), + z_hp - cupy.sqrt(z_hp**2 - wo**2))) + p_bs = cupy.concatenate((p_hp + cupy.sqrt(p_hp**2 - wo**2), + p_hp - cupy.sqrt(p_hp**2 - wo**2))) + + # Move any zeros that were at infinity to the center of the stopband + z_bs = cupy.append(z_bs, cupy.full(degree, +1j*wo)) + z_bs = cupy.append(z_bs, cupy.full(degree, -1j*wo)) + + # Cancel out gain change caused by inversion + k_bs = k * cupy.real(cupy.prod(-z) / cupy.prod(-p)) + + return z_bs, p_bs, k_bs + + +def bilinear(b, a, fs=1.0): + r""" + Return a digital IIR filter from an analog one using a bilinear transform. + + Transform a set of poles and zeros from the analog s-plane to the digital + z-plane using Tustin's method, which substitutes ``2*fs*(z-1) / (z+1)`` for + ``s``, maintaining the shape of the frequency response. + + Parameters + ---------- + b : array_like + Numerator of the analog filter transfer function. + a : array_like + Denominator of the analog filter transfer function. + fs : float + Sample rate, as ordinary frequency (e.g., hertz). No prewarping is + done in this function. + + Returns + ------- + b : ndarray + Numerator of the transformed digital filter transfer function. + a : ndarray + Denominator of the transformed digital filter transfer function. 
+ + See Also + -------- + lp2lp, lp2hp, lp2bp, lp2bs + bilinear_zpk + scipy.signal.bilinear + + """ + fs = float(fs) + a, b = map(cupy.atleast_1d, (a, b)) + D = a.shape[0] - 1 + N = b.shape[0] - 1 + + M = max(N, D) + Np, Dp = M, M + + bprime = cupy.empty(Np + 1, float) + aprime = cupy.empty(Dp + 1, float) + + # XXX (ev-br): worth turning into a ufunc invocation? (loops are short) + for j in range(Dp + 1): + val = 0.0 + for i in range(N + 1): + bNi = b[N - i] * (2 * fs)**i + for k in range(i + 1): + for s in range(M - i + 1): + if k + s == j: + val += comb(i, k) * comb(M - i, s) * bNi * (-1)**k + bprime[j] = cupy.real(val) + + for j in range(Dp + 1): + val = 0.0 + for i in range(D + 1): + aDi = a[D - i] * (2 * fs)**i + for k in range(i + 1): + for s in range(M - i + 1): + if k + s == j: + val += comb(i, k) * comb(M - i, s) * aDi * (-1)**k + aprime[j] = cupy.real(val) + + return normalize(bprime, aprime) + + +def lp2lp(b, a, wo=1.0): + r""" + Transform a lowpass filter prototype to a different frequency. + + Return an analog low-pass filter with cutoff frequency `wo` + from an analog low-pass filter prototype with unity cutoff frequency, in + transfer function ('ba') representation. + + Parameters + ---------- + b : array_like + Numerator polynomial coefficients. + a : array_like + Denominator polynomial coefficients. + wo : float + Desired cutoff, as angular frequency (e.g. rad/s). + Defaults to no change. + + Returns + ------- + b : array_like + Numerator polynomial coefficients of the transformed low-pass filter. + a : array_like + Denominator polynomial coefficients of the transformed low-pass filter. + + See Also + -------- + lp2hp, lp2bp, lp2bs, bilinear + lp2lp_zpk + scipy.signal.lp2lp + + Notes + ----- + This is derived from the s-plane substitution + + .. 
math:: s \rightarrow \frac{s}{\omega_0} + + """ + a, b = map(cupy.atleast_1d, (a, b)) + try: + wo = float(wo) + except TypeError: + wo = float(wo[0]) + d = len(a) + n = len(b) + M = max(d, n) + pwo = wo ** cupy.arange(M - 1, -1, -1) + start1 = max((n - d, 0)) + start2 = max((d - n, 0)) + b = b * pwo[start1] / pwo[start2:] + a = a * pwo[start1] / pwo[start1:] + return normalize(b, a) + + +def lp2hp(b, a, wo=1.0): + r""" + Transform a lowpass filter prototype to a highpass filter. + + Return an analog high-pass filter with cutoff frequency `wo` + from an analog low-pass filter prototype with unity cutoff frequency, in + transfer function ('ba') representation. + + Parameters + ---------- + b : array_like + Numerator polynomial coefficients. + a : array_like + Denominator polynomial coefficients. + wo : float + Desired cutoff, as angular frequency (e.g., rad/s). + Defaults to no change. + + Returns + ------- + b : array_like + Numerator polynomial coefficients of the transformed high-pass filter. + a : array_like + Denominator polynomial coefficients of the transformed high-pass + filter. + + See Also + -------- + lp2lp, lp2bp, lp2bs, bilinear + lp2hp_zpk + scipy.signal.lp2hp + + Notes + ----- + This is derived from the s-plane substitution + + .. math:: s \rightarrow \frac{\omega_0}{s} + + This maintains symmetry of the lowpass and highpass responses on a + logarithmic scale. + """ + a, b = map(cupy.atleast_1d, (a, b)) + try: + wo = float(wo) + except TypeError: + wo = float(wo[0]) + d = len(a) + n = len(b) + if wo != 1: + pwo = wo ** cupy.arange(max(d, n)) + else: + pwo = cupy.ones(max(d, n), b.dtype) + if d >= n: + outa = a[::-1] * pwo + outb = cupy.resize(b, (d,)) + outb[n:] = 0.0 + outb[:n] = b[::-1] * pwo[:n] + else: + outb = b[::-1] * pwo + outa = cupy.resize(a, (n,)) + outa[d:] = 0.0 + outa[:d] = a[::-1] * pwo[:d] + + return normalize(outb, outa) + + +def lp2bp(b, a, wo=1.0, bw=1.0): + r""" + Transform a lowpass filter prototype to a bandpass filter. 
+ + Return an analog band-pass filter with center frequency `wo` and + bandwidth `bw` from an analog low-pass filter prototype with unity + cutoff frequency, in transfer function ('ba') representation. + + Parameters + ---------- + b : array_like + Numerator polynomial coefficients. + a : array_like + Denominator polynomial coefficients. + wo : float + Desired passband center, as angular frequency (e.g., rad/s). + Defaults to no change. + bw : float + Desired passband width, as angular frequency (e.g., rad/s). + Defaults to 1. + + Returns + ------- + b : array_like + Numerator polynomial coefficients of the transformed band-pass filter. + a : array_like + Denominator polynomial coefficients of the transformed band-pass + filter. + + See Also + -------- + lp2lp, lp2hp, lp2bs, bilinear + lp2bp_zpk + scipy.signal.lp2bp + + Notes + ----- + This is derived from the s-plane substitution + + .. math:: s \rightarrow \frac{s^2 + {\omega_0}^2}{s \cdot \mathrm{BW}} + + This is the "wideband" transformation, producing a passband with + geometric (log frequency) symmetry about `wo`. + + """ + a, b = map(cupy.atleast_1d, (a, b)) + D = len(a) - 1 + N = len(b) - 1 + artype = cupy.mintypecode((a.dtype, b.dtype)) + ma = max(N, D) + Np = N + ma + Dp = D + ma + bprime = cupy.empty(Np + 1, artype) + aprime = cupy.empty(Dp + 1, artype) + wosq = wo * wo + for j in range(Np + 1): + val = 0.0 + for i in range(0, N + 1): + for k in range(0, i + 1): + if ma - i + 2 * k == j: + val += comb(i, k) * b[N - i] * (wosq) ** (i - k) / bw ** i + bprime[Np - j] = val + + for j in range(Dp + 1): + val = 0.0 + for i in range(0, D + 1): + for k in range(0, i + 1): + if ma - i + 2 * k == j: + val += comb(i, k) * a[D - i] * (wosq) ** (i - k) / bw ** i + aprime[Dp - j] = val + + return normalize(bprime, aprime) + + +def lp2bs(b, a, wo=1.0, bw=1.0): + r""" + Transform a lowpass filter prototype to a bandstop filter. 
+ + Return an analog band-stop filter with center frequency `wo` and + bandwidth `bw` from an analog low-pass filter prototype with unity + cutoff frequency, in transfer function ('ba') representation. + + Parameters + ---------- + b : array_like + Numerator polynomial coefficients. + a : array_like + Denominator polynomial coefficients. + wo : float + Desired stopband center, as angular frequency (e.g., rad/s). + Defaults to no change. + bw : float + Desired stopband width, as angular frequency (e.g., rad/s). + Defaults to 1. + + Returns + ------- + b : array_like + Numerator polynomial coefficients of the transformed band-stop filter. + a : array_like + Denominator polynomial coefficients of the transformed band-stop + filter. + + See Also + -------- + lp2lp, lp2hp, lp2bp, bilinear + lp2bs_zpk + scipy.signal.lp2bs + + Notes + ----- + This is derived from the s-plane substitution + + .. math:: s \rightarrow \frac{s \cdot \mathrm{BW}}{s^2 + {\omega_0}^2} + + This is the "wideband" transformation, producing a stopband with + geometric (log frequency) symmetry about `wo`. 
+ """ + a, b = map(cupy.atleast_1d, (a, b)) + D = len(a) - 1 + N = len(b) - 1 + artype = cupy.mintypecode((a.dtype, b.dtype)) + M = max(N, D) + Np = M + M + Dp = M + M + bprime = cupy.empty(Np + 1, artype) + aprime = cupy.empty(Dp + 1, artype) + wosq = wo * wo + for j in range(Np + 1): + val = 0.0 + for i in range(0, N + 1): + for k in range(0, M - i + 1): + if i + 2 * k == j: + val += (comb(M - i, k) * b[N - i] * + (wosq) ** (M - i - k) * bw ** i) + bprime[Np - j] = val + + for j in range(Dp + 1): + val = 0.0 + for i in range(0, D + 1): + for k in range(0, M - i + 1): + if i + 2 * k == j: + val += (comb(M - i, k) * a[D - i] * + (wosq) ** (M - i - k) * bw ** i) + aprime[Dp - j] = val + + return normalize(bprime, aprime) + + +# ### LTI conversions ### + +def zpk2tf(z, p, k): + """ + Return polynomial transfer function representation from zeros and poles + + Parameters + ---------- + z : array_like + Zeros of the transfer function. + p : array_like + Poles of the transfer function. + k : float + System gain. + + Returns + ------- + b : ndarray + Numerator polynomial coefficients. + a : ndarray + Denominator polynomial coefficients. + + See Also + -------- + scipy.signal.zpk2tf + """ + if z.ndim > 1: + raise NotImplementedError(f"zpk2tf: z.ndim = {z.ndim}.") + b = _polycoeffs_from_zeros(z) * k + a = _polycoeffs_from_zeros(p) + return b, a + + +def tf2zpk(b, a): + r"""Return zero, pole, gain (z, p, k) representation from a numerator, + denominator representation of a linear filter. + + Parameters + ---------- + b : array_like + Numerator polynomial coefficients. + a : array_like + Denominator polynomial coefficients. + + Returns + ------- + z : ndarray + Zeros of the transfer function. + p : ndarray + Poles of the transfer function. + k : float + System gain. + + Warning + ------- + This function may synchronize the device. + + See Also + -------- + scipy.signal.tf2zpk + + Notes + ----- + If some values of `b` are too close to 0, they are removed. 
In that case, + a BadCoefficients warning is emitted. + + The `b` and `a` arrays are interpreted as coefficients for positive, + descending powers of the transfer function variable. So the inputs + :math:`b = [b_0, b_1, ..., b_M]` and :math:`a =[a_0, a_1, ..., a_N]` + can represent an analog filter of the form: + + .. math:: + + H(s) = \frac + {b_0 s^M + b_1 s^{(M-1)} + \cdots + b_M} + {a_0 s^N + a_1 s^{(N-1)} + \cdots + a_N} + + or a discrete-time filter of the form: + + .. math:: + + H(z) = \frac + {b_0 z^M + b_1 z^{(M-1)} + \cdots + b_M} + {a_0 z^N + a_1 z^{(N-1)} + \cdots + a_N} + + This "positive powers" form is found more commonly in controls + engineering. If `M` and `N` are equal (which is true for all filters + generated by the bilinear transform), then this happens to be equivalent + to the "negative powers" discrete-time form preferred in DSP: + + .. math:: + + H(z) = \frac + {b_0 + b_1 z^{-1} + \cdots + b_M z^{-M}} + {a_0 + a_1 z^{-1} + \cdots + a_N z^{-N}} + + Although this is true for common filters, remember that this is not true + in the general case. If `M` and `N` are not equal, the discrete-time + transfer function coefficients must first be converted to the "positive + powers" form before finding the poles and zeros. + + """ + b, a = normalize(b, a) + b = (b + 0.0) / a[0] + a = (a + 0.0) / a[0] + k = b[0].copy() + b /= b[0] + z = roots(b) + p = roots(a) + return z, p, k + + +def tf2sos(b, a, pairing=None, *, analog=False): + """ + Return second-order sections from transfer function representation + + Parameters + ---------- + b : array_like + Numerator polynomial coefficients. + a : array_like + Denominator polynomial coefficients. + pairing : {None, 'nearest', 'keep_odd', 'minimal'}, optional + The method to use to combine pairs of poles and zeros into sections. + See `zpk2sos` for information and restrictions on `pairing` and + `analog` arguments. + analog : bool, optional + If True, system is analog, otherwise discrete. 
+ + Returns + ------- + sos : ndarray + Array of second-order filter coefficients, with shape + ``(n_sections, 6)``. See `sosfilt` for the SOS filter format + specification. + + See Also + -------- + scipy.signal.tf2sos + + Notes + ----- + It is generally discouraged to convert from TF to SOS format, since doing + so usually will not improve numerical precision errors. Instead, consider + designing filters in ZPK format and converting directly to SOS. TF is + converted to SOS by first converting to ZPK format, then converting + ZPK to SOS. + + """ + return zpk2sos(*tf2zpk(b, a), pairing=pairing, analog=analog) + + +def sos2tf(sos): + """ + Return a single transfer function from a series of second-order sections + + Parameters + ---------- + sos : array_like + Array of second-order filter coefficients, must have shape + ``(n_sections, 6)``. See `sosfilt` for the SOS filter format + specification. + + Returns + ------- + b : ndarray + Numerator polynomial coefficients. + a : ndarray + Denominator polynomial coefficients. + + See Also + -------- + scipy.signal.sos2tf + + """ + sos = cupy.asarray(sos) + result_type = sos.dtype + if result_type.kind in 'bui': + result_type = cupy.float64 + + b = cupy.array([1], dtype=result_type) + a = cupy.array([1], dtype=result_type) + n_sections = sos.shape[0] + for section in range(n_sections): + b = cupy.polymul(b, sos[section, :3]) + a = cupy.polymul(a, sos[section, 3:]) + return b, a + + +def sos2zpk(sos): + """ + Return zeros, poles, and gain of a series of second-order sections + + Parameters + ---------- + sos : array_like + Array of second-order filter coefficients, must have shape + ``(n_sections, 6)``. See `sosfilt` for the SOS filter format + specification. + + Returns + ------- + z : ndarray + Zeros of the transfer function. + p : ndarray + Poles of the transfer function. + k : float + System gain. 
+ + Notes + ----- + The number of zeros and poles returned will be ``n_sections * 2`` + even if some of these are (effectively) zero. + + See Also + -------- + scipy.signal.sos2zpk + + """ + n_sections = sos.shape[0] + z = cupy.zeros(n_sections*2, cupy.complex128) + p = cupy.zeros(n_sections*2, cupy.complex128) + k = 1. + for section in range(n_sections): + # XXX: may just solve a quadratic equation instead of tf2zpk + zpk = tf2zpk(sos[section, :3], sos[section, 3:]) + z[2*section:2*section + len(zpk[0])] = zpk[0] + p[2*section:2*section + len(zpk[1])] = zpk[1] + k *= zpk[2] + return z, p, k + + +def tf2ss(num, den): + r"""Transfer function to state-space representation. + + Parameters + ---------- + num, den : array_like + Sequences representing the coefficients of the numerator and + denominator polynomials, in order of descending degree. The + denominator needs to be at least as long as the numerator. + + Returns + ------- + A, B, C, D : ndarray + State space representation of the system, in controller canonical + form. + + See Also + -------- + scipy.signal.tf2ss + """ + # Controller canonical state-space representation. + # if M+1 = len(num) and K+1 = len(den) then we must have M <= K + # states are found by asserting that X(s) = U(s) / D(s) + # then Y(s) = N(s) * X(s) + # + # A, B, C, and D follow quite naturally. + # + num, den = normalize(num, den) # Strips zeros, checks arrays + nn = len(num.shape) + if nn == 1: + num = cupy.asarray([num], num.dtype) + M = num.shape[1] + K = len(den) + if M > K: + msg = "Improper transfer function. `num` is longer than `den`." 
+ raise ValueError(msg) + if M == 0 or K == 0: # Null system + return (cupy.array([], float), + cupy.array([], float), + cupy.array([], float), + cupy.array([], float)) + + # pad numerator to have same number of columns has denominator + num = cupy.hstack((cupy.zeros((num.shape[0], K - M), num.dtype), num)) + + if num.shape[-1] > 0: + D = cupy.atleast_2d(num[:, 0]) + + else: + # We don't assign it an empty array because this system + # is not 'null'. It just doesn't have a non-zero D + # matrix. Thus, it should have a non-zero shape so that + # it can be operated on by functions like 'ss2tf' + D = cupy.array([[0]], float) + + if K == 1: + D = D.reshape(num.shape) + + return (cupy.zeros((1, 1)), cupy.zeros((1, D.shape[1])), + cupy.zeros((D.shape[0], 1)), D) + + frow = -cupy.array([den[1:]]) + A = cupy.r_[frow, cupy.eye(K - 2, K - 1)] + B = cupy.eye(K - 1, 1) + C = num[:, 1:] - cupy.outer(num[:, 0], den[1:]) + D = D.reshape((C.shape[0], B.shape[1])) + + return A, B, C, D + + +def ss2tf(A, B, C, D, input=0): + r"""State-space to transfer function. + + A, B, C, D defines a linear state-space system with `p` inputs, + `q` outputs, and `n` state variables. + + Parameters + ---------- + A : array_like + State (or system) matrix of shape ``(n, n)`` + B : array_like + Input matrix of shape ``(n, p)`` + C : array_like + Output matrix of shape ``(q, n)`` + D : array_like + Feedthrough (or feedforward) matrix of shape ``(q, p)`` + input : int, optional + For multiple-input systems, the index of the input to use. + + Returns + ------- + num : 2-D ndarray + Numerator(s) of the resulting transfer function(s). `num` has one row + for each of the system's outputs. Each row is a sequence representation + of the numerator polynomial. + den : 1-D ndarray + Denominator of the resulting transfer function(s). `den` is a sequence + representation of the denominator polynomial. + + Warning + ------- + This function may synchronize the device. 
+ + See Also + -------- + scipy.signal.ss2tf + + """ + # transfer function is C (sI - A)**(-1) B + D + + # Check consistency and make them all rank-2 arrays + A, B, C, D = abcd_normalize(A, B, C, D) + + nout, nin = D.shape + if input >= nin: + raise ValueError("System does not have the input specified.") + + # make SIMO from possibly MIMO system. + B = B[:, input:input + 1] + D = D[:, input:input + 1] + + try: + den = poly(A) + except ValueError: + den = 1 + + if (prod(B.shape) == 0) and (prod(C.shape) == 0): + num = cupy.ravel(D) + if (prod(D.shape) == 0) and (prod(A.shape) == 0): + den = [] + return num, den + + num_states = A.shape[0] + type_test = A[:, 0] + B[:, 0] + C[0, :] + D + 0.0 + num = cupy.empty((nout, num_states + 1), type_test.dtype) + for k in range(nout): + Ck = cupy.atleast_2d(C[k, :]) + num[k] = poly(A - B @ Ck) + (D[k] - 1) * den + + return num, den + + +def zpk2ss(z, p, k): + """Zero-pole-gain representation to state-space representation + + Parameters + ---------- + z, p : sequence + Zeros and poles. + k : float + System gain. + + Returns + ------- + A, B, C, D : ndarray + State space representation of the system, in controller canonical + form. + + See Also + -------- + scipy.signal.zpk2ss + + """ + return tf2ss(*zpk2tf(z, p, k)) + + +def ss2zpk(A, B, C, D, input=0): + """State-space representation to zero-pole-gain representation. + + A, B, C, D defines a linear state-space system with `p` inputs, + `q` outputs, and `n` state variables. + + Parameters + ---------- + A : array_like + State (or system) matrix of shape ``(n, n)`` + B : array_like + Input matrix of shape ``(n, p)`` + C : array_like + Output matrix of shape ``(q, n)`` + D : array_like + Feedthrough (or feedforward) matrix of shape ``(q, p)`` + input : int, optional + For multiple-input systems, the index of the input to use. + + Returns + ------- + z, p : sequence + Zeros and poles. + k : float + System gain. 
+ + See Also + -------- + scipy.signal.ss2zpk + + """ + return tf2zpk(*ss2tf(A, B, C, D, input=input)) + + +# ### Low-level analog filter prototypes ### + +# TODO (ev-br): move to a better place (_filter_design.py (?)) + +def buttap(N): + """Return (z,p,k) for analog prototype of Nth-order Butterworth filter. + + The filter will have an angular (e.g., rad/s) cutoff frequency of 1. + + See Also + -------- + butter : Filter design function using this prototype + scipy.signal.buttap + + """ + if abs(int(N)) != N: + raise ValueError("Filter order must be a nonnegative integer") + z = cupy.array([]) + m = cupy.arange(-N+1, N, 2) + # Middle value is 0 to ensure an exactly real pole + p = -cupy.exp(1j * pi * m / (2 * N)) + k = 1 + return z, p, k + + +def cheb1ap(N, rp): + """ + Return (z,p,k) for Nth-order Chebyshev type I analog lowpass filter. + + The returned filter prototype has `rp` decibels of ripple in the passband. + + The filter's angular (e.g. rad/s) cutoff frequency is normalized to 1, + defined as the point at which the gain first drops below ``-rp``. + + See Also + -------- + cheby1 : Filter design function using this prototype + + """ + if abs(int(N)) != N: + raise ValueError("Filter order must be a nonnegative integer") + elif N == 0: + # Avoid divide-by-zero error + # Even order filters have DC gain of -rp dB + return cupy.array([]), cupy.array([]), 10**(-rp/20) + z = cupy.array([]) + + # Ripple factor (epsilon) + eps = cupy.sqrt(10 ** (0.1 * rp) - 1.0) + mu = 1.0 / N * cupy.arcsinh(1 / eps) + + # Arrange poles in an ellipse on the left half of the S-plane + m = cupy.arange(-N+1, N, 2) + theta = pi * m / (2*N) + p = -cupy.sinh(mu + 1j*theta) + + k = cupy.prod(-p, axis=0).real + if N % 2 == 0: + k = k / cupy.sqrt(1 + eps * eps) + + return z, p, k + + +def cheb2ap(N, rs): + """ + Return (z,p,k) for Nth-order Chebyshev type I analog lowpass filter. + + The returned filter prototype has `rs` decibels of ripple in the stopband. + + The filter's angular (e.g. 
rad/s) cutoff frequency is normalized to 1, + defined as the point at which the gain first reaches ``-rs``. + + See Also + -------- + cheby2 : Filter design function using this prototype + + """ + if abs(int(N)) != N: + raise ValueError("Filter order must be a nonnegative integer") + elif N == 0: + # Avoid divide-by-zero warning + return cupy.array([]), cupy.array([]), 1 + + # Ripple factor (epsilon) + de = 1.0 / cupy.sqrt(10 ** (0.1 * rs) - 1) + mu = cupy.arcsinh(1.0 / de) / N + + if N % 2: + m = cupy.concatenate((cupy.arange(-N+1, 0, 2), + cupy.arange(2, N, 2))) + else: + m = cupy.arange(-N+1, N, 2) + + z = -cupy.conjugate(1j / cupy.sin(m * pi / (2.0 * N))) + + # Poles around the unit circle like Butterworth + p = -cupy.exp(1j * pi * cupy.arange(-N+1, N, 2) / (2 * N)) + # Warp into Chebyshev II + p = cupy.sinh(mu) * p.real + 1j * cupy.cosh(mu) * p.imag + p = 1.0 / p + + k = (cupy.prod(-p, axis=0) / cupy.prod(-z, axis=0)).real + return z, p, k + + +# ### Elliptic filter prototype ### + +_POW10_LOG10 = math.log(10) + + +def _pow10m1(x): + """10 ** x - 1 for x near 0""" + return cupy.expm1(_POW10_LOG10 * x) + + +def _ellipdeg(n, m1): + """Solve degree equation using nomes + + Given n, m1, solve + n * K(m) / K'(m) = K1(m1) / K1'(m1) + for m + + See [1], Eq. (49) + + References + ---------- + .. 
[1] Orfanidis, "Lecture Notes on Elliptic Filter Design", + https://www.ece.rutgers.edu/~orfanidi/ece521/notes.pdf + """ + # number of terms in solving degree equation + _ELLIPDEG_MMAX = 7 + + K1 = special.ellipk(m1) + K1p = special.ellipkm1(m1) + + q1 = cupy.exp(-pi * K1p / K1) + q = q1 ** (1/n) + + mnum = cupy.arange(_ELLIPDEG_MMAX + 1) + mden = cupy.arange(1, _ELLIPDEG_MMAX + 2) + + num = (q ** (mnum * (mnum+1))).sum() + den = 1 + 2 * (q ** (mden**2)).sum() + + return 16 * q * (num / den) ** 4 + + +def _arc_jac_sn(w, m): + """Inverse Jacobian elliptic sn + + Solve for z in w = sn(z, m) + + Parameters + ---------- + w : complex scalar + argument + + m : scalar + modulus; in interval [0, 1] + + + See [1], Eq. (56) + + References + ---------- + .. [1] Orfanidis, "Lecture Notes on Elliptic Filter Design", + https://www.ece.rutgers.edu/~orfanidi/ece521/notes.pdf + + """ + # Maximum number of iterations in Landen transformation recursion + # sequence. 10 is conservative; unit tests pass with 4, Orfanidis + # (see _arc_jac_cn [1]) suggests 5. 
+ _ARC_JAC_SN_MAXITER = 10 + + def _complement(kx): + # (1-k**2) ** 0.5; the expression below + # works for small kx + return ((1 - kx) * (1 + kx)) ** 0.5 + + k = m ** 0.5 + + if k > 1: + return cupy.nan + elif k == 1: + return cupy.arctanh(w) + + ks = [k] + niter = 0 + while ks[-1] != 0: + k_ = ks[-1] + k_p = _complement(k_) + ks.append((1 - k_p) / (1 + k_p)) + niter += 1 + if niter > _ARC_JAC_SN_MAXITER: + raise ValueError('Landen transformation not converging') + + K = cupy.prod(1 + cupy.array(ks[1:])) * pi/2 + + wns = [w] + + for kn, knext in zip(ks[:-1], ks[1:]): + wn = wns[-1] + wnext = (2 * wn / + ((1 + knext) * (1 + _complement(kn * wn)))) + wns.append(wnext) + + u = 2 / pi * cupy.arcsin(wns[-1]) + + z = K * u + return z + + +def _arc_jac_sc1(w, m): + """Real inverse Jacobian sc, with complementary modulus + + Solve for z in w = sc(z, 1-m) + + w - real scalar + + m - modulus + + Using that sc(z, m) = -i * sn(i * z, 1 - m) + cf scipy/signal/_filter_design.py analog for an explanation + and a reference. + + """ + + zcomplex = _arc_jac_sn(1j * w, m) + if abs(zcomplex.real) > 1e-14: + raise ValueError + + return zcomplex.imag + + +def ellipap(N, rp, rs): + """Return (z,p,k) of Nth-order elliptic analog lowpass filter. + + The filter is a normalized prototype that has `rp` decibels of ripple + in the passband and a stopband `rs` decibels down. + + The filter's angular (e.g., rad/s) cutoff frequency is normalized to 1, + defined as the point at which the gain first drops below ``-rp``. 
+ + See Also + -------- + ellip : Filter design function using this prototype + scipy.signal.elliap + + """ + if abs(int(N)) != N: + raise ValueError("Filter order must be a nonnegative integer") + elif N == 0: + # Avoid divide-by-zero warning + # Even order filters have DC gain of -rp dB + return cupy.array([]), cupy.array([]), 10**(-rp/20) + elif N == 1: + p = -cupy.sqrt(1.0 / _pow10m1(0.1 * rp)) + k = -p + z = [] + return cupy.asarray(z), cupy.asarray(p), k + + eps_sq = _pow10m1(0.1 * rp) + + eps = cupy.sqrt(eps_sq) + ck1_sq = eps_sq / _pow10m1(0.1 * rs) + if ck1_sq == 0: + raise ValueError("Cannot design a filter with given rp and rs" + " specifications.") + + m = _ellipdeg(N, ck1_sq) + capk = special.ellipk(m) + j = cupy.arange(1 - N % 2, N, 2) + EPSILON = 2e-16 + + s, c, d, phi = special.ellipj(j * capk / N, m * cupy.ones_like(j)) + snew = cupy.compress(cupy.abs(s) > EPSILON, s, axis=-1) + z = 1.j / (cupy.sqrt(m) * snew) + z = cupy.concatenate((z, z.conj())) + + r = _arc_jac_sc1(1. 
/ eps, ck1_sq) + v0 = capk * r / (N * special.ellipk(ck1_sq)) + + sv, cv, dv, phi = special.ellipj(v0, 1 - m) + p = -(c * d * sv * cv + 1j * s * dv) / (1 - (d * sv) ** 2.0) + + if N % 2: + mask = cupy.abs(p.imag) > EPSILON * \ + cupy.sqrt((p * p.conj()).sum(axis=0).real) + newp = cupy.compress(mask, p, axis=-1) + p = cupy.concatenate((p, newp.conj())) + else: + p = cupy.concatenate((p, p.conj())) + + k = (cupy.prod(-p, axis=0) / cupy.prod(-z, axis=0)).real + if N % 2 == 0: + k = k / cupy.sqrt(1 + eps_sq) + + return z, p, k + + +# ### *ord functions to accopany *ap functions + +def _validate_gpass_gstop(gpass, gstop): + + if gpass <= 0.0: + raise ValueError("gpass should be larger than 0.0") + elif gstop <= 0.0: + raise ValueError("gstop should be larger than 0.0") + elif gpass > gstop: + raise ValueError("gpass should be smaller than gstop") + + +def _pre_warp(wp, ws, analog): + # Pre-warp frequencies for digital filter design + if not analog: + passb = cupy.tan(pi * wp / 2.0) + stopb = cupy.tan(pi * ws / 2.0) + else: + passb = wp * 1.0 + stopb = ws * 1.0 + return passb, stopb + + +def _validate_wp_ws(wp, ws, fs, analog): + wp = cupy.atleast_1d(wp) + ws = cupy.atleast_1d(ws) + if fs is not None: + if analog: + raise ValueError("fs cannot be specified for an analog filter") + wp = 2 * wp / fs + ws = 2 * ws / fs + + filter_type = 2 * (len(wp) - 1) + 1 + if wp[0] >= ws[0]: + filter_type += 1 + + return wp, ws, filter_type + + +def _find_nat_freq(stopb, passb, gpass, gstop, filter_type, filter_kind): + if filter_type == 1: # low + nat = stopb / passb + elif filter_type == 2: # high + nat = passb / stopb + elif filter_type == 3: # stop + wp0 = _optimize.fminbound(band_stop_obj, passb[0], stopb[0] - 1e-12, + args=(0, passb, stopb, gpass, gstop, + filter_kind), + disp=0) + passb[0] = wp0 + wp1 = _optimize.fminbound(band_stop_obj, stopb[1] + 1e-12, passb[1], + args=(1, passb, stopb, gpass, gstop, + filter_kind), + disp=0) + passb[1] = wp1 + nat = ((stopb * (passb[0] - 
passb[1])) / + (stopb ** 2 - passb[0] * passb[1])) + elif filter_type == 4: # pass + nat = ((stopb ** 2 - passb[0] * passb[1]) / + (stopb * (passb[0] - passb[1]))) + else: + raise ValueError(f"should not happen: {filter_type=}.") + + nat = min(cupy.abs(nat)) + return nat, passb + + +def _postprocess_wn(WN, analog, fs): + wn = WN if analog else cupy.arctan(WN) * 2.0 / pi + if len(wn) == 1: + wn = wn[0] + if fs is not None: + wn = wn * fs / 2 + return wn + + +def band_stop_obj(wp, ind, passb, stopb, gpass, gstop, type): + """ + Band Stop Objective Function for order minimization. + + Returns the non-integer order for an analog band stop filter. + + Parameters + ---------- + wp : scalar + Edge of passband `passb`. + ind : int, {0, 1} + Index specifying which `passb` edge to vary (0 or 1). + passb : ndarray + Two element sequence of fixed passband edges. + stopb : ndarray + Two element sequence of fixed stopband edges. + gstop : float + Amount of attenuation in stopband in dB. + gpass : float + Amount of ripple in the passband in dB. + type : {'butter', 'cheby', 'ellip'} + Type of filter. + + Returns + ------- + n : scalar + Filter order (possibly non-integer). 
+ + See Also + -------- + scipy.signal.band_stop_obj + + """ + + _validate_gpass_gstop(gpass, gstop) + + passbC = passb.copy() + passbC[ind] = wp + nat = (stopb * (passbC[0] - passbC[1]) / + (stopb ** 2 - passbC[0] * passbC[1])) + nat = min(cupy.abs(nat)) + + if type == 'butter': + GSTOP = 10 ** (0.1 * cupy.abs(gstop)) + GPASS = 10 ** (0.1 * cupy.abs(gpass)) + n = (cupy.log10((GSTOP - 1.0) / (GPASS - 1.0)) / (2 * cupy.log10(nat))) + elif type == 'cheby': + GSTOP = 10 ** (0.1 * cupy.abs(gstop)) + GPASS = 10 ** (0.1 * cupy.abs(gpass)) + n = cupy.arccosh( + cupy.sqrt((GSTOP - 1.0) / (GPASS - 1.0))) / cupy.arccosh(nat) + elif type == 'ellip': + GSTOP = 10 ** (0.1 * gstop) + GPASS = 10 ** (0.1 * gpass) + arg1 = cupy.sqrt((GPASS - 1.0) / (GSTOP - 1.0)) + arg0 = 1.0 / nat + d0 = special.ellipk(cupy.array([arg0 ** 2, 1 - arg0 ** 2])) + d1 = special.ellipk(cupy.array([arg1 ** 2, 1 - arg1 ** 2])) + n = (d0[0] * d1[1] / (d0[1] * d1[0])) + else: + raise ValueError("Incorrect type: %s" % type) + return n + + +def buttord(wp, ws, gpass, gstop, analog=False, fs=None): + """Butterworth filter order selection. + + Return the order of the lowest order digital or analog Butterworth filter + that loses no more than `gpass` dB in the passband and has at least + `gstop` dB attenuation in the stopband. + + Parameters + ---------- + wp, ws : float + Passband and stopband edge frequencies. + + For digital filters, these are in the same units as `fs`. By default, + `fs` is 2 half-cycles/sample, so these are normalized from 0 to 1, + where 1 is the Nyquist frequency. (`wp` and `ws` are thus in + half-cycles / sample.) For example: + + - Lowpass: wp = 0.2, ws = 0.3 + - Highpass: wp = 0.3, ws = 0.2 + - Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6] + - Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5] + + For analog filters, `wp` and `ws` are angular frequencies + (e.g., rad/s). + gpass : float + The maximum loss in the passband (dB). + gstop : float + The minimum attenuation in the stopband (dB). 
+ analog : bool, optional + When True, return an analog filter, otherwise a digital filter is + returned. + fs : float, optional + The sampling frequency of the digital system. + + .. versionadded:: 1.2.0 + + Returns + ------- + ord : int + The lowest order for a Butterworth filter which meets specs. + wn : ndarray or float + The Butterworth natural frequency (i.e. the "3dB frequency"). Should + be used with `butter` to give filter results. If `fs` is specified, + this is in the same units, and `fs` must also be passed to `butter`. + + See Also + -------- + scipy.signal.buttord + butter : Filter design using order and critical points + cheb1ord : Find order and critical points from passband and stopband spec + cheb2ord, ellipord + iirfilter : General filter design using order and critical frequencies + iirdesign : General filter design using passband and stopband spec + + """ + _validate_gpass_gstop(gpass, gstop) + wp, ws, filter_type = _validate_wp_ws(wp, ws, fs, analog) + passb, stopb = _pre_warp(wp, ws, analog) + nat, passb = _find_nat_freq( + stopb, passb, gpass, gstop, filter_type, 'butter') + + GSTOP = 10 ** (0.1 * cupy.abs(gstop)) + GPASS = 10 ** (0.1 * cupy.abs(gpass)) + ord = int(cupy.ceil(cupy.log10((GSTOP - 1.0) / + (GPASS - 1.0)) / (2 * cupy.log10(nat)))) + + # Find the Butterworth natural frequency WN (or the "3dB" frequency") + # to give exactly gpass at passb. 
+ try: + W0 = (GPASS - 1.0) ** (-1.0 / (2.0 * ord)) + except ZeroDivisionError: + W0 = 1.0 + warnings.warn("Order is zero...check input parameters.", + RuntimeWarning, 2) + + # now convert this frequency back from lowpass prototype + # to the original analog filter + + if filter_type == 1: # low + WN = W0 * passb + elif filter_type == 2: # high + WN = passb / W0 + elif filter_type == 3: # stop + WN = cupy.empty(2, float) + discr = cupy.sqrt((passb[1] - passb[0]) ** 2 + + 4 * W0 ** 2 * passb[0] * passb[1]) + WN[0] = ((passb[1] - passb[0]) + discr) / (2 * W0) + WN[1] = ((passb[1] - passb[0]) - discr) / (2 * W0) + WN = cupy.sort(cupy.abs(WN)) + elif filter_type == 4: # pass + W0 = cupy.array([-W0, W0], dtype=float) + WN = (-W0 * (passb[1] - passb[0]) / 2.0 + + cupy.sqrt(W0 ** 2 / 4.0 * (passb[1] - passb[0]) ** 2 + + passb[0] * passb[1])) + WN = cupy.sort(cupy.abs(WN)) + else: + raise ValueError("Bad type: %s" % filter_type) + + wn = _postprocess_wn(WN, analog, fs) + + return ord, wn + + +def cheb1ord(wp, ws, gpass, gstop, analog=False, fs=None): + """Chebyshev type I filter order selection. + + Return the order of the lowest order digital or analog Chebyshev Type I + filter that loses no more than `gpass` dB in the passband and has at + least `gstop` dB attenuation in the stopband. + + Parameters + ---------- + wp, ws : float + Passband and stopband edge frequencies. + + For digital filters, these are in the same units as `fs`. By default, + `fs` is 2 half-cycles/sample, so these are normalized from 0 to 1, + where 1 is the Nyquist frequency. (`wp` and `ws` are thus in + half-cycles / sample.) For example: + + - Lowpass: wp = 0.2, ws = 0.3 + - Highpass: wp = 0.3, ws = 0.2 + - Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6] + - Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5] + + For analog filters, `wp` and `ws` are angular frequencies + (e.g., rad/s). + gpass : float + The maximum loss in the passband (dB). + gstop : float + The minimum attenuation in the stopband (dB). 
+ analog : bool, optional + When True, return an analog filter, otherwise a digital filter is + returned. + fs : float, optional + The sampling frequency of the digital system. + + Returns + ------- + ord : int + The lowest order for a Chebyshev type I filter that meets specs. + wn : ndarray or float + The Chebyshev natural frequency (the "3dB frequency") for use with + `cheby1` to give filter results. If `fs` is specified, + this is in the same units, and `fs` must also be passed to `cheby1`. + + See Also + -------- + scipy.signal.cheb1ord + cheby1 : Filter design using order and critical points + buttord : Find order and critical points from passband and stopband spec + cheb2ord, ellipord + iirfilter : General filter design using order and critical frequencies + iirdesign : General filter design using passband and stopband spec + + """ + _validate_gpass_gstop(gpass, gstop) + wp, ws, filter_type = _validate_wp_ws(wp, ws, fs, analog) + passb, stopb = _pre_warp(wp, ws, analog) + nat, passb = _find_nat_freq( + stopb, passb, gpass, gstop, filter_type, 'cheby') + + GSTOP = 10 ** (0.1 * cupy.abs(gstop)) + GPASS = 10 ** (0.1 * cupy.abs(gpass)) + v_pass_stop = cupy.arccosh(cupy.sqrt((GSTOP - 1.0) / (GPASS - 1.0))) + ord = int(cupy.ceil(v_pass_stop / cupy.arccosh(nat))) + + # Natural frequencies are just the passband edges + wn = _postprocess_wn(passb, analog, fs) + + return ord, wn + + +def cheb2ord(wp, ws, gpass, gstop, analog=False, fs=None): + """Chebyshev type II filter order selection. + + Return the order of the lowest order digital or analog Chebyshev Type II + filter that loses no more than `gpass` dB in the passband and has at least + `gstop` dB attenuation in the stopband. + + Parameters + ---------- + wp, ws : float + Passband and stopband edge frequencies. + + For digital filters, these are in the same units as `fs`. By default, + `fs` is 2 half-cycles/sample, so these are normalized from 0 to 1, + where 1 is the Nyquist frequency. 
(`wp` and `ws` are thus in + half-cycles / sample.) For example: + + - Lowpass: wp = 0.2, ws = 0.3 + - Highpass: wp = 0.3, ws = 0.2 + - Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6] + - Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5] + + For analog filters, `wp` and `ws` are angular frequencies + (e.g., rad/s). + gpass : float + The maximum loss in the passband (dB). + gstop : float + The minimum attenuation in the stopband (dB). + analog : bool, optional + When True, return an analog filter, otherwise a digital filter is + returned. + fs : float, optional + The sampling frequency of the digital system. + + Returns + ------- + ord : int + The lowest order for a Chebyshev type II filter that meets specs. + wn : ndarray or float + The Chebyshev natural frequency (the "3dB frequency") for use with + `cheby2` to give filter results. If `fs` is specified, + this is in the same units, and `fs` must also be passed to `cheby2`. + + See Also + -------- + scipy.signal.cheb2ord + cheby2 : Filter design using order and critical points + buttord : Find order and critical points from passband and stopband spec + cheb1ord, ellipord + iirfilter : General filter design using order and critical frequencies + iirdesign : General filter design using passband and stopband spec + + """ + _validate_gpass_gstop(gpass, gstop) + wp, ws, filter_type = _validate_wp_ws(wp, ws, fs, analog) + passb, stopb = _pre_warp(wp, ws, analog) + nat, passb = _find_nat_freq( + stopb, passb, gpass, gstop, filter_type, 'cheby') + + GSTOP = 10 ** (0.1 * cupy.abs(gstop)) + GPASS = 10 ** (0.1 * cupy.abs(gpass)) + v_pass_stop = cupy.arccosh(cupy.sqrt((GSTOP - 1.0) / (GPASS - 1.0))) + ord = int(cupy.ceil(v_pass_stop / cupy.arccosh(nat))) + + # Find frequency where analog response is -gpass dB. + # Then convert back from low-pass prototype to the original filter. 
+ + new_freq = cupy.cosh(1.0 / ord * v_pass_stop) + new_freq = 1.0 / new_freq + + if filter_type == 1: + nat = passb / new_freq + elif filter_type == 2: + nat = passb * new_freq + elif filter_type == 3: + nat = cupy.empty(2, dtype=float) + nat[0] = (new_freq / 2.0 * (passb[0] - passb[1]) + + cupy.sqrt(new_freq ** 2 * (passb[1] - passb[0]) ** 2 / 4.0 + + passb[1] * passb[0])) + nat[1] = passb[1] * passb[0] / nat[0] + elif filter_type == 4: + nat = cupy.empty(2, dtype=float) + nat[0] = (1.0 / (2.0 * new_freq) * (passb[0] - passb[1]) + + cupy.sqrt((passb[1] - passb[0]) ** 2 / (4.0 * new_freq ** 2) + + passb[1] * passb[0])) + nat[1] = passb[0] * passb[1] / nat[0] + + wn = _postprocess_wn(nat, analog, fs) + + return ord, wn + + +def ellipord(wp, ws, gpass, gstop, analog=False, fs=None): + """Elliptic (Cauer) filter order selection. + + Return the order of the lowest order digital or analog elliptic filter + that loses no more than `gpass` dB in the passband and has at least + `gstop` dB attenuation in the stopband. + + Parameters + ---------- + wp, ws : float + Passband and stopband edge frequencies. + + For digital filters, these are in the same units as `fs`. By default, + `fs` is 2 half-cycles/sample, so these are normalized from 0 to 1, + where 1 is the Nyquist frequency. (`wp` and `ws` are thus in + half-cycles / sample.) For example: + + - Lowpass: wp = 0.2, ws = 0.3 + - Highpass: wp = 0.3, ws = 0.2 + - Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6] + - Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5] + + For analog filters, `wp` and `ws` are angular frequencies + (e.g., rad/s). + gpass : float + The maximum loss in the passband (dB). + gstop : float + The minimum attenuation in the stopband (dB). + analog : bool, optional + When True, return an analog filter, otherwise a digital filter is + returned. + fs : float, optional + The sampling frequency of the digital system. + + Returns + ------- + ord : int + The lowest order for an Elliptic (Cauer) filter that meets specs. 
+ wn : ndarray or float + The Chebyshev natural frequency (the "3dB frequency") for use with + `ellip` to give filter results. If `fs` is specified, + this is in the same units, and `fs` must also be passed to `ellip`. + + See Also + -------- + scipy.signal.ellipord + ellip : Filter design using order and critical points + buttord : Find order and critical points from passband and stopband spec + cheb1ord, cheb2ord + iirfilter : General filter design using order and critical frequencies + iirdesign : General filter design using passband and stopband spec + """ + _validate_gpass_gstop(gpass, gstop) + wp, ws, filter_type = _validate_wp_ws(wp, ws, fs, analog) + passb, stopb = _pre_warp(wp, ws, analog) + nat, passb = _find_nat_freq( + stopb, passb, gpass, gstop, filter_type, 'ellip') + + arg1_sq = _pow10m1(0.1 * gpass) / _pow10m1(0.1 * gstop) + arg0 = 1.0 / nat + d0 = special.ellipk(arg0 ** 2), special.ellipkm1(arg0 ** 2) + d1 = special.ellipk(arg1_sq), special.ellipkm1(arg1_sq) + ord = int(cupy.ceil(d0[0] * d1[1] / (d0[1] * d1[0]))) + + wn = _postprocess_wn(passb, analog, fs) + + return ord, wn diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/_iir_filter_design.py b/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/_iir_filter_design.py new file mode 100644 index 0000000000000000000000000000000000000000..ef5a494e4a14752cffd864c13afa06038744780d --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/_iir_filter_design.py @@ -0,0 +1,994 @@ +"""IIR filter design APIs""" +from math import pi +import math + +import cupy + +from cupyx.scipy.signal._iir_filter_conversions import ( + lp2bp_zpk, lp2lp_zpk, lp2hp_zpk, lp2bs_zpk, bilinear_zpk, zpk2tf, zpk2sos) +from cupyx.scipy.signal._iir_filter_conversions import ( + buttap, cheb1ap, cheb2ap, ellipap, buttord, ellipord, cheb1ord, cheb2ord, + _validate_gpass_gstop) + + +# FIXME + +def besselap(): + raise NotImplementedError + + +bessel_norms = {'fix': 'me'} + + +def iirfilter(N, Wn, 
rp=None, rs=None, btype='band', analog=False, + ftype='butter', output='ba', fs=None): + """ + IIR digital and analog filter design given order and critical points. + + Design an Nth-order digital or analog filter and return the filter + coefficients. + + Parameters + ---------- + N : int + The order of the filter. + Wn : array_like + A scalar or length-2 sequence giving the critical frequencies. + + For digital filters, `Wn` are in the same units as `fs`. By default, + `fs` is 2 half-cycles/sample, so these are normalized from 0 to 1, + where 1 is the Nyquist frequency. (`Wn` is thus in + half-cycles / sample.) + + For analog filters, `Wn` is an angular frequency (e.g., rad/s). + + When Wn is a length-2 sequence, ``Wn[0]`` must be less than ``Wn[1]``. + rp : float, optional + For Chebyshev and elliptic filters, provides the maximum ripple + in the passband. (dB) + rs : float, optional + For Chebyshev and elliptic filters, provides the minimum attenuation + in the stop band. (dB) + btype : {'bandpass', 'lowpass', 'highpass', 'bandstop'}, optional + The type of filter. Default is 'bandpass'. + analog : bool, optional + When True, return an analog filter, otherwise a digital filter is + returned. + ftype : str, optional + The type of IIR filter to design: + + - Butterworth : 'butter' + - Chebyshev I : 'cheby1' + - Chebyshev II : 'cheby2' + - Cauer/elliptic: 'ellip' + - Bessel/Thomson: 'bessel' + + output : {'ba', 'zpk', 'sos'}, optional + Filter form of the output: + + - second-order sections (recommended): 'sos' + - numerator/denominator (default) : 'ba' + - pole-zero : 'zpk' + + In general the second-order sections ('sos') form is + recommended because inferring the coefficients for the + numerator/denominator form ('ba') suffers from numerical + instabilities. For reasons of backward compatibility the default + form is the numerator/denominator form ('ba'), where the 'b' + and the 'a' in 'ba' refer to the commonly used names of the + coefficients used. 
+ + Note: Using the second-order sections form ('sos') is sometimes + associated with additional computational costs: for + data-intense use cases it is therefore recommended to also + investigate the numerator/denominator form ('ba'). + + fs : float, optional + The sampling frequency of the digital system. + + Returns + ------- + b, a : ndarray, ndarray + Numerator (`b`) and denominator (`a`) polynomials of the IIR filter. + Only returned if ``output='ba'``. + z, p, k : ndarray, ndarray, float + Zeros, poles, and system gain of the IIR filter transfer + function. Only returned if ``output='zpk'``. + sos : ndarray + Second-order sections representation of the IIR filter. + Only returned if ``output='sos'``. + + See Also + -------- + butter : Filter design using order and critical points + cheby1, cheby2, ellip, bessel + buttord : Find order and critical points from passband and stopband spec + cheb1ord, cheb2ord, ellipord + iirdesign : General filter design using passband and stopband spec + scipy.signal.iirfilter + + """ + ftype, btype, output = [x.lower() for x in (ftype, btype, output)] + + Wn = cupy.asarray(Wn) + # if cupy.any(Wn <= 0): + # raise ValueError("filter critical frequencies must be greater than 0") + + if Wn.size > 1 and not Wn[0] < Wn[1]: + raise ValueError("Wn[0] must be less than Wn[1]") + + if fs is not None: + if analog: + raise ValueError("fs cannot be specified for an analog filter") + Wn = 2*Wn/fs + + try: + btype = band_dict[btype] + except KeyError as e: + raise ValueError( + "'%s' is an invalid bandtype for filter." % btype) from e + + try: + typefunc = filter_dict[ftype][0] + except KeyError as e: + raise ValueError( + "'%s' is not a valid basic IIR filter." % ftype) from e + + if output not in ['ba', 'zpk', 'sos']: + raise ValueError("'%s' is not a valid output form." 
% output) + + if rp is not None and rp < 0: + raise ValueError("passband ripple (rp) must be positive") + + if rs is not None and rs < 0: + raise ValueError("stopband attenuation (rs) must be positive") + + # Get analog lowpass prototype + if typefunc == buttap: + z, p, k = typefunc(N) + elif typefunc == besselap: + z, p, k = typefunc(N, norm=bessel_norms[ftype]) + elif typefunc == cheb1ap: + if rp is None: + raise ValueError("passband ripple (rp) must be provided to " + "design a Chebyshev I filter.") + z, p, k = typefunc(N, rp) + elif typefunc == cheb2ap: + if rs is None: + raise ValueError("stopband attenuation (rs) must be provided to " + "design an Chebyshev II filter.") + z, p, k = typefunc(N, rs) + elif typefunc == ellipap: + if rs is None or rp is None: + raise ValueError("Both rp and rs must be provided to design an " + "elliptic filter.") + z, p, k = typefunc(N, rp, rs) + else: + raise NotImplementedError("'%s' not implemented in iirfilter." % ftype) + + # Pre-warp frequencies for digital filter design + if not analog: + if cupy.any(Wn <= 0) or cupy.any(Wn >= 1): + if fs is not None: + raise ValueError("Digital filter critical frequencies must " + f"be 0 < Wn < fs/2 (fs={fs} -> fs/2={fs/2})") + raise ValueError("Digital filter critical frequencies " + "must be 0 < Wn < 1") + fs = 2.0 + warped = 2 * fs * cupy.tan(pi * Wn / fs) + else: + warped = Wn + + # transform to lowpass, bandpass, highpass, or bandstop + if btype in ('lowpass', 'highpass'): + if cupy.size(Wn) != 1: + raise ValueError('Must specify a single critical frequency Wn ' + 'for lowpass or highpass filter') + + if btype == 'lowpass': + z, p, k = lp2lp_zpk(z, p, k, wo=warped) + elif btype == 'highpass': + z, p, k = lp2hp_zpk(z, p, k, wo=warped) + elif btype in ('bandpass', 'bandstop'): + try: + bw = warped[1] - warped[0] + wo = cupy.sqrt(warped[0] * warped[1]) + except IndexError as e: + raise ValueError('Wn must specify start and stop frequencies for ' + 'bandpass or bandstop filter') from e + 
+ if btype == 'bandpass': + z, p, k = lp2bp_zpk(z, p, k, wo=wo, bw=bw) + elif btype == 'bandstop': + z, p, k = lp2bs_zpk(z, p, k, wo=wo, bw=bw) + else: + raise NotImplementedError("'%s' not implemented in iirfilter." % btype) + + # Find discrete equivalent if necessary + if not analog: + z, p, k = bilinear_zpk(z, p, k, fs=fs) + + # Transform to proper out type (pole-zero, state-space, numer-denom) + if output == 'zpk': + return z, p, k + elif output == 'ba': + return zpk2tf(z, p, k) + elif output == 'sos': + return zpk2sos(z, p, k, analog=analog) + + +def butter(N, Wn, btype='low', analog=False, output='ba', fs=None): + """ + Butterworth digital and analog filter design. + + Design an Nth-order digital or analog Butterworth filter and return + the filter coefficients. + + Parameters + ---------- + N : int + The order of the filter. For 'bandpass' and 'bandstop' filters, + the resulting order of the final second-order sections ('sos') + matrix is ``2*N``, with `N` the number of biquad sections + of the desired system. + Wn : array_like + The critical frequency or frequencies. For lowpass and highpass + filters, Wn is a scalar; for bandpass and bandstop filters, + Wn is a length-2 sequence. + + For a Butterworth filter, this is the point at which the gain + drops to 1/sqrt(2) that of the passband (the "-3 dB point"). + + For digital filters, if `fs` is not specified, `Wn` units are + normalized from 0 to 1, where 1 is the Nyquist frequency (`Wn` is + thus in half cycles / sample and defined as 2*critical frequencies + / `fs`). If `fs` is specified, `Wn` is in the same units as `fs`. + + For analog filters, `Wn` is an angular frequency (e.g. rad/s). + btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional + The type of filter. Default is 'lowpass'. + analog : bool, optional + When True, return an analog filter, otherwise a digital filter is + returned. 
+ output : {'ba', 'zpk', 'sos'}, optional + Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or + second-order sections ('sos'). Default is 'ba' for backwards + compatibility, but 'sos' should be used for general-purpose filtering. + fs : float, optional + The sampling frequency of the digital system. + + Returns + ------- + b, a : ndarray, ndarray + Numerator (`b`) and denominator (`a`) polynomials of the IIR filter. + Only returned if ``output='ba'``. + z, p, k : ndarray, ndarray, float + Zeros, poles, and system gain of the IIR filter transfer + function. Only returned if ``output='zpk'``. + sos : ndarray + Second-order sections representation of the IIR filter. + Only returned if ``output='sos'``. + + See Also + -------- + buttord, buttap + iirfilter + scipy.signal.butter + + + Notes + ----- + The Butterworth filter has maximally flat frequency response in the + passband. + + If the transfer function form ``[b, a]`` is requested, numerical + problems can occur since the conversion between roots and + the polynomial coefficients is a numerically sensitive operation, + even for N >= 4. It is recommended to work with the SOS + representation. + + .. warning:: + Designing high-order and narrowband IIR filters in TF form can + result in unstable or incorrect filtering due to floating point + numerical precision issues. Consider inspecting output filter + characteristics `freqz` or designing the filters with second-order + sections via ``output='sos'``. + """ + return iirfilter(N, Wn, btype=btype, analog=analog, + output=output, ftype='butter', fs=fs) + + +def cheby1(N, rp, Wn, btype='low', analog=False, output='ba', fs=None): + """ + Chebyshev type I digital and analog filter design. + + Design an Nth-order digital or analog Chebyshev type I filter and + return the filter coefficients. + + Parameters + ---------- + N : int + The order of the filter. + rp : float + The maximum ripple allowed below unity gain in the passband. 
+ Specified in decibels, as a positive number. + Wn : array_like + A scalar or length-2 sequence giving the critical frequencies. + For Type I filters, this is the point in the transition band at which + the gain first drops below -`rp`. + + For digital filters, `Wn` are in the same units as `fs`. By default, + `fs` is 2 half-cycles/sample, so these are normalized from 0 to 1, + where 1 is the Nyquist frequency. (`Wn` is thus in + half-cycles / sample.) + + For analog filters, `Wn` is an angular frequency (e.g., rad/s). + btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional + The type of filter. Default is 'lowpass'. + analog : bool, optional + When True, return an analog filter, otherwise a digital filter is + returned. + output : {'ba', 'zpk', 'sos'}, optional + Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or + second-order sections ('sos'). Default is 'ba' for backwards + compatibility, but 'sos' should be used for general-purpose filtering. + fs : float, optional + The sampling frequency of the digital system. + + Returns + ------- + b, a : ndarray, ndarray + Numerator (`b`) and denominator (`a`) polynomials of the IIR filter. + Only returned if ``output='ba'``. + z, p, k : ndarray, ndarray, float + Zeros, poles, and system gain of the IIR filter transfer + function. Only returned if ``output='zpk'``. + sos : ndarray + Second-order sections representation of the IIR filter. + Only returned if ``output='sos'``. + + See Also + -------- + cheb1ord, cheb1ap + iirfilter + scipy.signal.cheby1 + + Notes + ----- + The Chebyshev type I filter maximizes the rate of cutoff between the + frequency response's passband and stopband, at the expense of ripple in + the passband and increased ringing in the step response. + + Type I filters roll off faster than Type II (`cheby2`), but Type II + filters do not have any ripple in the passband. 
+ + The equiripple passband has N maxima or minima (for example, a + 5th-order filter has 3 maxima and 2 minima). Consequently, the DC gain is + unity for odd-order filters, or -rp dB for even-order filters. + """ + return iirfilter(N, Wn, rp=rp, btype=btype, analog=analog, + output=output, ftype='cheby1', fs=fs) + + +def cheby2(N, rs, Wn, btype='low', analog=False, output='ba', fs=None): + """ + Chebyshev type II digital and analog filter design. + + Design an Nth-order digital or analog Chebyshev type II filter and + return the filter coefficients. + + Parameters + ---------- + N : int + The order of the filter. + rs : float + The minimum attenuation required in the stop band. + Specified in decibels, as a positive number. + Wn : array_like + A scalar or length-2 sequence giving the critical frequencies. + For Type II filters, this is the point in the transition band at which + the gain first reaches -`rs`. + + For digital filters, `Wn` are in the same units as `fs`. By default, + `fs` is 2 half-cycles/sample, so these are normalized from 0 to 1, + where 1 is the Nyquist frequency. (`Wn` is thus in + half-cycles / sample.) + + For analog filters, `Wn` is an angular frequency (e.g., rad/s). + btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional + The type of filter. Default is 'lowpass'. + analog : bool, optional + When True, return an analog filter, otherwise a digital filter is + returned. + output : {'ba', 'zpk', 'sos'}, optional + Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or + second-order sections ('sos'). Default is 'ba' for backwards + compatibility, but 'sos' should be used for general-purpose filtering. + fs : float, optional + The sampling frequency of the digital system. + + Returns + ------- + b, a : ndarray, ndarray + Numerator (`b`) and denominator (`a`) polynomials of the IIR filter. + Only returned if ``output='ba'``. 
+ z, p, k : ndarray, ndarray, float + Zeros, poles, and system gain of the IIR filter transfer + function. Only returned if ``output='zpk'``. + sos : ndarray + Second-order sections representation of the IIR filter. + Only returned if ``output='sos'``. + + See Also + -------- + cheb2ord, cheb2ap + iirfilter + scipy.signal.cheby2 + + Notes + ----- + The Chebyshev type II filter maximizes the rate of cutoff between the + frequency response's passband and stopband, at the expense of ripple in + the stopband and increased ringing in the step response. + + Type II filters do not roll off as fast as Type I (`cheby1`). + """ + return iirfilter(N, Wn, rs=rs, btype=btype, analog=analog, + output=output, ftype='cheby2', fs=fs) + + +def ellip(N, rp, rs, Wn, btype='low', analog=False, output='ba', fs=None): + """ + Elliptic (Cauer) digital and analog filter design. + + Design an Nth-order digital or analog elliptic filter and return + the filter coefficients. + + Parameters + ---------- + N : int + The order of the filter. + rp : float + The maximum ripple allowed below unity gain in the passband. + Specified in decibels, as a positive number. + rs : float + The minimum attenuation required in the stop band. + Specified in decibels, as a positive number. + Wn : array_like + A scalar or length-2 sequence giving the critical frequencies. + For elliptic filters, this is the point in the transition band at + which the gain first drops below -`rp`. + + For digital filters, `Wn` are in the same units as `fs`. By default, + `fs` is 2 half-cycles/sample, so these are normalized from 0 to 1, + where 1 is the Nyquist frequency. (`Wn` is thus in + half-cycles / sample.) + + For analog filters, `Wn` is an angular frequency (e.g., rad/s). + btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional + The type of filter. Default is 'lowpass'. + analog : bool, optional + When True, return an analog filter, otherwise a digital filter is + returned. 
+ output : {'ba', 'zpk', 'sos'}, optional + Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or + second-order sections ('sos'). Default is 'ba' for backwards + compatibility, but 'sos' should be used for general-purpose filtering. + fs : float, optional + The sampling frequency of the digital system. + + Returns + ------- + b, a : ndarray, ndarray + Numerator (`b`) and denominator (`a`) polynomials of the IIR filter. + Only returned if ``output='ba'``. + z, p, k : ndarray, ndarray, float + Zeros, poles, and system gain of the IIR filter transfer + function. Only returned if ``output='zpk'``. + sos : ndarray + Second-order sections representation of the IIR filter. + Only returned if ``output='sos'``. + + See Also + -------- + ellipord, ellipap + iirfilter + scipy.signal.ellip + + Notes + ----- + Also known as Cauer or Zolotarev filters, the elliptical filter maximizes + the rate of transition between the frequency response's passband and + stopband, at the expense of ripple in both, and increased ringing in the + step response. + + As `rp` approaches 0, the elliptical filter becomes a Chebyshev + type II filter (`cheby2`). As `rs` approaches 0, it becomes a Chebyshev + type I filter (`cheby1`). As both approach 0, it becomes a Butterworth + filter (`butter`). + + The equiripple passband has N maxima or minima (for example, a + 5th-order filter has 3 maxima and 2 minima). Consequently, the DC gain is + unity for odd-order filters, or -rp dB for even-order filters. + """ + return iirfilter(N, Wn, rs=rs, rp=rp, btype=btype, analog=analog, + output=output, ftype='elliptic', fs=fs) + + +def iirdesign(wp, ws, gpass, gstop, analog=False, ftype='ellip', output='ba', + fs=None): + """Complete IIR digital and analog filter design. + + Given passband and stopband frequencies and gains, construct an analog or + digital IIR filter of minimum order for a given basic type. 
Return the + output in numerator, denominator ('ba'), pole-zero ('zpk') or second order + sections ('sos') form. + + Parameters + ---------- + wp, ws : float or array like, shape (2,) + Passband and stopband edge frequencies. Possible values are scalars + (for lowpass and highpass filters) or ranges (for bandpass and bandstop + filters). + For digital filters, these are in the same units as `fs`. By default, + `fs` is 2 half-cycles/sample, so these are normalized from 0 to 1, + where 1 is the Nyquist frequency. For example: + + - Lowpass: wp = 0.2, ws = 0.3 + - Highpass: wp = 0.3, ws = 0.2 + - Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6] + - Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5] + + For analog filters, `wp` and `ws` are angular frequencies + (e.g., rad/s). Note, that for bandpass and bandstop filters passband + must lie strictly inside stopband or vice versa. + gpass : float + The maximum loss in the passband (dB). + gstop : float + The minimum attenuation in the stopband (dB). + analog : bool, optional + When True, return an analog filter, otherwise a digital filter is + returned. + ftype : str, optional + The type of IIR filter to design: + + - Butterworth : 'butter' + - Chebyshev I : 'cheby1' + - Chebyshev II : 'cheby2' + - Cauer/elliptic: 'ellip' + + output : {'ba', 'zpk', 'sos'}, optional + Filter form of the output: + + - second-order sections (recommended): 'sos' + - numerator/denominator (default) : 'ba' + - pole-zero : 'zpk' + + In general the second-order sections ('sos') form is + recommended because inferring the coefficients for the + numerator/denominator form ('ba') suffers from numerical + instabilities. For reasons of backward compatibility the default + form is the numerator/denominator form ('ba'), where the 'b' + and the 'a' in 'ba' refer to the commonly used names of the + coefficients used. 
+ + Note: Using the second-order sections form ('sos') is sometimes + associated with additional computational costs: for + data-intense use cases it is therefore recommended to also + investigate the numerator/denominator form ('ba'). + + fs : float, optional + The sampling frequency of the digital system. + + .. versionadded:: 1.2.0 + + Returns + ------- + b, a : ndarray, ndarray + Numerator (`b`) and denominator (`a`) polynomials of the IIR filter. + Only returned if ``output='ba'``. + z, p, k : ndarray, ndarray, float + Zeros, poles, and system gain of the IIR filter transfer + function. Only returned if ``output='zpk'``. + sos : ndarray + Second-order sections representation of the IIR filter. + Only returned if ``output='sos'``. + + See Also + -------- + scipy.signal.iirdesign + butter : Filter design using order and critical points + cheby1, cheby2, ellip, bessel + buttord : Find order and critical points from passband and stopband spec + cheb1ord, cheb2ord, ellipord + iirfilter : General filter design using order and critical frequencies + """ + try: + ordfunc = filter_dict[ftype][1] + except KeyError as e: + raise ValueError("Invalid IIR filter type: %s" % ftype) from e + except IndexError as e: + raise ValueError(("%s does not have order selection. 
Use " + "iirfilter function.") % ftype) from e + + _validate_gpass_gstop(gpass, gstop) + + wp = cupy.atleast_1d(wp) + ws = cupy.atleast_1d(ws) + + if wp.shape[0] != ws.shape[0] or wp.shape not in [(1,), (2,)]: + raise ValueError("wp and ws must have one or two elements each, and" + "the same shape, got %s and %s" + % (wp.shape, ws.shape)) + + if any(wp <= 0) or any(ws <= 0): + raise ValueError("Values for wp, ws must be greater than 0") + + if not analog: + if fs is None: + if any(wp >= 1) or any(ws >= 1): + raise ValueError("Values for wp, ws must be less than 1") + elif any(wp >= fs/2) or any(ws >= fs/2): + raise ValueError("Values for wp, ws must be less than fs/2" + " (fs={} -> fs/2={})".format(fs, fs/2)) + + if wp.shape[0] == 2: + if not ((ws[0] < wp[0] and wp[1] < ws[1]) or + (wp[0] < ws[0] and ws[1] < wp[1])): + raise ValueError("Passband must lie strictly inside stopband" + " or vice versa") + + band_type = 2 * (len(wp) - 1) + band_type += 1 + if wp[0] >= ws[0]: + band_type += 1 + + btype = {1: 'lowpass', 2: 'highpass', + 3: 'bandstop', 4: 'bandpass'}[band_type] + + N, Wn = ordfunc(wp, ws, gpass, gstop, analog=analog, fs=fs) + return iirfilter(N, Wn, rp=gpass, rs=gstop, analog=analog, btype=btype, + ftype=ftype, output=output, fs=fs) + + +def iircomb(w0, Q, ftype='notch', fs=2.0, *, pass_zero=False): + """ + Design IIR notching or peaking digital comb filter. + + A notching comb filter consists of regularly-spaced band-stop filters with + a narrow bandwidth (high quality factor). Each rejects a narrow frequency + band and leaves the rest of the spectrum little changed. + + A peaking comb filter consists of regularly-spaced band-pass filters with + a narrow bandwidth (high quality factor). Each rejects components outside + a narrow frequency band. + + Parameters + ---------- + w0 : float + The fundamental frequency of the comb filter (the spacing between its + peaks). This must evenly divide the sampling frequency. 
If `fs` is + specified, this is in the same units as `fs`. By default, it is + a normalized scalar that must satisfy ``0 < w0 < 1``, with + ``w0 = 1`` corresponding to half of the sampling frequency. + Q : float + Quality factor. Dimensionless parameter that characterizes + notch filter -3 dB bandwidth ``bw`` relative to its center + frequency, ``Q = w0/bw``. + ftype : {'notch', 'peak'} + The type of comb filter generated by the function. If 'notch', then + the Q factor applies to the notches. If 'peak', then the Q factor + applies to the peaks. Default is 'notch'. + fs : float, optional + The sampling frequency of the signal. Default is 2.0. + pass_zero : bool, optional + If False (default), the notches (nulls) of the filter are centered on + frequencies [0, w0, 2*w0, ...], and the peaks are centered on the + midpoints [w0/2, 3*w0/2, 5*w0/2, ...]. If True, the peaks are centered + on [0, w0, 2*w0, ...] (passing zero frequency) and vice versa. + + Returns + ------- + b, a : ndarray, ndarray + Numerator (``b``) and denominator (``a``) polynomials + of the IIR filter. + + Raises + ------ + ValueError + If `w0` is less than or equal to 0 or greater than or equal to + ``fs/2``, if `fs` is not divisible by `w0`, if `ftype` + is not 'notch' or 'peak' + + See Also + -------- + scipy.signal.iircomb + iirnotch + iirpeak + + Notes + ----- + The TF implementation of the + comb filter is numerically stable even at higher orders due to the + use of a single repeated pole, which won't suffer from precision loss. + + References + ---------- + Sophocles J. Orfanidis, "Introduction To Signal Processing", + Prentice-Hall, 1996, ch. 
11, "Digital Filter Design" + """ + + # Convert w0, Q, and fs to float + w0 = float(w0) + Q = float(Q) + fs = float(fs) + + # Check for invalid cutoff frequency or filter type + ftype = ftype.lower() + if not 0 < w0 < fs / 2: + raise ValueError("w0 must be between 0 and {}" + " (nyquist), but given {}.".format(fs / 2, w0)) + if ftype not in ('notch', 'peak'): + raise ValueError('ftype must be either notch or peak.') + + # Compute the order of the filter + N = round(fs / w0) + + # Check for cutoff frequency divisibility + if abs(w0 - fs/N)/fs > 1e-14: + raise ValueError('fs must be divisible by w0.') + + # Compute frequency in radians and filter bandwidth + # Eq. 11.3.1 (p. 574) from reference [1] + w0 = (2 * pi * w0) / fs + w_delta = w0 / Q + + # Define base gain values depending on notch or peak filter + # Compute -3dB attenuation + # Eqs. 11.4.1 and 11.4.2 (p. 582) from reference [1] + if ftype == 'notch': + G0, G = 1, 0 + elif ftype == 'peak': + G0, G = 0, 1 + GB = 1 / math.sqrt(2) + + # Compute beta + # Eq. 11.5.3 (p. 591) from reference [1] + beta = math.sqrt((GB**2 - G0**2) / (G**2 - GB**2)) * \ + math.tan(N * w_delta / 4) + + # Compute filter coefficients + # Eq 11.5.1 (p. 590) variables a, b, c from reference [1] + ax = (1 - beta) / (1 + beta) + bx = (G0 + G * beta) / (1 + beta) + cx = (G0 - G * beta) / (1 + beta) + + # Last coefficients are negative to get peaking comb that passes zero or + # notching comb that doesn't. + negative_coef = ((ftype == 'peak' and pass_zero) or + (ftype == 'notch' and not pass_zero)) + + # Compute numerator coefficients + # Eq 11.5.1 (p. 590) or Eq 11.5.4 (p. 591) from reference [1] + # b - cz^-N or b + cz^-N + b = cupy.zeros(N + 1) + b[0] = bx + if negative_coef: + b[-1] = -cx + else: + b[-1] = +cx + + # Compute denominator coefficients + # Eq 11.5.1 (p. 590) or Eq 11.5.4 (p. 
591) from reference [1] + # 1 - az^-N or 1 + az^-N + a = cupy.zeros(N + 1) + a[0] = 1 + if negative_coef: + a[-1] = -ax + else: + a[-1] = +ax + + return b, a + + +def iirnotch(w0, Q, fs=2.0): + """ + Design second-order IIR notch digital filter. + + A notch filter is a band-stop filter with a narrow bandwidth + (high quality factor). It rejects a narrow frequency band and + leaves the rest of the spectrum little changed. + + Parameters + ---------- + w0 : float + Frequency to remove from a signal. If `fs` is specified, this is in + the same units as `fs`. By default, it is a normalized scalar that must + satisfy ``0 < w0 < 1``, with ``w0 = 1`` corresponding to half of the + sampling frequency. + Q : float + Quality factor. Dimensionless parameter that characterizes + notch filter -3 dB bandwidth ``bw`` relative to its center + frequency, ``Q = w0/bw``. + fs : float, optional + The sampling frequency of the digital system. + + Returns + ------- + b, a : ndarray, ndarray + Numerator (``b``) and denominator (``a``) polynomials + of the IIR filter. + + See Also + -------- + scipy.signal.iirnotch + + References + ---------- + Sophocles J. Orfanidis, "Introduction To Signal Processing", + Prentice-Hall, 1996 + """ + + return _design_notch_peak_filter(w0, Q, "notch", fs) + + +def iirpeak(w0, Q, fs=2.0): + """ + Design second-order IIR peak (resonant) digital filter. + + A peak filter is a band-pass filter with a narrow bandwidth + (high quality factor). It rejects components outside a narrow + frequency band. + + Parameters + ---------- + w0 : float + Frequency to be retained in a signal. If `fs` is specified, this is in + the same units as `fs`. By default, it is a normalized scalar that must + satisfy ``0 < w0 < 1``, with ``w0 = 1`` corresponding to half of the + sampling frequency. + Q : float + Quality factor. Dimensionless parameter that characterizes + peak filter -3 dB bandwidth ``bw`` relative to its center + frequency, ``Q = w0/bw``. 
+ fs : float, optional + The sampling frequency of the digital system. + + + Returns + ------- + b, a : ndarray, ndarray + Numerator (``b``) and denominator (``a``) polynomials + of the IIR filter. + + See Also + -------- + scpy.signal.iirpeak + + References + ---------- + Sophocles J. Orfanidis, "Introduction To Signal Processing", + Prentice-Hall, 1996 + """ + + return _design_notch_peak_filter(w0, Q, "peak", fs) + + +def _design_notch_peak_filter(w0, Q, ftype, fs=2.0): + """ + Design notch or peak digital filter. + + Parameters + ---------- + w0 : float + Normalized frequency to remove from a signal. If `fs` is specified, + this is in the same units as `fs`. By default, it is a normalized + scalar that must satisfy ``0 < w0 < 1``, with ``w0 = 1`` + corresponding to half of the sampling frequency. + Q : float + Quality factor. Dimensionless parameter that characterizes + notch filter -3 dB bandwidth ``bw`` relative to its center + frequency, ``Q = w0/bw``. + ftype : str + The type of IIR filter to design: + + - notch filter : ``notch`` + - peak filter : ``peak`` + fs : float, optional + The sampling frequency of the digital system. + + Returns + ------- + b, a : ndarray, ndarray + Numerator (``b``) and denominator (``a``) polynomials + of the IIR filter. 
+ """ + + # Guarantee that the inputs are floats + w0 = float(w0) + Q = float(Q) + w0 = 2 * w0 / fs + + # Checks if w0 is within the range + if w0 > 1.0 or w0 < 0.0: + raise ValueError("w0 should be such that 0 < w0 < 1") + + # Get bandwidth + bw = w0 / Q + + # Normalize inputs + bw = bw * pi + w0 = w0 * pi + + # Compute -3dB attenuation + gb = 1 / math.sqrt(2) + + if ftype == "notch": + # Compute beta: formula 11.3.4 (p.575) from reference [1] + beta = (math.sqrt(1.0 - gb**2.0) / gb) * math.tan(bw / 2.0) + elif ftype == "peak": + # Compute beta: formula 11.3.19 (p.579) from reference [1] + beta = (gb / math.sqrt(1.0 - gb**2.0)) * math.tan(bw / 2.0) + else: + raise ValueError("Unknown ftype.") + + # Compute gain: formula 11.3.6 (p.575) from reference [1] + gain = 1.0 / (1.0 + beta) + + # Compute numerator b and denominator a + # formulas 11.3.7 (p.575) and 11.3.21 (p.579) + # from reference [1] + if ftype == "notch": + b = [gain * x for x in (1.0, -2.0 * math.cos(w0), 1.0)] + else: + b = [(1.0 - gain) * x for x in (1.0, 0.0, -1.0)] + + a = [1.0, -2.0 * gain * math.cos(w0), 2.0 * gain - 1.0] + + a = cupy.asarray(a) + b = cupy.asarray(b) + + return b, a + + +filter_dict = {'butter': [buttap, buttord], + 'butterworth': [buttap, buttord], + + 'cauer': [ellipap, ellipord], + 'elliptic': [ellipap, ellipord], + 'ellip': [ellipap, ellipord], + + 'bessel': [besselap], + 'bessel_phase': [besselap], + 'bessel_delay': [besselap], + 'bessel_mag': [besselap], + + 'cheby1': [cheb1ap, cheb1ord], + 'chebyshev1': [cheb1ap, cheb1ord], + 'chebyshevi': [cheb1ap, cheb1ord], + + 'cheby2': [cheb2ap, cheb2ord], + 'chebyshev2': [cheb2ap, cheb2ord], + 'chebyshevii': [cheb2ap, cheb2ord], + } + +band_dict = {'band': 'bandpass', + 'bandpass': 'bandpass', + 'pass': 'bandpass', + 'bp': 'bandpass', + + 'bs': 'bandstop', + 'bandstop': 'bandstop', + 'bands': 'bandstop', + 'stop': 'bandstop', + + 'l': 'lowpass', + 'low': 'lowpass', + 'lowpass': 'lowpass', + 'lp': 'lowpass', + + 'high': 'highpass', + 
'highpass': 'highpass', + 'h': 'highpass', + 'hp': 'highpass', + } diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/_iir_utils.py b/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/_iir_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..68cb28898c42d6de89589c903a96e096e3fd1671 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/_iir_utils.py @@ -0,0 +1,737 @@ + +from itertools import product + +import cupy +from cupy._core.internal import _normalize_axis_index +from cupy._core._scalar import get_typename +from cupy_backends.cuda.api import runtime +from cupyx.scipy.signal._arraytools import axis_slice + + +def _get_typename(dtype): + typename = get_typename(dtype) + if cupy.dtype(dtype).kind == 'c': + typename = 'thrust::' + typename + elif typename == 'float16': + if runtime.is_hip: + # 'half' in name_expressions weirdly raises + # HIPRTC_ERROR_NAME_EXPRESSION_NOT_VALID in getLoweredName() on + # ROCm + typename = '__half' + else: + typename = 'half' + return typename + + +FLOAT_TYPES = [cupy.float16, cupy.float32, cupy.float64] +INT_TYPES = [cupy.int8, cupy.int16, cupy.int32, cupy.int64] +COMPLEX_TYPES = [cupy.complex64, cupy.complex128] +UNSIGNED_TYPES = [cupy.uint8, cupy.uint16, cupy.uint32, cupy.uint64] +TYPES = FLOAT_TYPES + INT_TYPES + UNSIGNED_TYPES + COMPLEX_TYPES # type: ignore # NOQA +TYPE_PAIRS = [(x, y) for x, y in product(TYPES, TYPES) + if cupy.promote_types(x, y) is cupy.dtype(x)] + +TYPE_NAMES = [_get_typename(t) for t in TYPES] +TYPE_PAIR_NAMES = [(_get_typename(x), _get_typename(y)) for x, y in TYPE_PAIRS] + + +IIR_KERNEL = r""" +#include +#include +#include + +template +__global__ void compute_correction_factors( + const int m, const int k, const T* b, U* out) { + int idx = blockDim.x * blockIdx.x + threadIdx.x; + if(idx >= k) { + return; + } + + U* out_start = out + idx * (k + m); + U* out_off = out_start + k; + + for(int i = 0; i < m; i++) { + U acc = 0.0; + for(int j = 0; j 
< k; j++) { + acc += ((U) b[j]) * out_off[i - j - 1]; + + } + out_off[i] = acc; + } +} + +template +__global__ void first_pass_iir( + const int m, const int k, const int n, const int n_blocks, + const int carries_stride, const T* factors, T* out, + T* carries) { + int orig_idx = blockDim.x * (blockIdx.x % n_blocks) + threadIdx.x; + + int num_row = blockIdx.x / n_blocks; + int idx = 2 * orig_idx + 1; + + if(idx >= n) { + return; + } + + int group_num = idx / m; + int group_pos = idx % m; + + T* out_off = out + num_row * n; + T* carries_off = carries + num_row * carries_stride; + + T* group_start = out_off + m * group_num; + T* group_carries = carries_off + k * group_num; + + int pos = group_pos; + int up_bound = pos; + int low_bound = pos; + int rel_pos; + + for(int level = 1, iter = 1; level < m; level *=2, iter++) { + int sz = min(pow(2.0f, ((float) iter)), ((float) m)); + + if(level > 1) { + int factor = ceil(pos / ((float) sz)); + up_bound = sz * factor - 1; + low_bound = up_bound - level + 1; + } + + if(level == 1) { + pos = low_bound; + } + + if(pos < low_bound) { + pos += level / 2; + } + + if(pos + m * group_num >= n) { + break; + } + + rel_pos = pos % level; + T carry = 0.0; + for(int i = 1; i <= min(k, level); i++) { + T k_value = group_start[low_bound - i]; + const T* k_factors = factors + (m + k) * (i - 1) + k; + T factor = k_factors[rel_pos]; + carry += k_value * factor; + } + + group_start[pos] += carry; + __syncthreads(); + } + + if(pos >= m - k) { + if(carries != NULL) { + group_carries[pos - (m - k)] = group_start[pos]; + } + } + +} + +template +__global__ void correct_carries( + const int m, const int k, const int n_blocks, const int carries_stride, + const int offset, const T* factors, T* carries) { + + int idx = threadIdx.x; + int pos = idx + (m - k); + T* row_carries = carries + carries_stride * blockIdx.x; + + for(int i = offset; i < n_blocks; i++) { + T* this_carries = row_carries + k * (i + (1 - offset)); + T* prev_carries = row_carries + k * 
(i - offset); + + T carry = 0.0; + for(int j = 1; j <= k; j++) { + const T* k_factors = factors + (m + k) * (j - 1) + k; + T factor = k_factors[pos]; + T k_value = prev_carries[k - j]; + carry += factor * k_value; + } + + this_carries[idx] += carry; + __syncthreads(); + } +} + +template +__global__ void second_pass_iir( + const int m, const int k, const int n, const int carries_stride, + const int n_blocks, const int offset, const T* factors, + T* carries, T* out) { + + int idx = blockDim.x * (blockIdx.x % n_blocks) + threadIdx.x; + idx += offset * m; + + int row_num = blockIdx.x / n_blocks; + int n_group = idx / m; + int pos = idx % m; + + if(idx >= n) { + return; + } + + T* out_off = out + row_num * n; + T* carries_off = carries + row_num * carries_stride; + const T* prev_carries = carries_off + (n_group - offset) * k; + + T carry = 0.0; + for(int i = 1; i <= k; i++) { + const T* k_factors = factors + (m + k) * (i - 1) + k; + T factor = k_factors[pos]; + T k_value = prev_carries[k - i]; + carry += factor * k_value; + } + + out_off[idx] += carry; +} +""" + +IIR_SOS_KERNEL = r""" +#include +#include +#include + +template +__global__ void pick_carries( + const int m, const int n, const int carries_stride, const int n_blocks, + const int offset, T* x, T* carries) { + + int idx = m * (blockIdx.x % n_blocks) + threadIdx.x + m - 2; + int pos = threadIdx.x; + int row_num = blockIdx.x / n_blocks; + int n_group = idx / m; + + T* x_off = x + row_num * n; + T* carries_off = carries + row_num * carries_stride; + T* group_carries = carries_off + (n_group + (1 - offset)) * 2; + + if(idx >= n) { + return; + } + + group_carries[pos] = x_off[idx]; +} + +template +__global__ void compute_correction_factors_sos( + const int m, const T* f_const, U* all_out) { + + extern __shared__ __align__(sizeof(T)) thrust::complex bc_d[2]; + T* b_c = reinterpret_cast(bc_d); + + extern __shared__ __align__(sizeof(T)) thrust::complex off_d[4]; + U* off_cache = reinterpret_cast(off_d); + + int idx = 
threadIdx.x; + int num_section = blockIdx.x; + + const int n_const = 6; + const int a_off = 3; + const int k = 2; + const int off_idx = 1; + + U* out = all_out + num_section * k * m; + U* out_start = out + idx * m; + const T* b = f_const + num_section * n_const + a_off + 1; + + b_c[idx] = b[idx]; + __syncthreads(); + + U* this_cache = off_cache + k * idx; + this_cache[off_idx - idx] = 1; + this_cache[idx] = 0; + + for(int i = 0; i < m; i++) { + U acc = 0.0; + for(int j = 0; j < k; j++) { + acc += -((U) b_c[j]) * this_cache[off_idx - j]; + + } + this_cache[0] = this_cache[1]; + this_cache[1] = acc; + out_start[i] = acc; + } +} + + +template +__global__ void first_pass_iir_sos( + const int m, const int n, const int n_blocks, + const T* factors, T* out, T* carries) { + + extern __shared__ unsigned int thread_status[2]; + extern __shared__ __align__(sizeof(T)) thrust::complex fc_d[2 * 1024]; + T* factor_cache = reinterpret_cast(fc_d); + + int orig_idx = blockDim.x * (blockIdx.x % n_blocks) + threadIdx.x; + + int num_row = blockIdx.x / n_blocks; + int idx = 2 * orig_idx + 1; + const int k = 2; + + if(idx >= n) { + return; + } + + int group_num = idx / m; + int group_pos = idx % m; + T* out_off = out + num_row * n; + T* carries_off = carries + num_row * n_blocks * k; + + T* group_start = out_off + m * group_num; + T* group_carries = carries_off + group_num * k; + + const T* section_factors = factors; + T* section_carries = group_carries; + + factor_cache[group_pos] = section_factors[group_pos]; + factor_cache[group_pos - 1] = section_factors[group_pos - 1]; + factor_cache[m + group_pos] = section_factors[m + group_pos]; + factor_cache[m + group_pos - 1] = section_factors[m + group_pos - 1]; + __syncthreads(); + + int pos = group_pos; + int up_bound = pos; + int low_bound = pos; + int rel_pos; + + for(int level = 1, iter = 1; level < m; level *= 2, iter++) { + int sz = min(pow(2.0f, ((float) iter)), ((float) m)); + + if(level > 1) { + int factor = ceil(pos / ((float) 
sz)); + up_bound = sz * factor - 1; + low_bound = up_bound - level + 1; + } + + if(level == 1) { + pos = low_bound; + } + + if(pos < low_bound) { + pos += level / 2; + } + + if(pos + m * group_num >= n) { + break; + } + + rel_pos = pos % level; + T carry = 0.0; + for(int i = 1; i <= min(k, level); i++) { + T k_value = group_start[low_bound - i]; + const T* k_factors = factor_cache + m * (i - 1); + T factor = k_factors[rel_pos]; + carry += k_value * factor; + } + + group_start[pos] += carry; + __syncthreads(); + } + + if(pos >= m - k) { + if(carries != NULL) { + section_carries[pos - (m - k)] = group_start[pos]; + } + } +} + +template +__global__ void correct_carries_sos( + const int m, const int n_blocks, const int carries_stride, + const int offset, const T* factors, T* carries) { + + extern __shared__ __align__(sizeof(T)) thrust::complex fcd3[4]; + T* factor_cache = reinterpret_cast(fcd3); + + int idx = threadIdx.x; + const int k = 2; + int pos = idx + (m - k); + T* row_carries = carries + carries_stride * blockIdx.x; + + factor_cache[2 * idx] = factors[pos]; + factor_cache[2 * idx + 1] = factors[m + pos]; + __syncthreads(); + + for(int i = offset; i < n_blocks; i++) { + T* this_carries = row_carries + k * (i + (1 - offset)); + T* prev_carries = row_carries + k * (i - offset); + + T carry = 0.0; + for(int j = 1; j <= k; j++) { + // const T* k_factors = factors + m * (j - 1); + // T factor = k_factors[pos]; + T factor = factor_cache[2 * idx + (j - 1)]; + T k_value = prev_carries[k - j]; + carry += factor * k_value; + } + + this_carries[idx] += carry; + __syncthreads(); + } +} + +template +__global__ void second_pass_iir_sos( + const int m, const int n, const int carries_stride, + const int n_blocks, const int offset, const T* factors, + T* carries, T* out) { + + extern __shared__ __align__(sizeof(T)) thrust::complex fcd2[2 * 1024]; + T* factor_cache = reinterpret_cast(fcd2); + + extern __shared__ __align__(sizeof(T)) thrust::complex c_d[2]; + T* carries_cache = 
reinterpret_cast(c_d); + + int idx = blockDim.x * (blockIdx.x % n_blocks) + threadIdx.x; + idx += offset * m; + + int row_num = blockIdx.x / n_blocks; + int n_group = idx / m; + int pos = idx % m; + const int k = 2; + + T* out_off = out + row_num * n; + T* carries_off = carries + row_num * carries_stride; + const T* prev_carries = carries_off + (n_group - offset) * k; + + if(pos < k) { + carries_cache[pos] = prev_carries[pos]; + } + + if(idx >= n) { + return; + } + + factor_cache[pos] = factors[pos]; + factor_cache[pos + m] = factors[pos + m]; + __syncthreads(); + + T carry = 0.0; + for(int i = 1; i <= k; i++) { + const T* k_factors = factor_cache + m * (i - 1); + T factor = k_factors[pos]; + T k_value = carries_cache[k - i]; + carry += factor * k_value; + } + + out_off[idx] += carry; +} + +template +__global__ void fir_sos( + const int m, const int n, const int carries_stride, const int n_blocks, + const int offset, const T* sos, T* carries, T* out) { + + extern __shared__ __align__(sizeof(T)) thrust::complex fir_cc[1024 + 2]; + T* fir_cache = reinterpret_cast(fir_cc); + + extern __shared__ __align__(sizeof(T)) thrust::complex fir_b[3]; + T* b = reinterpret_cast(fir_b); + + int idx = blockDim.x * (blockIdx.x % n_blocks) + threadIdx.x; + int row_num = blockIdx.x / n_blocks; + int n_group = idx / m; + int pos = idx % m; + const int k = 2; + + T* out_row = out + row_num * n; + T* out_off = out_row + n_group * m; + T* carries_off = carries + row_num * carries_stride; + T* this_carries = carries_off + k * (n_group + (1 - offset)); + T* group_carries = carries_off + (n_group - offset) * k; + + if(pos <= k) { + b[pos] = sos[pos]; + } + + if(pos < k) { + if(offset && n_group == 0) { + fir_cache[pos] = 0; + } else { + fir_cache[pos] = group_carries[pos]; + } + } + + if(idx >= n) { + return; + } + + fir_cache[pos + k] = out_off[pos]; + __syncthreads(); + + T acc = 0.0; + for(int i = k; i >= 0; i--) { + acc += fir_cache[pos + i] * b[k - i]; + } + + out_off[pos] = acc; +} 
+""" # NOQA + +IIR_MODULE = cupy.RawModule( + code=IIR_KERNEL, options=('-std=c++11',), + name_expressions=[f'compute_correction_factors<{x}, {y}>' + for x, y in TYPE_PAIR_NAMES] + + [f'correct_carries<{x}>' for x in TYPE_NAMES] + + [f'first_pass_iir<{x}>' for x in TYPE_NAMES] + + [f'second_pass_iir<{x}>' for x in TYPE_NAMES]) + +IIR_SOS_MODULE = cupy.RawModule( + code=IIR_SOS_KERNEL, options=('-std=c++11',), + name_expressions=[f'compute_correction_factors_sos<{x}, {y}>' + for x, y in TYPE_PAIR_NAMES] + + [f'pick_carries<{x}>' for x in TYPE_NAMES] + + [f'correct_carries_sos<{x}>' for x in TYPE_NAMES] + + [f'first_pass_iir_sos<{x}>' for x in TYPE_NAMES] + + [f'second_pass_iir_sos<{x}>' for x in TYPE_NAMES] + + [f'fir_sos<{x}>' for x in TYPE_NAMES]) + + +def _get_module_func(module, func_name, *template_args): + args_dtypes = [_get_typename(arg.dtype) for arg in template_args] + template = ', '.join(args_dtypes) + kernel_name = f'{func_name}<{template}>' if template_args else func_name + kernel = module.get_function(kernel_name) + return kernel + + +def collapse_2d(x, axis): + x = cupy.moveaxis(x, axis, -1) + x_shape = x.shape + x = x.reshape(-1, x.shape[-1]) + if not x.flags.c_contiguous: + x = x.copy() + return x, x_shape + + +def collapse_2d_rest(x, axis): + x = cupy.moveaxis(x, axis + 1, -1) + x_shape = x.shape + x = x.reshape(x.shape[0], -1, x.shape[-1]) + if not x.flags.c_contiguous: + x = x.copy() + return x, x_shape + + +def compute_correction_factors(a, block_sz, dtype): + k = a.size + correction = cupy.eye(k, dtype=dtype) + correction = cupy.c_[ + correction[::-1], cupy.empty((k, block_sz), dtype=dtype)] + corr_kernel = _get_module_func( + IIR_MODULE, 'compute_correction_factors', correction, a) + corr_kernel((k,), (1,), (block_sz, k, a, correction)) + return correction + + +def apply_iir(x, a, axis=-1, zi=None, dtype=None, block_sz=1024): + # GPU throughput is faster when using single precision floating point + # numbers + # x = x.astype(cupy.float32) + 
if dtype is None: + dtype = cupy.result_type(x.dtype, a.dtype) + + a = a.astype(dtype) + + if zi is not None: + zi = zi.astype(dtype) + + x_shape = x.shape + x_ndim = x.ndim + axis = _normalize_axis_index(axis, x_ndim) + k = a.size + n = x_shape[axis] + + if x_ndim > 1: + x, x_shape = collapse_2d(x, axis) + if zi is not None: + zi, _ = collapse_2d(zi, axis) + + out = cupy.array(x, dtype=dtype, copy=True) + + num_rows = 1 if x.ndim == 1 else x.shape[0] + n_blocks = (n + block_sz - 1) // block_sz + total_blocks = num_rows * n_blocks + + correction = cupy.eye(k, dtype=dtype) + correction = cupy.c_[ + correction[::-1], cupy.empty((k, block_sz), dtype=dtype)] + carries = cupy.empty( + (num_rows, n_blocks, k), dtype=dtype) + + corr_kernel = _get_module_func( + IIR_MODULE, 'compute_correction_factors', correction, a) + first_pass_kernel = _get_module_func(IIR_MODULE, 'first_pass_iir', out) + second_pass_kernel = _get_module_func(IIR_MODULE, 'second_pass_iir', out) + carry_correction_kernel = _get_module_func( + IIR_MODULE, 'correct_carries', out) + + corr_kernel((k,), (1,), (block_sz, k, a, correction)) + first_pass_kernel((total_blocks,), (block_sz // 2,), + (block_sz, k, n, n_blocks, (n_blocks) * k, + correction, out, carries)) + + if zi is not None: + if zi.ndim == 1: + zi = cupy.broadcast_to(zi, (num_rows, 1, zi.size)) + elif zi.ndim == 2: + zi = zi.reshape(num_rows, 1, zi.shape[-1]) + + if carries.size == 0: + carries = zi + else: + carries = cupy.concatenate((zi, carries), axis=1) + + if not carries.flags.c_contiguous: + carries = carries.copy() + + if n_blocks > 1 or zi is not None: + starting_group = int(zi is None) + blocks_to_merge = n_blocks - starting_group + carries_stride = (n_blocks + (1 - starting_group)) * k + carry_correction_kernel( + (num_rows,), (k,), + (block_sz, k, n_blocks, carries_stride, starting_group, + correction, carries)) + second_pass_kernel( + (num_rows * blocks_to_merge,), (block_sz,), + (block_sz, k, n, carries_stride, blocks_to_merge, + 
starting_group, correction, carries, out)) + + if x_ndim > 1: + out = out.reshape(x_shape) + out = cupy.moveaxis(out, -1, axis) + if not out.flags.c_contiguous: + out = out.copy() + + return out + + +def compute_correction_factors_sos(sos, block_sz, dtype): + n_sections = sos.shape[0] + correction = cupy.empty((n_sections, 2, block_sz), dtype=dtype) + corr_kernel = _get_module_func( + IIR_SOS_MODULE, 'compute_correction_factors_sos', correction, sos) + corr_kernel((n_sections,), (2,), (block_sz, sos, correction)) + return correction + + +def apply_iir_sos(x, sos, axis=-1, zi=None, dtype=None, block_sz=1024, + apply_fir=True, out=None): + if dtype is None: + dtype = cupy.result_type(x.dtype, sos.dtype) + + sos = sos.astype(dtype) + + if zi is not None: + zi = zi.astype(dtype) + + x_shape = x.shape + x_ndim = x.ndim + n_sections = sos.shape[0] + axis = _normalize_axis_index(axis, x_ndim) + k = 2 + n = x_shape[axis] + zi_shape = None + + if x_ndim > 1: + x, x_shape = collapse_2d(x, axis) + + if zi is not None: + zi, zi_shape = collapse_2d_rest(zi, axis) + + if out is None: + out = cupy.array(x, dtype=dtype, copy=True) + + num_rows = 1 if x.ndim == 1 else x.shape[0] + n_blocks = (n + block_sz - 1) // block_sz + total_blocks = num_rows * n_blocks + + correction = compute_correction_factors_sos(sos, block_sz, dtype) + carries = cupy.empty( + (num_rows, n_blocks, k), dtype=dtype) + all_carries = carries + zi_out = None + if zi is not None: + zi_out = cupy.empty_like(zi) + all_carries = cupy.empty( + (num_rows, n_blocks + 1, k), dtype=dtype) + + first_pass_kernel = _get_module_func( + IIR_SOS_MODULE, 'first_pass_iir_sos', out) + second_pass_kernel = _get_module_func( + IIR_SOS_MODULE, 'second_pass_iir_sos', out) + carry_correction_kernel = _get_module_func( + IIR_SOS_MODULE, 'correct_carries_sos', out) + fir_kernel = _get_module_func(IIR_SOS_MODULE, 'fir_sos', out) + carries_kernel = _get_module_func(IIR_SOS_MODULE, 'pick_carries', out) + + starting_group = int(zi is None) 
+ blocks_to_merge = n_blocks - starting_group + carries_stride = (n_blocks + (1 - starting_group)) * k + + carries_kernel((num_rows * n_blocks,), (k,), + (block_sz, n, carries_stride, n_blocks, starting_group, + out, all_carries)) + + for s in range(n_sections): + b = sos[s] + if zi is not None: + section_zi = zi[s, :, :2] + all_carries[:, 0, :] = section_zi + zi_out[s, :, :2] = axis_slice(out, n - 2, n) + + if apply_fir: + fir_kernel((num_rows * n_blocks,), (block_sz,), + (block_sz, n, carries_stride, n_blocks, starting_group, + b, all_carries, out)) + + first_pass_kernel( + (total_blocks,), (block_sz // 2,), + (block_sz, n, n_blocks, correction[s], out, carries)) + + if n_blocks > 1 or zi is not None: + if zi is not None: + section_zi = zi[s, :, 2:] + all_carries[:, 0, :] = section_zi + all_carries[:, 1:, :] = carries + + carry_correction_kernel( + (num_rows,), (k,), + (block_sz, n_blocks, carries_stride, starting_group, + correction[s], all_carries)) + second_pass_kernel( + (num_rows * blocks_to_merge,), (block_sz,), + (block_sz, n, carries_stride, blocks_to_merge, + starting_group, correction[s], all_carries, out)) + + if apply_fir: + carries_kernel( + (num_rows * n_blocks,), (k,), + (block_sz, n, carries_stride, n_blocks, starting_group, + out, all_carries)) + + if zi is not None: + zi_out[s, :, 2:] = axis_slice(out, n - 2, n) + + if x_ndim > 1: + out = out.reshape(x_shape) + out = cupy.moveaxis(out, -1, axis) + if not out.flags.c_contiguous: + out = out.copy() + + if zi is not None: + zi_out = zi_out.reshape(zi_shape) + if len(zi_shape) > 2: + zi_out = cupy.moveaxis(zi_out, -1, axis) + if not zi_out.flags.c_contiguous: + zi_out = zi_out.copy() + + if zi is not None: + return out, zi_out + return out diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/_lti_conversion.py b/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/_lti_conversion.py new file mode 100644 index 
0000000000000000000000000000000000000000..5c27ba33a16bf88c54e499be1db7ca0eaaae2cf6 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/_lti_conversion.py @@ -0,0 +1,82 @@ +import cupy + + +def _none_to_empty_2d(arg): + if arg is None: + return cupy.zeros((0, 0)) + else: + return arg + + +def _atleast_2d_or_none(arg): + if arg is not None: + return cupy.atleast_2d(arg) + + +def _shape_or_none(M): + if M is not None: + return M.shape + else: + return (None,) * 2 + + +def _choice_not_none(*args): + for arg in args: + if arg is not None: + return arg + + +def _restore(M, shape): + if M.shape == (0, 0): + return cupy.zeros(shape) + else: + if M.shape != shape: + raise ValueError("The input arrays have incompatible shapes.") + return M + + +def abcd_normalize(A=None, B=None, C=None, D=None): + """Check state-space matrices and ensure they are 2-D. + + If enough information on the system is provided, that is, enough + properly-shaped arrays are passed to the function, the missing ones + are built from this information, ensuring the correct number of + rows and columns. Otherwise a ValueError is raised. + + Parameters + ---------- + A, B, C, D : array_like, optional + State-space matrices. All of them are None (missing) by default. + See `ss2tf` for format. + + Returns + ------- + A, B, C, D : array + Properly shaped state-space matrices. + + Raises + ------ + ValueError + If not enough information on the system was provided. 
+ + """ + A, B, C, D = map(_atleast_2d_or_none, (A, B, C, D)) + + MA, NA = _shape_or_none(A) + MB, NB = _shape_or_none(B) + MC, NC = _shape_or_none(C) + MD, ND = _shape_or_none(D) + + p = _choice_not_none(MA, MB, NC) + q = _choice_not_none(NB, ND) + r = _choice_not_none(MC, MD) + if p is None or q is None or r is None: + raise ValueError("Not enough information on the system.") + + A, B, C, D = map(_none_to_empty_2d, (A, B, C, D)) + A = _restore(A, (p, p)) + B = _restore(B, (p, q)) + C = _restore(C, (r, p)) + D = _restore(D, (r, q)) + + return A, B, C, D diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/_ltisys.py b/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/_ltisys.py new file mode 100644 index 0000000000000000000000000000000000000000..fbfa4d1748b60a902be38ee0319a4072b2931909 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/_ltisys.py @@ -0,0 +1,3121 @@ +""" +ltisys -- a collection of classes and functions for modeling linear +time invariant systems. +""" +import warnings +import copy +from math import sqrt + +import cupy + +from cupyx.scipy import linalg +from cupyx.scipy.interpolate import make_interp_spline +from cupyx.scipy.linalg import expm, block_diag + +from cupyx.scipy.signal._lti_conversion import ( + _atleast_2d_or_none, abcd_normalize) +from cupyx.scipy.signal._iir_filter_conversions import ( + normalize, tf2zpk, tf2ss, zpk2ss, ss2tf, ss2zpk, zpk2tf) +from cupyx.scipy.signal._filter_design import ( + freqz, freqz_zpk, freqs, freqs_zpk) + + +class LinearTimeInvariant: + def __new__(cls, *system, **kwargs): + """Create a new object, don't allow direct instances.""" + if cls is LinearTimeInvariant: + raise NotImplementedError('The LinearTimeInvariant class is not ' + 'meant to be used directly, use `lti` ' + 'or `dlti` instead.') + return super().__new__(cls) + + def __init__(self): + """ + Initialize the `lti` baseclass. + + The heavy lifting is done by the subclasses. 
+ """ + super().__init__() + + self.inputs = None + self.outputs = None + self._dt = None + + @property + def dt(self): + """Return the sampling time of the system, `None` for `lti` systems.""" + return self._dt + + @property + def _dt_dict(self): + if self.dt is None: + return {} + else: + return {'dt': self.dt} + + @property + def zeros(self): + """Zeros of the system.""" + return self.to_zpk().zeros + + @property + def poles(self): + """Poles of the system.""" + return self.to_zpk().poles + + def _as_ss(self): + """Convert to `StateSpace` system, without copying. + + Returns + ------- + sys: StateSpace + The `StateSpace` system. If the class is already an instance of + `StateSpace` then this instance is returned. + """ + if isinstance(self, StateSpace): + return self + else: + return self.to_ss() + + def _as_zpk(self): + """Convert to `ZerosPolesGain` system, without copying. + + Returns + ------- + sys: ZerosPolesGain + The `ZerosPolesGain` system. If the class is already an instance of + `ZerosPolesGain` then this instance is returned. + """ + if isinstance(self, ZerosPolesGain): + return self + else: + return self.to_zpk() + + def _as_tf(self): + """Convert to `TransferFunction` system, without copying. + + Returns + ------- + sys: ZerosPolesGain + The `TransferFunction` system. If the class is already an instance + of `TransferFunction` then this instance is returned. + """ + if isinstance(self, TransferFunction): + return self + else: + return self.to_tf() + + +class lti(LinearTimeInvariant): + r""" + Continuous-time linear time invariant system base class. + + Parameters + ---------- + *system : arguments + The `lti` class can be instantiated with either 2, 3 or 4 arguments. 
+ The following gives the number of arguments and the corresponding + continuous-time subclass that is created: + + * 2: `TransferFunction`: (numerator, denominator) + * 3: `ZerosPolesGain`: (zeros, poles, gain) + * 4: `StateSpace`: (A, B, C, D) + + Each argument can be an array or a sequence. + + See Also + -------- + scipy.signal.lti + ZerosPolesGain, StateSpace, TransferFunction, dlti + + Notes + ----- + `lti` instances do not exist directly. Instead, `lti` creates an instance + of one of its subclasses: `StateSpace`, `TransferFunction` or + `ZerosPolesGain`. + + If (numerator, denominator) is passed in for ``*system``, coefficients for + both the numerator and denominator should be specified in descending + exponent order (e.g., ``s^2 + 3s + 5`` would be represented as ``[1, 3, + 5]``). + + Changing the value of properties that are not directly part of the current + system representation (such as the `zeros` of a `StateSpace` system) is + very inefficient and may lead to numerical inaccuracies. It is better to + convert to the specific system representation first. For example, call + ``sys = sys.to_zpk()`` before accessing/changing the zeros, poles or gain. + """ + def __new__(cls, *system): + """Create an instance of the appropriate subclass.""" + if cls is lti: + N = len(system) + if N == 2: + return TransferFunctionContinuous.__new__( + TransferFunctionContinuous, *system) + elif N == 3: + return ZerosPolesGainContinuous.__new__( + ZerosPolesGainContinuous, *system) + elif N == 4: + return StateSpaceContinuous.__new__(StateSpaceContinuous, + *system) + else: + raise ValueError("`system` needs to be an instance of `lti` " + "or have 2, 3 or 4 arguments.") + # __new__ was called from a subclass, let it call its own functions + return super().__new__(cls) + + def __init__(self, *system): + """ + Initialize the `lti` baseclass. + + The heavy lifting is done by the subclasses. 
+ """ + super().__init__(*system) + + def impulse(self, X0=None, T=None, N=None): + """ + Return the impulse response of a continuous-time system. + See `impulse` for details. + """ + return impulse(self, X0=X0, T=T, N=N) + + def step(self, X0=None, T=None, N=None): + """ + Return the step response of a continuous-time system. + See `step` for details. + """ + return step(self, X0=X0, T=T, N=N) + + def output(self, U, T, X0=None): + """ + Return the response of a continuous-time system to input `U`. + See `lsim` for details. + """ + return lsim(self, U, T, X0=X0) + + def bode(self, w=None, n=100): + """ + Calculate Bode magnitude and phase data of a continuous-time system. + + Returns a 3-tuple containing arrays of frequencies [rad/s], magnitude + [dB] and phase [deg]. See `bode` for details. + """ + return bode(self, w=w, n=n) + + def freqresp(self, w=None, n=10000): + """ + Calculate the frequency response of a continuous-time system. + + Returns a 2-tuple containing arrays of frequencies [rad/s] and + complex magnitude. + See `freqresp` for details. + """ + return freqresp(self, w=w, n=n) + + def to_discrete(self, dt, method='zoh', alpha=None): + """Return a discretized version of the current system. + + Parameters: See `cont2discrete` for details. + + Returns + ------- + sys: instance of `dlti` + """ + raise NotImplementedError('to_discrete is not implemented for this ' + 'system class.') + + +class dlti(LinearTimeInvariant): + r""" + Discrete-time linear time invariant system base class. + + Parameters + ---------- + *system: arguments + The `dlti` class can be instantiated with either 2, 3 or 4 arguments. + The following gives the number of arguments and the corresponding + discrete-time subclass that is created: + + * 2: `TransferFunction`: (numerator, denominator) + * 3: `ZerosPolesGain`: (zeros, poles, gain) + * 4: `StateSpace`: (A, B, C, D) + + Each argument can be an array or a sequence. 
+ dt: float, optional + Sampling time [s] of the discrete-time systems. Defaults to ``True`` + (unspecified sampling time). Must be specified as a keyword argument, + for example, ``dt=0.1``. + + See Also + -------- + scipy.signal.dlti + ZerosPolesGain, StateSpace, TransferFunction, lti + + Notes + ----- + `dlti` instances do not exist directly. Instead, `dlti` creates an instance + of one of its subclasses: `StateSpace`, `TransferFunction` or + `ZerosPolesGain`. + + Changing the value of properties that are not directly part of the current + system representation (such as the `zeros` of a `StateSpace` system) is + very inefficient and may lead to numerical inaccuracies. It is better to + convert to the specific system representation first. For example, call + ``sys = sys.to_zpk()`` before accessing/changing the zeros, poles or gain. + + If (numerator, denominator) is passed in for ``*system``, coefficients for + both the numerator and denominator should be specified in descending + exponent order (e.g., ``z^2 + 3z + 5`` would be represented as ``[1, 3, + 5]``). + """ + def __new__(cls, *system, **kwargs): + """Create an instance of the appropriate subclass.""" + if cls is dlti: + N = len(system) + if N == 2: + return TransferFunctionDiscrete.__new__( + TransferFunctionDiscrete, *system, **kwargs) + elif N == 3: + return ZerosPolesGainDiscrete.__new__(ZerosPolesGainDiscrete, + *system, **kwargs) + elif N == 4: + return StateSpaceDiscrete.__new__(StateSpaceDiscrete, *system, + **kwargs) + else: + raise ValueError("`system` needs to be an instance of `dlti` " + "or have 2, 3 or 4 arguments.") + # __new__ was called from a subclass, let it call its own functions + return super().__new__(cls) + + def __init__(self, *system, **kwargs): + """ + Initialize the `lti` baseclass. + + The heavy lifting is done by the subclasses. 
+ """ + dt = kwargs.pop('dt', True) + super().__init__(*system, **kwargs) + + self.dt = dt + + @property + def dt(self): + """Return the sampling time of the system.""" + return self._dt + + @dt.setter + def dt(self, dt): + self._dt = dt + + def impulse(self, x0=None, t=None, n=None): + """ + Return the impulse response of the discrete-time `dlti` system. + See `dimpulse` for details. + """ + return dimpulse(self, x0=x0, t=t, n=n) + + def step(self, x0=None, t=None, n=None): + """ + Return the step response of the discrete-time `dlti` system. + See `dstep` for details. + """ + return dstep(self, x0=x0, t=t, n=n) + + def output(self, u, t, x0=None): + """ + Return the response of the discrete-time system to input `u`. + See `dlsim` for details. + """ + return dlsim(self, u, t, x0=x0) + + def bode(self, w=None, n=100): + r""" + Calculate Bode magnitude and phase data of a discrete-time system. + + Returns a 3-tuple containing arrays of frequencies [rad/s], magnitude + [dB] and phase [deg]. See `dbode` for details. + """ + return dbode(self, w=w, n=n) + + def freqresp(self, w=None, n=10000, whole=False): + """ + Calculate the frequency response of a discrete-time system. + + Returns a 2-tuple containing arrays of frequencies [rad/s] and + complex magnitude. + See `dfreqresp` for details. + + """ + return dfreqresp(self, w=w, n=n, whole=whole) + + +class TransferFunction(LinearTimeInvariant): + r"""Linear Time Invariant system class in transfer function form. + + Represents the system as the continuous-time transfer function + :math:`H(s)=\sum_{i=0}^N b[N-i] s^i / \sum_{j=0}^M a[M-j] s^j` or the + discrete-time transfer function + :math:`H(z)=\sum_{i=0}^N b[N-i] z^i / \sum_{j=0}^M a[M-j] z^j`, where + :math:`b` are elements of the numerator `num`, :math:`a` are elements of + the denominator `den`, and ``N == len(b) - 1``, ``M == len(a) - 1``. 
+ `TransferFunction` systems inherit additional + functionality from the `lti`, respectively the `dlti` classes, depending on + which system representation is used. + + Parameters + ---------- + *system: arguments + The `TransferFunction` class can be instantiated with 1 or 2 + arguments. The following gives the number of input arguments and their + interpretation: + + * 1: `lti` or `dlti` system: (`StateSpace`, `TransferFunction` or + `ZerosPolesGain`) + * 2: array_like: (numerator, denominator) + dt: float, optional + Sampling time [s] of the discrete-time systems. Defaults to `None` + (continuous-time). Must be specified as a keyword argument, for + example, ``dt=0.1``. + + See Also + -------- + scipy.signal.TransferFunction + ZerosPolesGain, StateSpace, lti, dlti + tf2ss, tf2zpk, tf2sos + + Notes + ----- + Changing the value of properties that are not part of the + `TransferFunction` system representation (such as the `A`, `B`, `C`, `D` + state-space matrices) is very inefficient and may lead to numerical + inaccuracies. It is better to convert to the specific system + representation first. For example, call ``sys = sys.to_ss()`` before + accessing/changing the A, B, C, D system matrices. + + If (numerator, denominator) is passed in for ``*system``, coefficients + for both the numerator and denominator should be specified in descending + exponent order (e.g. 
``s^2 + 3s + 5`` or ``z^2 + 3z + 5`` would be + represented as ``[1, 3, 5]``) + """ + def __new__(cls, *system, **kwargs): + """Handle object conversion if input is an instance of lti.""" + if len(system) == 1 and isinstance(system[0], LinearTimeInvariant): + return system[0].to_tf() + + # Choose whether to inherit from `lti` or from `dlti` + if cls is TransferFunction: + if kwargs.get('dt') is None: + return TransferFunctionContinuous.__new__( + TransferFunctionContinuous, + *system, + **kwargs) + else: + return TransferFunctionDiscrete.__new__( + TransferFunctionDiscrete, + *system, + **kwargs) + + # No special conversion needed + return super().__new__(cls) + + def __init__(self, *system, **kwargs): + """Initialize the state space LTI system.""" + # Conversion of lti instances is handled in __new__ + if isinstance(system[0], LinearTimeInvariant): + return + + # Remove system arguments, not needed by parents anymore + super().__init__(**kwargs) + + self._num = None + self._den = None + + self.num, self.den = normalize(*system) + + def __repr__(self): + """Return representation of the system's transfer function""" + return '{}(\n{},\n{},\ndt: {}\n)'.format( + self.__class__.__name__, + repr(self.num), + repr(self.den), + repr(self.dt), + ) + + @property + def num(self): + """Numerator of the `TransferFunction` system.""" + return self._num + + @num.setter + def num(self, num): + self._num = cupy.atleast_1d(num) + + # Update dimensions + if len(self.num.shape) > 1: + self.outputs, self.inputs = self.num.shape + else: + self.outputs = 1 + self.inputs = 1 + + @property + def den(self): + """Denominator of the `TransferFunction` system.""" + return self._den + + @den.setter + def den(self, den): + self._den = cupy.atleast_1d(den) + + def _copy(self, system): + """ + Copy the parameters of another `TransferFunction` object + + Parameters + ---------- + system : `TransferFunction` + The `StateSpace` system that is to be copied + + """ + self.num = system.num + self.den 
= system.den + + def to_tf(self): + """ + Return a copy of the current `TransferFunction` system. + + Returns + ------- + sys : instance of `TransferFunction` + The current system (copy) + + """ + return copy.deepcopy(self) + + def to_zpk(self): + """ + Convert system representation to `ZerosPolesGain`. + + Returns + ------- + sys : instance of `ZerosPolesGain` + Zeros, poles, gain representation of the current system + + """ + return ZerosPolesGain(*tf2zpk(self.num, self.den), + **self._dt_dict) + + def to_ss(self): + """ + Convert system representation to `StateSpace`. + + Returns + ------- + sys : instance of `StateSpace` + State space model of the current system + + """ + return StateSpace(*tf2ss(self.num, self.den), + **self._dt_dict) + + @staticmethod + def _z_to_zinv(num, den): + """Change a transfer function from the variable `z` to `z**-1`. + + Parameters + ---------- + num, den: 1d array_like + Sequences representing the coefficients of the numerator and + denominator polynomials, in order of descending degree of 'z'. + That is, ``5z**2 + 3z + 2`` is presented as ``[5, 3, 2]``. + + Returns + ------- + num, den: 1d array_like + Sequences representing the coefficients of the numerator and + denominator polynomials, in order of ascending degree of 'z**-1'. + That is, ``5 + 3 z**-1 + 2 z**-2`` is presented as ``[5, 3, 2]``. + """ + diff = len(num) - len(den) + if diff > 0: + den = cupy.hstack((cupy.zeros(diff), den)) + elif diff < 0: + num = cupy.hstack((cupy.zeros(-diff), num)) + return num, den + + @staticmethod + def _zinv_to_z(num, den): + """Change a transfer function from the variable `z` to `z**-1`. + + Parameters + ---------- + num, den: 1d array_like + Sequences representing the coefficients of the numerator and + denominator polynomials, in order of ascending degree of 'z**-1'. + That is, ``5 + 3 z**-1 + 2 z**-2`` is presented as ``[5, 3, 2]``. 
+ + Returns + ------- + num, den: 1d array_like + Sequences representing the coefficients of the numerator and + denominator polynomials, in order of descending degree of 'z'. + That is, ``5z**2 + 3z + 2`` is presented as ``[5, 3, 2]``. + """ + diff = len(num) - len(den) + if diff > 0: + den = cupy.hstack((den, cupy.zeros(diff))) + elif diff < 0: + num = cupy.hstack((num, cupy.zeros(-diff))) + return num, den + + +class TransferFunctionContinuous(TransferFunction, lti): + r""" + Continuous-time Linear Time Invariant system in transfer function form. + + Represents the system as the transfer function + :math:`H(s)=\sum_{i=0}^N b[N-i] s^i / \sum_{j=0}^M a[M-j] s^j`, where + :math:`b` are elements of the numerator `num`, :math:`a` are elements of + the denominator `den`, and ``N == len(b) - 1``, ``M == len(a) - 1``. + Continuous-time `TransferFunction` systems inherit additional + functionality from the `lti` class. + + Parameters + ---------- + *system: arguments + The `TransferFunction` class can be instantiated with 1 or 2 + arguments. The following gives the number of input arguments and their + interpretation: + + * 1: `lti` system: (`StateSpace`, `TransferFunction` or + `ZerosPolesGain`) + * 2: array_like: (numerator, denominator) + + See Also + -------- + scipy.signal.TransferFunction + ZerosPolesGain, StateSpace, lti + tf2ss, tf2zpk, tf2sos + + Notes + ----- + Changing the value of properties that are not part of the + `TransferFunction` system representation (such as the `A`, `B`, `C`, `D` + state-space matrices) is very inefficient and may lead to numerical + inaccuracies. It is better to convert to the specific system + representation first. For example, call ``sys = sys.to_ss()`` before + accessing/changing the A, B, C, D system matrices. + + If (numerator, denominator) is passed in for ``*system``, coefficients + for both the numerator and denominator should be specified in descending + exponent order (e.g. 
``s^2 + 3s + 5`` would be represented as + ``[1, 3, 5]``) + + """ + + def to_discrete(self, dt, method='zoh', alpha=None): + """ + Returns the discretized `TransferFunction` system. + + Parameters: See `cont2discrete` for details. + + Returns + ------- + sys: instance of `dlti` and `StateSpace` + """ + return TransferFunction(*cont2discrete((self.num, self.den), + dt, + method=method, + alpha=alpha)[:-1], + dt=dt) + + +class TransferFunctionDiscrete(TransferFunction, dlti): + r""" + Discrete-time Linear Time Invariant system in transfer function form. + + Represents the system as the transfer function + :math:`H(z)=\sum_{i=0}^N b[N-i] z^i / \sum_{j=0}^M a[M-j] z^j`, where + :math:`b` are elements of the numerator `num`, :math:`a` are elements of + the denominator `den`, and ``N == len(b) - 1``, ``M == len(a) - 1``. + Discrete-time `TransferFunction` systems inherit additional functionality + from the `dlti` class. + + Parameters + ---------- + *system: arguments + The `TransferFunction` class can be instantiated with 1 or 2 + arguments. The following gives the number of input arguments and their + interpretation: + + * 1: `dlti` system: (`StateSpace`, `TransferFunction` or + `ZerosPolesGain`) + * 2: array_like: (numerator, denominator) + dt: float, optional + Sampling time [s] of the discrete-time systems. Defaults to `True` + (unspecified sampling time). Must be specified as a keyword argument, + for example, ``dt=0.1``. + + See Also + -------- + scipy.signal.TransferFunctionDiscrete + ZerosPolesGain, StateSpace, dlti + tf2ss, tf2zpk, tf2sos + + Notes + ----- + Changing the value of properties that are not part of the + `TransferFunction` system representation (such as the `A`, `B`, `C`, `D` + state-space matrices) is very inefficient and may lead to numerical + inaccuracies. 
+ + If (numerator, denominator) is passed in for ``*system``, coefficients + for both the numerator and denominator should be specified in descending + exponent order (e.g., ``z^2 + 3z + 5`` would be represented as + ``[1, 3, 5]``). + """ + pass + + +class ZerosPolesGain(LinearTimeInvariant): + r""" + Linear Time Invariant system class in zeros, poles, gain form. + + Represents the system as the continuous- or discrete-time transfer function + :math:`H(s)=k \prod_i (s - z[i]) / \prod_j (s - p[j])`, where :math:`k` is + the `gain`, :math:`z` are the `zeros` and :math:`p` are the `poles`. + `ZerosPolesGain` systems inherit additional functionality from the `lti`, + respectively the `dlti` classes, depending on which system representation + is used. + + Parameters + ---------- + *system : arguments + The `ZerosPolesGain` class can be instantiated with 1 or 3 + arguments. The following gives the number of input arguments and their + interpretation: + + * 1: `lti` or `dlti` system: (`StateSpace`, `TransferFunction` or + `ZerosPolesGain`) + * 3: array_like: (zeros, poles, gain) + dt: float, optional + Sampling time [s] of the discrete-time systems. Defaults to `None` + (continuous-time). Must be specified as a keyword argument, for + example, ``dt=0.1``. + + + See Also + -------- + scipy.signal.ZerosPolesGain + TransferFunction, StateSpace, lti, dlti + zpk2ss, zpk2tf, zpk2sos + + Notes + ----- + Changing the value of properties that are not part of the + `ZerosPolesGain` system representation (such as the `A`, `B`, `C`, `D` + state-space matrices) is very inefficient and may lead to numerical + inaccuracies. It is better to convert to the specific system + representation first. For example, call ``sys = sys.to_ss()`` before + accessing/changing the A, B, C, D system matrices. 
+ """ + def __new__(cls, *system, **kwargs): + """Handle object conversion if input is an instance of `lti`""" + if len(system) == 1 and isinstance(system[0], LinearTimeInvariant): + return system[0].to_zpk() + + # Choose whether to inherit from `lti` or from `dlti` + if cls is ZerosPolesGain: + if kwargs.get('dt') is None: + return ZerosPolesGainContinuous.__new__( + ZerosPolesGainContinuous, + *system, + **kwargs) + else: + return ZerosPolesGainDiscrete.__new__( + ZerosPolesGainDiscrete, + *system, + **kwargs + ) + + # No special conversion needed + return super().__new__(cls) + + def __init__(self, *system, **kwargs): + """Initialize the zeros, poles, gain system.""" + # Conversion of lti instances is handled in __new__ + if isinstance(system[0], LinearTimeInvariant): + return + + super().__init__(**kwargs) + + self._zeros = None + self._poles = None + self._gain = None + + self.zeros, self.poles, self.gain = system + + def __repr__(self): + """Return representation of the `ZerosPolesGain` system.""" + return '{}(\n{},\n{},\n{},\ndt: {}\n)'.format( + self.__class__.__name__, + repr(self.zeros), + repr(self.poles), + repr(self.gain), + repr(self.dt), + ) + + @property + def zeros(self): + """Zeros of the `ZerosPolesGain` system.""" + return self._zeros + + @zeros.setter + def zeros(self, zeros): + self._zeros = cupy.atleast_1d(zeros) + + # Update dimensions + if len(self.zeros.shape) > 1: + self.outputs, self.inputs = self.zeros.shape + else: + self.outputs = 1 + self.inputs = 1 + + @property + def poles(self): + """Poles of the `ZerosPolesGain` system.""" + return self._poles + + @poles.setter + def poles(self, poles): + self._poles = cupy.atleast_1d(poles) + + @property + def gain(self): + """Gain of the `ZerosPolesGain` system.""" + return self._gain + + @gain.setter + def gain(self, gain): + self._gain = gain + + def _copy(self, system): + """ + Copy the parameters of another `ZerosPolesGain` system. 
+ + Parameters + ---------- + system : instance of `ZerosPolesGain` + The zeros, poles gain system that is to be copied + + """ + self.poles = system.poles + self.zeros = system.zeros + self.gain = system.gain + + def to_tf(self): + """ + Convert system representation to `TransferFunction`. + + Returns + ------- + sys : instance of `TransferFunction` + Transfer function of the current system + + """ + return TransferFunction(*zpk2tf(self.zeros, self.poles, self.gain), + **self._dt_dict) + + def to_zpk(self): + """ + Return a copy of the current 'ZerosPolesGain' system. + + Returns + ------- + sys : instance of `ZerosPolesGain` + The current system (copy) + + """ + return copy.deepcopy(self) + + def to_ss(self): + """ + Convert system representation to `StateSpace`. + + Returns + ------- + sys : instance of `StateSpace` + State space model of the current system + + """ + return StateSpace(*zpk2ss(self.zeros, self.poles, self.gain), + **self._dt_dict) + + +class ZerosPolesGainContinuous(ZerosPolesGain, lti): + r""" + Continuous-time Linear Time Invariant system in zeros, poles, gain form. + + Represents the system as the continuous time transfer function + :math:`H(s)=k \prod_i (s - z[i]) / \prod_j (s - p[j])`, where :math:`k` is + the `gain`, :math:`z` are the `zeros` and :math:`p` are the `poles`. + Continuous-time `ZerosPolesGain` systems inherit additional functionality + from the `lti` class. + + Parameters + ---------- + *system : arguments + The `ZerosPolesGain` class can be instantiated with 1 or 3 + arguments. 
The following gives the number of input arguments and their + interpretation: + + * 1: `lti` system: (`StateSpace`, `TransferFunction` or + `ZerosPolesGain`) + * 3: array_like: (zeros, poles, gain) + + See Also + -------- + TransferFunction, StateSpace, lti + zpk2ss, zpk2tf, zpk2sos + + Notes + ----- + Changing the value of properties that are not part of the + `ZerosPolesGain` system representation (such as the `A`, `B`, `C`, `D` + state-space matrices) is very inefficient and may lead to numerical + inaccuracies. It is better to convert to the specific system + representation first. For example, call ``sys = sys.to_ss()`` before + accessing/changing the A, B, C, D system matrices. + + Examples + -------- + Construct the transfer function + :math:`H(s)=\frac{5(s - 1)(s - 2)}{(s - 3)(s - 4)}`: + + >>> from scipy import signal + + >>> signal.ZerosPolesGain([1, 2], [3, 4], 5) + ZerosPolesGainContinuous( + array([1, 2]), + array([3, 4]), + 5, + dt: None + ) + + """ + + def to_discrete(self, dt, method='zoh', alpha=None): + """ + Returns the discretized `ZerosPolesGain` system. + + Parameters: See `cont2discrete` for details. + + Returns + ------- + sys: instance of `dlti` and `ZerosPolesGain` + """ + return ZerosPolesGain( + *cont2discrete((self.zeros, self.poles, self.gain), + dt, + method=method, + alpha=alpha)[:-1], + dt=dt) + + +class ZerosPolesGainDiscrete(ZerosPolesGain, dlti): + r""" + Discrete-time Linear Time Invariant system in zeros, poles, gain form. + + Represents the system as the discrete-time transfer function + :math:`H(z)=k \prod_i (z - q[i]) / \prod_j (z - p[j])`, where :math:`k` is + the `gain`, :math:`q` are the `zeros` and :math:`p` are the `poles`. + Discrete-time `ZerosPolesGain` systems inherit additional functionality + from the `dlti` class. + + Parameters + ---------- + *system : arguments + The `ZerosPolesGain` class can be instantiated with 1 or 3 + arguments. 
The following gives the number of input arguments and their + interpretation: + + * 1: `dlti` system: (`StateSpace`, `TransferFunction` or + `ZerosPolesGain`) + * 3: array_like: (zeros, poles, gain) + dt: float, optional + Sampling time [s] of the discrete-time systems. Defaults to `True` + (unspecified sampling time). Must be specified as a keyword argument, + for example, ``dt=0.1``. + + See Also + -------- + scipy.signal.ZerosPolesGainDiscrete + TransferFunction, StateSpace, dlti + zpk2ss, zpk2tf, zpk2sos + + Notes + ----- + Changing the value of properties that are not part of the + `ZerosPolesGain` system representation (such as the `A`, `B`, `C`, `D` + state-space matrices) is very inefficient and may lead to numerical + inaccuracies. It is better to convert to the specific system + representation first. For example, call ``sys = sys.to_ss()`` before + accessing/changing the A, B, C, D system matrices. + """ + pass + + +class StateSpace(LinearTimeInvariant): + r""" + Linear Time Invariant system in state-space form. + + Represents the system as the continuous-time, first order differential + equation :math:`\dot{x} = A x + B u` or the discrete-time difference + equation :math:`x[k+1] = A x[k] + B u[k]`. `StateSpace` systems + inherit additional functionality from the `lti`, respectively the `dlti` + classes, depending on which system representation is used. + + Parameters + ---------- + *system: arguments + The `StateSpace` class can be instantiated with 1 or 4 arguments. + The following gives the number of input arguments and their + interpretation: + + * 1: `lti` or `dlti` system: (`StateSpace`, `TransferFunction` or + `ZerosPolesGain`) + * 4: array_like: (A, B, C, D) + dt: float, optional + Sampling time [s] of the discrete-time systems. Defaults to `None` + (continuous-time). Must be specified as a keyword argument, for + example, ``dt=0.1``. 
+ + See Also + -------- + scipy.signal.StateSpace + TransferFunction, ZerosPolesGain, lti, dlti + ss2zpk, ss2tf, zpk2sos + + Notes + ----- + Changing the value of properties that are not part of the + `StateSpace` system representation (such as `zeros` or `poles`) is very + inefficient and may lead to numerical inaccuracies. It is better to + convert to the specific system representation first. For example, call + ``sys = sys.to_zpk()`` before accessing/changing the zeros, poles or gain. + """ + + # Override NumPy binary operations and ufuncs + __array_priority__ = 100.0 + __array_ufunc__ = None + + def __new__(cls, *system, **kwargs): + """Create new StateSpace object and settle inheritance.""" + # Handle object conversion if input is an instance of `lti` + if len(system) == 1 and isinstance(system[0], LinearTimeInvariant): + return system[0].to_ss() + + # Choose whether to inherit from `lti` or from `dlti` + if cls is StateSpace: + if kwargs.get('dt') is None: + return StateSpaceContinuous.__new__(StateSpaceContinuous, + *system, **kwargs) + else: + return StateSpaceDiscrete.__new__(StateSpaceDiscrete, + *system, **kwargs) + + # No special conversion needed + return super().__new__(cls) + + def __init__(self, *system, **kwargs): + """Initialize the state space lti/dlti system.""" + # Conversion of lti instances is handled in __new__ + if isinstance(system[0], LinearTimeInvariant): + return + + # Remove system arguments, not needed by parents anymore + super().__init__(**kwargs) + + self._A = None + self._B = None + self._C = None + self._D = None + + self.A, self.B, self.C, self.D = abcd_normalize(*system) + + def __repr__(self): + """Return representation of the `StateSpace` system.""" + return '{}(\n{},\n{},\n{},\n{},\ndt: {}\n)'.format( + self.__class__.__name__, + repr(self.A), + repr(self.B), + repr(self.C), + repr(self.D), + repr(self.dt), + ) + + def _check_binop_other(self, other): + return isinstance(other, (StateSpace, cupy.ndarray, float, complex, + 
cupy.number, int)) + + def __mul__(self, other): + """ + Post-multiply another system or a scalar + + Handles multiplication of systems in the sense of a frequency domain + multiplication. That means, given two systems E1(s) and E2(s), their + multiplication, H(s) = E1(s) * E2(s), means that applying H(s) to U(s) + is equivalent to first applying E2(s), and then E1(s). + + Notes + ----- + For SISO systems the order of system application does not matter. + However, for MIMO systems, where the two systems are matrices, the + order above ensures standard Matrix multiplication rules apply. + """ + if not self._check_binop_other(other): + return NotImplemented + + if isinstance(other, StateSpace): + # Disallow mix of discrete and continuous systems. + if type(other) is not type(self): + return NotImplemented + + if self.dt != other.dt: + raise TypeError('Cannot multiply systems with different `dt`.') + + n1 = self.A.shape[0] + n2 = other.A.shape[0] + + # Interconnection of systems + # x1' = A1 x1 + B1 u1 + # y1 = C1 x1 + D1 u1 + # x2' = A2 x2 + B2 y1 + # y2 = C2 x2 + D2 y1 + # + # Plugging in with u1 = y2 yields + # [x1'] [A1 B1*C2 ] [x1] [B1*D2] + # [x2'] = [0 A2 ] [x2] + [B2 ] u2 + # [x1] + # y2 = [C1 D1*C2] [x2] + D1*D2 u2 + a = cupy.vstack((cupy.hstack((self.A, self.B @ other.C)), + cupy.hstack((cupy.zeros((n2, n1)), other.A)))) + b = cupy.vstack((self.B @ other.D, other.B)) + c = cupy.hstack((self.C, self.D @ other.C)) + d = self.D @ other.D + else: + # Assume that other is a scalar / matrix + # For post multiplication the input gets scaled + a = self.A + b = self.B @ other + c = self.C + d = self.D @ other + + common_dtype = cupy.result_type(a.dtype, b.dtype, c.dtype, d.dtype) + return StateSpace(cupy.asarray(a, dtype=common_dtype), + cupy.asarray(b, dtype=common_dtype), + cupy.asarray(c, dtype=common_dtype), + cupy.asarray(d, dtype=common_dtype), + **self._dt_dict) + + def __rmul__(self, other): + """Pre-multiply a scalar or matrix (but not StateSpace)""" + if 
not self._check_binop_other(other) or isinstance(other, StateSpace): + return NotImplemented + + # For pre-multiplication only the output gets scaled + a = self.A + b = self.B + c = other @ self.C + d = other @ self.D + + common_dtype = cupy.result_type(a.dtype, b.dtype, c.dtype, d.dtype) + return StateSpace(cupy.asarray(a, dtype=common_dtype), + cupy.asarray(b, dtype=common_dtype), + cupy.asarray(c, dtype=common_dtype), + cupy.asarray(d, dtype=common_dtype), + **self._dt_dict) + + def __neg__(self): + """Negate the system (equivalent to pre-multiplying by -1).""" + return StateSpace(self.A, self.B, -self.C, -self.D, **self._dt_dict) + + def __add__(self, other): + """ + Adds two systems in the sense of frequency domain addition. + """ + if not self._check_binop_other(other): + return NotImplemented + + if isinstance(other, StateSpace): + # Disallow mix of discrete and continuous systems. + if type(other) is not type(self): + raise TypeError('Cannot add {} and {}'.format(type(self), + type(other))) + + if self.dt != other.dt: + raise TypeError('Cannot add systems with different `dt`.') + # Interconnection of systems + # x1' = A1 x1 + B1 u + # y1 = C1 x1 + D1 u + # x2' = A2 x2 + B2 u + # y2 = C2 x2 + D2 u + # y = y1 + y2 + # + # Plugging in yields + # [x1'] [A1 0 ] [x1] [B1] + # [x2'] = [0 A2] [x2] + [B2] u + # [x1] + # y = [C1 C2] [x2] + [D1 + D2] u + a = block_diag(self.A, other.A) + b = cupy.vstack((self.B, other.B)) + c = cupy.hstack((self.C, other.C)) + d = self.D + other.D + else: + other = cupy.atleast_2d(other) + if self.D.shape == other.shape: + # A scalar/matrix is really just a static system + # (A=0, B=0, C=0) + a = self.A + b = self.B + c = self.C + d = self.D + other + else: + raise ValueError("Cannot add systems with incompatible " + "dimensions ({} and {})" + .format(self.D.shape, other.shape)) + + common_dtype = cupy.result_type(a.dtype, b.dtype, c.dtype, d.dtype) + return StateSpace(cupy.asarray(a, dtype=common_dtype), + cupy.asarray(b, 
dtype=common_dtype), + cupy.asarray(c, dtype=common_dtype), + cupy.asarray(d, dtype=common_dtype), + **self._dt_dict) + + def __sub__(self, other): + if not self._check_binop_other(other): + return NotImplemented + + return self.__add__(-other) + + def __radd__(self, other): + if not self._check_binop_other(other): + return NotImplemented + + return self.__add__(other) + + def __rsub__(self, other): + if not self._check_binop_other(other): + return NotImplemented + + return (-self).__add__(other) + + def __truediv__(self, other): + """ + Divide by a scalar + """ + # Division by non-StateSpace scalars + if not self._check_binop_other(other) or isinstance(other, StateSpace): + return NotImplemented + + if isinstance(other, cupy.ndarray) and other.ndim > 0: + # It's ambiguous what this means, so disallow it + raise ValueError( + "Cannot divide StateSpace by non-scalar numpy arrays") + + return self.__mul__(1/other) + + @property + def A(self): + """State matrix of the `StateSpace` system.""" + return self._A + + @A.setter + def A(self, A): + self._A = _atleast_2d_or_none(A) + + @property + def B(self): + """Input matrix of the `StateSpace` system.""" + return self._B + + @B.setter + def B(self, B): + self._B = _atleast_2d_or_none(B) + self.inputs = self.B.shape[-1] + + @property + def C(self): + """Output matrix of the `StateSpace` system.""" + return self._C + + @C.setter + def C(self, C): + self._C = _atleast_2d_or_none(C) + self.outputs = self.C.shape[0] + + @property + def D(self): + """Feedthrough matrix of the `StateSpace` system.""" + return self._D + + @D.setter + def D(self, D): + self._D = _atleast_2d_or_none(D) + + def _copy(self, system): + """ + Copy the parameters of another `StateSpace` system. 
+ + Parameters + ---------- + system : instance of `StateSpace` + The state-space system that is to be copied + + """ + self.A = system.A + self.B = system.B + self.C = system.C + self.D = system.D + + def to_tf(self, **kwargs): + """ + Convert system representation to `TransferFunction`. + + Parameters + ---------- + kwargs : dict, optional + Additional keywords passed to `ss2zpk` + + Returns + ------- + sys : instance of `TransferFunction` + Transfer function of the current system + + """ + return TransferFunction(*ss2tf(self._A, self._B, self._C, self._D, + **kwargs), **self._dt_dict) + + def to_zpk(self, **kwargs): + """ + Convert system representation to `ZerosPolesGain`. + + Parameters + ---------- + kwargs : dict, optional + Additional keywords passed to `ss2zpk` + + Returns + ------- + sys : instance of `ZerosPolesGain` + Zeros, poles, gain representation of the current system + + """ + return ZerosPolesGain(*ss2zpk(self._A, self._B, self._C, self._D, + **kwargs), **self._dt_dict) + + def to_ss(self): + """ + Return a copy of the current `StateSpace` system. + + Returns + ------- + sys : instance of `StateSpace` + The current system (copy) + + """ + return copy.deepcopy(self) + + +class StateSpaceContinuous(StateSpace, lti): + r""" + Continuous-time Linear Time Invariant system in state-space form. + + Represents the system as the continuous-time, first order differential + equation :math:`\dot{x} = A x + B u`. + Continuous-time `StateSpace` systems inherit additional functionality + from the `lti` class. + + Parameters + ---------- + *system: arguments + The `StateSpace` class can be instantiated with 1 or 3 arguments. 
+ The following gives the number of input arguments and their + interpretation: + + * 1: `lti` system: (`StateSpace`, `TransferFunction` or + `ZerosPolesGain`) + * 4: array_like: (A, B, C, D) + + See Also + -------- + scipy.signal.StateSpaceContinuous + TransferFunction, ZerosPolesGain, lti + ss2zpk, ss2tf, zpk2sos + + Notes + ----- + Changing the value of properties that are not part of the + `StateSpace` system representation (such as `zeros` or `poles`) is very + inefficient and may lead to numerical inaccuracies. It is better to + convert to the specific system representation first. For example, call + ``sys = sys.to_zpk()`` before accessing/changing the zeros, poles or gain. + """ + + def to_discrete(self, dt, method='zoh', alpha=None): + """ + Returns the discretized `StateSpace` system. + + Parameters: See `cont2discrete` for details. + + Returns + ------- + sys: instance of `dlti` and `StateSpace` + """ + return StateSpace(*cont2discrete((self.A, self.B, self.C, self.D), + dt, + method=method, + alpha=alpha)[:-1], + dt=dt) + + +class StateSpaceDiscrete(StateSpace, dlti): + r""" + Discrete-time Linear Time Invariant system in state-space form. + + Represents the system as the discrete-time difference equation + :math:`x[k+1] = A x[k] + B u[k]`. + `StateSpace` systems inherit additional functionality from the `dlti` + class. + + Parameters + ---------- + *system: arguments + The `StateSpace` class can be instantiated with 1 or 3 arguments. + The following gives the number of input arguments and their + interpretation: + + * 1: `dlti` system: (`StateSpace`, `TransferFunction` or + `ZerosPolesGain`) + * 4: array_like: (A, B, C, D) + dt: float, optional + Sampling time [s] of the discrete-time systems. Defaults to `True` + (unspecified sampling time). Must be specified as a keyword argument, + for example, ``dt=0.1``. 
+ + See Also + -------- + scipy.signal.StateSpaceDiscrete + TransferFunction, ZerosPolesGain, dlti + ss2zpk, ss2tf, zpk2sos + + Notes + ----- + Changing the value of properties that are not part of the + `StateSpace` system representation (such as `zeros` or `poles`) is very + inefficient and may lead to numerical inaccuracies. It is better to + convert to the specific system representation first. For example, call + ``sys = sys.to_zpk()`` before accessing/changing the zeros, poles or gain. + """ + pass + + +# ### lsim and related functions + +def lsim(system, U, T, X0=None, interp=True): + """ + Simulate output of a continuous-time linear system. + + Parameters + ---------- + system : an instance of the LTI class or a tuple describing the system. + The following gives the number of elements in the tuple and + the interpretation: + + * 1: (instance of `lti`) + * 2: (num, den) + * 3: (zeros, poles, gain) + * 4: (A, B, C, D) + + U : array_like + An input array describing the input at each time `T` + (interpolation is assumed between given times). If there are + multiple inputs, then each column of the rank-2 array + represents an input. If U = 0 or None, a zero input is used. + T : array_like + The time steps at which the input is defined and at which the + output is desired. Must be nonnegative, increasing, and equally spaced + X0 : array_like, optional + The initial conditions on the state vector (zero by default). + interp : bool, optional + Whether to use linear (True, the default) or zero-order-hold (False) + interpolation for the input array. + + Returns + ------- + T : 1D ndarray + Time values for the output. + yout : 1D ndarray + System response. + xout : ndarray + Time evolution of the state vector. + + Notes + ----- + If (num, den) is passed in for ``system``, coefficients for both the + numerator and denominator should be specified in descending exponent + order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``). 
+ + See Also + -------- + scipy.signal.lsim + + """ + if isinstance(system, lti): + sys = system._as_ss() + elif isinstance(system, dlti): + raise AttributeError('lsim can only be used with continuous-time ' + 'systems.') + else: + sys = lti(*system)._as_ss() + T = cupy.atleast_1d(T) + if len(T.shape) != 1: + raise ValueError("T must be a rank-1 array.") + + A, B, C, D = map(cupy.asarray, (sys.A, sys.B, sys.C, sys.D)) + n_states = A.shape[0] + n_inputs = B.shape[1] + + n_steps = T.size + if X0 is None: + X0 = cupy.zeros(n_states, sys.A.dtype) + xout = cupy.empty((n_steps, n_states), sys.A.dtype) + + if T[0] == 0: + xout[0] = X0 + elif T[0] > 0: + # step forward to initial time, with zero input + xout[0] = X0 @ expm(A.T * T[0]) + else: + raise ValueError("Initial time must be nonnegative") + + no_input = (U is None or + (isinstance(U, (int, float)) and U == 0.) or + not cupy.any(U)) + + if n_steps == 1: + yout = cupy.squeeze(xout @ C.T) + if not no_input: + yout += cupy.squeeze(U @ D.T) + return T, cupy.squeeze(yout), cupy.squeeze(xout) + + dt = T[1] - T[0] + if not cupy.allclose(cupy.diff(T), dt): + raise ValueError("Time steps are not equally spaced.") + + if no_input: + # Zero input: just use matrix exponential + # take transpose because state is a row vector + expAT_dt = expm(A.T * dt) + for i in range(1, n_steps): + xout[i] = xout[i-1] @ expAT_dt + yout = cupy.squeeze(xout @ C.T) + return T, cupy.squeeze(yout), cupy.squeeze(xout) + + # Nonzero input + U = cupy.atleast_1d(U) + if U.ndim == 1: + U = U[:, None] + + if U.shape[0] != n_steps: + raise ValueError("U must have the same number of rows " + "as elements in T.") + + if U.shape[1] != n_inputs: + raise ValueError("System does not define that many inputs.") + + if not interp: + # Zero-order hold + # Algorithm: to integrate from time 0 to time dt, we solve + # xdot = A x + B u, x(0) = x0 + # udot = 0, u(0) = u0. 
        #
        # Solution is
        #   [ x(dt) ]       [ A*dt  B*dt ] [ x0 ]
        #   [ u(dt) ] = exp [ 0     0    ] [ u0 ]
        M = cupy.vstack([cupy.hstack([A * dt, B * dt]),
                         cupy.zeros((n_inputs, n_states + n_inputs))])
        # transpose everything because the state and input are row vectors
        expMT = expm(M.T)
        Ad = expMT[:n_states, :n_states]
        Bd = expMT[n_states:, :n_states]
        # Discrete-time recursion: x[k] = x[k-1] Ad + u[k-1] Bd
        for i in range(1, n_steps):
            xout[i] = xout[i-1] @ Ad + U[i-1] @ Bd
    else:
        # Linear interpolation between steps
        # Algorithm: to integrate from time 0 to time dt, with linear
        # interpolation between inputs u(0) = u0 and u(dt) = u1, we solve
        #   xdot = A x + B u,        x(0) = x0
        #   udot = (u1 - u0) / dt,   u(0) = u0.
        #
        # Solution is
        #   [ x(dt) ]       [ A*dt  B*dt  0 ] [  x0   ]
        #   [ u(dt) ] = exp [ 0     0     I ] [  u0   ]
        #   [u1 - u0]       [ 0     0     0 ] [u1 - u0]
        Mlst = [cupy.hstack([A * dt, B * dt,
                             cupy.zeros((n_states, n_inputs))]),
                cupy.hstack([cupy.zeros((n_inputs, n_states + n_inputs)),
                             cupy.identity(n_inputs)]),
                cupy.zeros((n_inputs, n_states + 2 * n_inputs))]

        M = cupy.vstack(Mlst)
        expMT = expm(M.T)
        Ad = expMT[:n_states, :n_states]
        # Bd1 weights u[k], Bd0 weights u[k-1] in the interpolated update.
        Bd1 = expMT[n_states+n_inputs:, :n_states]
        Bd0 = expMT[n_states:n_states + n_inputs, :n_states] - Bd1
        # Recursion: x[k] = x[k-1] Ad + u[k-1] Bd0 + u[k] Bd1
        for i in range(1, n_steps):
            xout[i] = ((xout[i-1] @ Ad) + (U[i-1] @ Bd0) + (U[i] @ Bd1))

    # Output equation y = C x + D u, applied to the whole trajectory at once.
    yout = cupy.squeeze(xout @ C.T) + cupy.squeeze(U @ D.T)
    return T, cupy.squeeze(yout), cupy.squeeze(xout)


def _default_response_times(A, n):
    """Compute a reasonable set of time samples for the response time.

    This function is used by `impulse`, `impulse2`, `step` and `step2`
    to compute the response time when the `T` argument to the function
    is None.

    Parameters
    ----------
    A : array_like
        The system matrix, which is square.
    n : int
        The number of time samples to generate.

    Returns
    -------
    t : ndarray
        The 1-D array of length `n` of time samples at which the response
        is to be computed.

    """
    # Create a reasonable time interval.
    # TODO (scipy): This could use some more work.
    # For example, what is expected when the system is unstable?

    # XXX: note this delegates to numpy because of eigvals.
    # this can be avoided by e.g. using Gershgorin circles to estimate the
    # eigenvalue locations, but that would change the default behavior.

    import numpy as np
    vals = np.linalg.eigvals(A.get())
    vals = cupy.asarray(vals)

    # Use the slowest mode (smallest |Re(lambda)|) to set the time constant;
    # guard against a zero real part, which would give an infinite interval.
    r = cupy.min(cupy.abs(vals.real))
    if r == 0.0:
        r = 1.0
    tc = 1.0 / r
    # Sample seven time constants, which covers the transient for stable
    # systems.
    t = cupy.linspace(0.0, 7 * tc, n)
    return t


def impulse(system, X0=None, T=None, N=None):
    """Impulse response of continuous-time system.

    Parameters
    ----------
    system : an instance of the LTI class or a tuple of array_like
        describing the system.
        The following gives the number of elements in the tuple and
        the interpretation:

            * 1 (instance of `lti`)
            * 2 (num, den)
            * 3 (zeros, poles, gain)
            * 4 (A, B, C, D)

    X0 : array_like, optional
        Initial state-vector. Defaults to zero.
    T : array_like, optional
        Time points. Computed if not given.
    N : int, optional
        The number of time points to compute (if `T` is not given).

    Returns
    -------
    T : ndarray
        A 1-D array of time points.
    yout : ndarray
        A 1-D array containing the impulse response of the system (except for
        singularities at zero).

    Notes
    -----
    If (num, den) is passed in for ``system``, coefficients for both the
    numerator and denominator should be specified in descending exponent
    order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``).

    See Also
    --------
    scipy.signal.impulse

    """
    if isinstance(system, lti):
        sys = system._as_ss()
    elif isinstance(system, dlti):
        raise AttributeError('impulse can only be used with continuous-time '
                             'systems.')
    else:
        sys = lti(*system)._as_ss()
    # An impulse input is equivalent to zero input with the initial state
    # offset by B (plus any user-supplied X0).
    if X0 is None:
        X = cupy.squeeze(sys.B)
    else:
        X = cupy.squeeze(sys.B + X0)
    if N is None:
        N = 100
    if T is None:
        T = _default_response_times(sys.A, N)
    else:
        T = cupy.asarray(T)

    # Simulate with zero input and the shifted initial state.
    _, h, _ = lsim(sys, 0., T, X, interp=False)
    return T, h


def step(system, X0=None, T=None, N=None):
    """Step response of continuous-time system.

    Parameters
    ----------
    system : an instance of the LTI class or a tuple of array_like
        describing the system.
        The following gives the number of elements in the tuple and
        the interpretation:

            * 1 (instance of `lti`)
            * 2 (num, den)
            * 3 (zeros, poles, gain)
            * 4 (A, B, C, D)

    X0 : array_like, optional
        Initial state-vector (default is zero).
    T : array_like, optional
        Time points (computed if not given).
    N : int, optional
        Number of time points to compute if `T` is not given.

    Returns
    -------
    T : 1D ndarray
        Output time points.
    yout : 1D ndarray
        Step response of system.


    Notes
    -----
    If (num, den) is passed in for ``system``, coefficients for both the
    numerator and denominator should be specified in descending exponent
    order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``).

    See Also
    --------
    scipy.signal.step

    """
    if isinstance(system, lti):
        sys = system._as_ss()
    elif isinstance(system, dlti):
        raise AttributeError('step can only be used with continuous-time '
                             'systems.')
    else:
        sys = lti(*system)._as_ss()
    if N is None:
        N = 100
    if T is None:
        T = _default_response_times(sys.A, N)
    else:
        T = cupy.asarray(T)
    # A unit step is a constant input of 1 at every sample time.
    U = cupy.ones(T.shape, sys.A.dtype)
    vals = lsim(sys, U, T, X0=X0, interp=False)
    # Return only (T, yout); the state trajectory is discarded.
    return vals[0], vals[1]


def bode(system, w=None, n=100):
    """
    Calculate Bode magnitude and phase data of a continuous-time system.

    Parameters
    ----------
    system : an instance of the LTI class or a tuple describing the system.
        The following gives the number of elements in the tuple and
        the interpretation:

            * 1 (instance of `lti`)
            * 2 (num, den)
            * 3 (zeros, poles, gain)
            * 4 (A, B, C, D)

    w : array_like, optional
        Array of frequencies (in rad/s). Magnitude and phase data is
        calculated for every value in this array. If not given a reasonable
        set will be calculated.
    n : int, optional
        Number of frequency points to compute if `w` is not given. The `n`
        frequencies are logarithmically spaced in an interval chosen to
        include the influence of the poles and zeros of the system.

    Returns
    -------
    w : 1D ndarray
        Frequency array [rad/s]
    mag : 1D ndarray
        Magnitude array [dB]
    phase : 1D ndarray
        Phase array [deg]

    See Also
    --------
    scipy.signal.bode

    Notes
    -----
    If (num, den) is passed in for ``system``, coefficients for both the
    numerator and denominator should be specified in descending exponent
    order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``).

    """
    w, y = freqresp(system, w=w, n=n)

    # Magnitude in decibels; phase unwrapped and converted to degrees.
    mag = 20.0 * cupy.log10(abs(y))
    phase = cupy.unwrap(cupy.arctan2(y.imag, y.real)) * 180.0 / cupy.pi

    return w, mag, phase


def freqresp(system, w=None, n=10000):
    r"""Calculate the frequency response of a continuous-time system.

    Parameters
    ----------
    system : an instance of the `lti` class or a tuple describing the system.
        The following gives the number of elements in the tuple and
        the interpretation:

            * 1 (instance of `lti`)
            * 2 (num, den)
            * 3 (zeros, poles, gain)
            * 4 (A, B, C, D)

    w : array_like, optional
        Array of frequencies (in rad/s). Magnitude and phase data is
        calculated for every value in this array. If not given, a reasonable
        set will be calculated.
    n : int, optional
        Number of frequency points to compute if `w` is not given. The `n`
        frequencies are logarithmically spaced in an interval chosen to
        include the influence of the poles and zeros of the system.

    Returns
    -------
    w : 1D ndarray
        Frequency array [rad/s]
    H : 1D ndarray
        Array of complex magnitude values

    Notes
    -----
    If (num, den) is passed in for ``system``, coefficients for both the
    numerator and denominator should be specified in descending exponent
    order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``).

    See Also
    --------
    scipy.signal.freqresp

    """
    # TransferFunction and ZerosPolesGain can be evaluated directly; other
    # lti representations are converted to zpk form first.
    if isinstance(system, lti):
        if isinstance(system, (TransferFunction, ZerosPolesGain)):
            sys = system
        else:
            sys = system._as_zpk()
    elif isinstance(system, dlti):
        raise AttributeError('freqresp can only be used with continuous-time '
                             'systems.')
    else:
        sys = lti(*system)._as_zpk()

    if sys.inputs != 1 or sys.outputs != 1:
        raise ValueError("freqresp() requires a SISO (single input, single "
                         "output) system.")

    if w is not None:
        worN = w
    else:
        worN = n

    if isinstance(sys, TransferFunction):
        # In the call to freqs(), sys.num.ravel() is used because there are
        # cases where sys.num is a 2-D array with a single row.
        w, h = freqs(sys.num.ravel(), sys.den, worN=worN)

    elif isinstance(sys, ZerosPolesGain):
        w, h = freqs_zpk(sys.zeros, sys.poles, sys.gain, worN=worN)

    return w, h


# ### place_poles ###

# This class will be used by place_poles to return its results
# see https://code.activestate.com/recipes/52308/
class Bunch:
    """Simple attribute container: ``Bunch(a=1).a == 1``."""

    def __init__(self, **kwds):
        self.__dict__.update(kwds)


def _valid_inputs(A, B, poles, method, rtol, maxiter):
    """
    Check the poles come in complex conjugate pairs
    Check shapes of A, B and poles are compatible.
    Check the method chosen is compatible with provided poles
    Return update method to use and ordered poles

    """
    if poles.ndim > 1:
        raise ValueError("Poles must be a 1D array like.")
    # Will raise ValueError if poles do not come in complex conjugates pairs
    poles = _order_complex_poles(poles)
    if A.ndim > 2:
        raise ValueError("A must be a 2D array/matrix.")
    if B.ndim > 2:
        raise ValueError("B must be a 2D array/matrix")
    if A.shape[0] != A.shape[1]:
        raise ValueError("A must be square")
    if len(poles) > A.shape[0]:
        raise ValueError("maximum number of poles is %d but you asked for %d" %
                         (A.shape[0], len(poles)))
    if len(poles) < A.shape[0]:
        raise ValueError("number of poles is %d but you should provide %d" %
                         (len(poles), A.shape[0]))
    # A pole may be assigned at most rank(B) times.
    r = cupy.linalg.matrix_rank(B)
    for p in poles:
        if sum(p == poles) > r:
            raise ValueError("at least one of the requested pole is repeated "
                             "more than rank(B) times")
    # Choose update method
    update_loop = _YT_loop
    if method not in ('KNV0', 'YT'):
        raise ValueError("The method keyword must be one of 'YT' or 'KNV0'")

    if method == "KNV0":
        update_loop = _KNV0_loop
        # KNV0 only handles real pole sets.
        if not all(cupy.isreal(poles)):
            raise ValueError("Complex poles are not supported by KNV0")

    if maxiter < 1:
        raise ValueError("maxiter must be at least equal to 1")

    # We do not check rtol <= 0 as the user can use a negative rtol to
    # force maxiter iterations
    if rtol > 1:
        raise ValueError("rtol can not be greater than 1")

    return update_loop, poles


def _order_complex_poles(poles):
    """
    Check we have complex conjugate pairs and reorder P according to YT, ie
    real_poles, complex_i, conjugate complex_i, ....
    The lexicographic sort on the complex poles is added to help the user to
    compare sets of poles.
    """
    # Real poles first, sorted.
    ordered_poles = cupy.sort(poles[cupy.isreal(poles)])
    im_poles = []
    # Then each negative-imaginary pole immediately followed by its conjugate.
    for p in cupy.sort(poles[cupy.imag(poles) < 0]):
        if cupy.conj(p) in poles:
            im_poles.extend((p, cupy.conj(p)))

    ordered_poles = cupy.hstack((ordered_poles, im_poles))

    # Any unmatched complex pole means a conjugate is missing.
    if poles.shape[0] != len(ordered_poles):
        raise ValueError("Complex poles must come with their conjugates")
    return ordered_poles


def _KNV0(B, ker_pole, transfer_matrix, j, poles):
    """
    Algorithm "KNV0" Kautsky et Al. Robust pole
    assignment in linear state feedback, Int journal of Control
    1985, vol 41 p 1129->1155
    https://la.epfl.ch/files/content/sites/la/files/
        users/105941/public/KautskyNicholsDooren

    """
    # Remove xj from the base
    transfer_matrix_not_j = cupy.delete(transfer_matrix, j, axis=1)
    # If we QR this matrix in full mode Q=Q0|Q1
    # then Q1 will be a single column orthogonal to
    # Q0, that's what we are looking for !

    # After merge of gh-4249 great speed improvements could be achieved
    # using QR updates instead of full QR in the line below

    # To debug with numpy qr uncomment the line below
    # Q, R = np.linalg.qr(transfer_matrix_not_j, mode="complete")
    # Q, R = s_qr(transfer_matrix_not_j, mode="full")
    Q, R = cupy.linalg.qr(transfer_matrix_not_j, mode="complete")

    # Project the last column of Q onto the kernel associated with pole j.
    mat_ker_pj = ker_pole[j] @ ker_pole[j].T
    yj = mat_ker_pj @ Q[:, -1]

    # If Q[:, -1] is "almost" orthogonal to ker_pole[j] its
    # projection into ker_pole[j] will yield a vector
    # close to 0. As we are looking for a vector in ker_pole[j]
    # simply stick with transfer_matrix[:, j] (unless someone provides me with
    # a better choice ?)
+ + if not cupy.allclose(yj, 0): + xj = yj / cupy.linalg.norm(yj) + transfer_matrix[:, j] = xj + + # KNV does not support complex poles, using YT technique the two lines + # below seem to work 9 out of 10 times but it is not reliable enough: + # transfer_matrix[:, j]=real(xj) + # transfer_matrix[:, j+1]=imag(xj) + + # Add this at the beginning of this function if you wish to test + # complex support: + # if ~np.isreal(P[j]) and (j>=B.shape[0]-1 or P[j]!=np.conj(P[j+1])): + # return + # Problems arise when imag(xj)=>0 I have no idea on how to fix this + + +def _YT_real(ker_pole, Q, transfer_matrix, i, j): + """ + Applies algorithm from YT section 6.1 page 19 related to real pairs + """ + i = int(i) + j = int(j) + + # step 1 page 19 + u = Q[:, -2, None] + v = Q[:, -1, None] + + # step 2 page 19 +# m = np.dot(np.dot(ker_pole[i].T, np.dot(u, v.T) - +# np.dot(v, u.T)), ker_pole[j]) + m = (ker_pole[i].T @ (u @ v.T - v @ u.T)) @ ker_pole[j] + + # step 3 page 19 + um, sm, vm = cupy.linalg.svd(m) + # mu1, mu2 two first columns of U => 2 first lines of U.T + mu1, mu2 = um.T[:2, :, None] + # VM is V.T with numpy we want the first two lines of V.T + nu1, nu2 = vm[:2, :, None] + + # what follows is a rough python translation of the formulas + # in section 6.2 page 20 (step 4) + transfer_matrix_j_mo_transfer_matrix_j = cupy.vstack(( + transfer_matrix[:, i, None], + transfer_matrix[:, j, None])) + + if not cupy.allclose(sm[0], sm[1]): + ker_pole_imo_mu1 = ker_pole[i] @ mu1 + ker_pole_i_nu1 = ker_pole[j] @ nu1 + ker_pole_mu_nu = cupy.vstack((ker_pole_imo_mu1, ker_pole_i_nu1)) + else: + ker_pole_ij = cupy.vstack(( + cupy.hstack((ker_pole[i], + cupy.zeros(ker_pole[i].shape))), + cupy.hstack((cupy.zeros(ker_pole[j].shape), + ker_pole[j])) + )) + mu_nu_matrix = cupy.vstack( + (cupy.hstack((mu1, mu2)), cupy.hstack((nu1, nu2))) + ) + ker_pole_mu_nu = ker_pole_ij @ mu_nu_matrix + transfer_matrix_ij = ((ker_pole_mu_nu @ ker_pole_mu_nu.T) + @ transfer_matrix_j_mo_transfer_matrix_j) + + if 
not cupy.allclose(transfer_matrix_ij, 0): + transfer_matrix_ij = (sqrt(2) * transfer_matrix_ij / + cupy.linalg.norm(transfer_matrix_ij)) + transfer_matrix[:, i] = transfer_matrix_ij[ + :transfer_matrix[:, i].shape[0], 0 + ] + transfer_matrix[:, j] = transfer_matrix_ij[ + transfer_matrix[:, i].shape[0]:, 0 + ] + else: + # As in knv0 if transfer_matrix_j_mo_transfer_matrix_j is orthogonal to + # Vect{ker_pole_mu_nu} assign transfer_matrixi/transfer_matrix_j to + # ker_pole_mu_nu and iterate. As we are looking for a vector in + # Vect{Matker_pole_MU_NU} (see section 6.1 page 19) this might help + # (that's a guess, not a claim !) + transfer_matrix[:, i] = ker_pole_mu_nu[ + :transfer_matrix[:, i].shape[0], 0 + ] + transfer_matrix[:, j] = ker_pole_mu_nu[ + transfer_matrix[:, i].shape[0]:, 0 + ] + + +def _YT_complex(ker_pole, Q, transfer_matrix, i, j): + """ + Applies algorithm from YT section 6.2 page 20 related to complex pairs + """ + # step 1 page 20 + ur = sqrt(2) * Q[:, -2, None] + ui = sqrt(2) * Q[:, -1, None] + u = ur + 1j*ui + + # step 2 page 20 + ker_pole_ij = ker_pole[i] +# m = np.dot(np.dot(np.conj(ker_pole_ij.T), np.dot(u, np.conj(u).T) - +# np.dot(np.conj(u), u.T)), ker_pole_ij) + + m = ker_pole_ij.conj().T @ (u @ u.conj().T - u.conj() @ u.T) @ ker_pole_ij + + # step 3 page 20 + # e_val, e_vec = cupy.linalg.eig(m) + + # XXX: delegate to numpy + import numpy as np + e_val, e_vec = np.linalg.eig(m.get()) + e_val, e_vec = cupy.asarray(e_val), cupy.asarray(e_vec) + + # sort eigenvalues according to their module + e_val_idx = cupy.argsort(cupy.abs(e_val)) + mu1 = e_vec[:, e_val_idx[-1], None] + mu2 = e_vec[:, e_val_idx[-2], None] + + # what follows is a rough python translation of the formulas + # in section 6.2 page 20 (step 4) + + # remember transfer_matrix_i has been split as + # transfer_matrix[i]=real(transfer_matrix_i) and + # transfer_matrix[j]=imag(transfer_matrix_i) + transfer_matrix_j_mo_transfer_matrix_j = ( + transfer_matrix[:, i, None] + + 
1j*transfer_matrix[:, j, None] + ) + + if not cupy.allclose(cupy.abs(e_val[e_val_idx[-1]]), + cupy.abs(e_val[e_val_idx[-2]])): + ker_pole_mu = ker_pole_ij @ mu1 + else: + mu1_mu2_matrix = cupy.hstack((mu1, mu2)) + ker_pole_mu = ker_pole_ij @ mu1_mu2_matrix + transfer_matrix_i_j = cupy.dot((ker_pole_mu @ ker_pole_mu.conj().T), + transfer_matrix_j_mo_transfer_matrix_j) + + if not cupy.allclose(transfer_matrix_i_j, 0): + transfer_matrix_i_j = (transfer_matrix_i_j / + cupy.linalg.norm(transfer_matrix_i_j)) + transfer_matrix[:, i] = cupy.real(transfer_matrix_i_j[:, 0]) + transfer_matrix[:, j] = cupy.imag(transfer_matrix_i_j[:, 0]) + else: + # same idea as in YT_real + transfer_matrix[:, i] = cupy.real(ker_pole_mu[:, 0]) + transfer_matrix[:, j] = cupy.imag(ker_pole_mu[:, 0]) + + +def _YT_loop(ker_pole, transfer_matrix, poles, B, maxiter, rtol): + """ + Algorithm "YT" Tits, Yang. Globally Convergent + Algorithms for Robust Pole Assignment by State Feedback + https://hdl.handle.net/1903/5598 + The poles P have to be sorted accordingly to section 6.2 page 20 + + """ + # The IEEE edition of the YT paper gives useful information on the + # optimal update order for the real poles in order to minimize the number + # of times we have to loop over all poles, see page 1442 + nb_real = poles[cupy.isreal(poles)].shape[0] + # hnb => Half Nb Real + hnb = nb_real // 2 + + # Stick to the indices in the paper and then remove one to get numpy array + # index it is a bit easier to link the code to the paper this way even if + # it is not very clean. 
The paper is unclear about what should be done when + # there is only one real pole => use KNV0 on this real pole seem to work + if nb_real > 0: + # update the biggest real pole with the smallest one + update_order = [[cupy.array(nb_real)], [cupy.array(1)]] + else: + update_order = [[], []] + + r_comp = cupy.arange(nb_real+1, len(poles)+1, 2) + # step 1.a + r_p = cupy.arange(1, hnb+nb_real % 2) + update_order[0].extend(2*r_p) + update_order[1].extend(2*r_p+1) + # step 1.b + update_order[0].extend(r_comp) + update_order[1].extend(r_comp+1) + # step 1.c + r_p = cupy.arange(1, hnb+1) + update_order[0].extend(2*r_p-1) + update_order[1].extend(2*r_p) + # step 1.d + if hnb == 0 and cupy.isreal(poles[0]): + update_order[0].append(cupy.array(1)) + update_order[1].append(cupy.array(1)) + update_order[0].extend(r_comp) + update_order[1].extend(r_comp+1) + # step 2.a + r_j = cupy.arange(2, hnb+nb_real % 2) + for j in r_j: + for i in range(1, hnb+1): + update_order[0].append(cupy.array(i)) + update_order[1].append(cupy.array(i+j)) + # step 2.b + if hnb == 0 and cupy.isreal(poles[0]): + update_order[0].append(cupy.array(1)) + update_order[1].append(cupy.array(1)) + update_order[0].extend(r_comp) + update_order[1].extend(r_comp+1) + # step 2.c + r_j = cupy.arange(2, hnb+nb_real % 2) + for j in r_j: + for i in range(hnb+1, nb_real+1): + idx_1 = i+j + if idx_1 > nb_real: + idx_1 = i+j-nb_real + update_order[0].append(cupy.array(i)) + update_order[1].append(cupy.array(idx_1)) + # step 2.d + if hnb == 0 and cupy.isreal(poles[0]): + update_order[0].append(cupy.array(1)) + update_order[1].append(cupy.array(1)) + update_order[0].extend(r_comp) + update_order[1].extend(r_comp+1) + # step 3.a + for i in range(1, hnb+1): + update_order[0].append(cupy.array(i)) + update_order[1].append(cupy.array(i+hnb)) + # step 3.b + if hnb == 0 and cupy.isreal(poles[0]): + update_order[0].append(cupy.array(1)) + update_order[1].append(cupy.array(1)) + update_order[0].extend(r_comp) + 
update_order[1].extend(r_comp+1) + + update_order = cupy.array(update_order).T-1 + stop = False + nb_try = 0 + while nb_try < maxiter and not stop: + det_transfer_matrixb = cupy.abs(cupy.linalg.det(transfer_matrix)) + for i, j in update_order: + i, j = int(i), int(j) + + if i == j: + assert i == 0, "i!=0 for KNV call in YT" + assert cupy.isreal(poles[i]), "calling KNV on a complex pole" + _KNV0(B, ker_pole, transfer_matrix, i, poles) + else: + # a replacement for + # np.delete(transfer_matrix.get(), (i, j), axis=1) + idx = list(range(transfer_matrix.shape[1])) + idx.pop(i) + idx.pop(j-1) + transfer_matrix_not_i_j = transfer_matrix[:, idx] + + # after merge of gh-4249 great speed improvements could be + # achieved using QR updates instead of full QR below + + # to debug with numpy qr uncomment the line below + # Q, _ = np.linalg.qr(transfer_matrix_not_i_j, mode="complete") + # Q, _ = s_qr(transfer_matrix_not_i_j, mode="full") + Q, _ = cupy.linalg.qr(transfer_matrix_not_i_j, mode="complete") + + if cupy.isreal(poles[i]): + assert cupy.isreal(poles[j]), "mixing real and complex " +\ + "in YT_real" + str(poles) + _YT_real(ker_pole, Q, transfer_matrix, i, j) + else: + msg = "mixing real and complex in YT_real" + str(poles) + assert ~cupy.isreal(poles[i]), msg + _YT_complex(ker_pole, Q, transfer_matrix, i, j) + + sq_spacing = sqrt(cupy.finfo(cupy.float64).eps) + det_transfer_matrix = max((sq_spacing, + cupy.abs(cupy.linalg.det(transfer_matrix)))) + cur_rtol = cupy.abs( + (det_transfer_matrix - + det_transfer_matrixb) / + det_transfer_matrix) + if cur_rtol < rtol and det_transfer_matrix > sq_spacing: + # Convergence test from YT page 21 + stop = True + nb_try += 1 + return stop, cur_rtol, nb_try + + +def _KNV0_loop(ker_pole, transfer_matrix, poles, B, maxiter, rtol): + """ + Loop over all poles one by one and apply KNV method 0 algorithm + """ + # This method is useful only because we need to be able to call + # _KNV0 from YT without looping over all poles, otherwise it 
would + # have been fine to mix _KNV0_loop and _KNV0 in a single function + stop = False + nb_try = 0 + while nb_try < maxiter and not stop: + det_transfer_matrixb = cupy.abs(cupy.linalg.det(transfer_matrix)) + for j in range(B.shape[0]): + _KNV0(B, ker_pole, transfer_matrix, j, poles) + + sq_spacing = sqrt(sqrt(cupy.finfo(cupy.float64).eps)) + + det_transfer_matrix = max((sq_spacing, + cupy.abs(cupy.linalg.det(transfer_matrix)))) + cur_rtol = cupy.abs((det_transfer_matrix - det_transfer_matrixb) / + det_transfer_matrix) + if cur_rtol < rtol and det_transfer_matrix > sq_spacing: + # Convergence test from YT page 21 + stop = True + + nb_try += 1 + return stop, cur_rtol, nb_try + + +def place_poles(A, B, poles, method="YT", rtol=1e-3, maxiter=30): + """ + Compute K such that eigenvalues (A - dot(B, K))=poles. + + K is the gain matrix such as the plant described by the linear system + ``AX+BU`` will have its closed-loop poles, i.e the eigenvalues ``A - B*K``, + as close as possible to those asked for in poles. + + SISO, MISO and MIMO systems are supported. + + Parameters + ---------- + A, B : ndarray + State-space representation of linear system ``AX + BU``. + poles : array_like + Desired real poles and/or complex conjugates poles. + Complex poles are only supported with ``method="YT"`` (default). + method: {'YT', 'KNV0'}, optional + Which method to choose to find the gain matrix K. One of: + + - 'YT': Yang Tits + - 'KNV0': Kautsky, Nichols, Van Dooren update method 0 + + See References and Notes for details on the algorithms. + rtol: float, optional + After each iteration the determinant of the eigenvectors of + ``A - B*K`` is compared to its previous value, when the relative + error between these two values becomes lower than `rtol` the algorithm + stops. Default is 1e-3. + maxiter: int, optional + Maximum number of iterations to compute the gain matrix. + Default is 30. 
+ + Returns + ------- + full_state_feedback : Bunch object + full_state_feedback is composed of: + gain_matrix : 1-D ndarray + The closed loop matrix K such as the eigenvalues of ``A-BK`` + are as close as possible to the requested poles. + computed_poles : 1-D ndarray + The poles corresponding to ``A-BK`` sorted as first the real + poles in increasing order, then the complex congugates in + lexicographic order. + requested_poles : 1-D ndarray + The poles the algorithm was asked to place sorted as above, + they may differ from what was achieved. + X : 2-D ndarray + The transfer matrix such as ``X * diag(poles) = (A - B*K)*X`` + (see Notes) + rtol : float + The relative tolerance achieved on ``det(X)`` (see Notes). + `rtol` will be NaN if it is possible to solve the system + ``diag(poles) = (A - B*K)``, or 0 when the optimization + algorithms can't do anything i.e when ``B.shape[1] == 1``. + nb_iter : int + The number of iterations performed before converging. + `nb_iter` will be NaN if it is possible to solve the system + ``diag(poles) = (A - B*K)``, or 0 when the optimization + algorithms can't do anything i.e when ``B.shape[1] == 1``. + + Notes + ----- + The Tits and Yang (YT), [2]_ paper is an update of the original Kautsky et + al. (KNV) paper [1]_. KNV relies on rank-1 updates to find the transfer + matrix X such that ``X * diag(poles) = (A - B*K)*X``, whereas YT uses + rank-2 updates. This yields on average more robust solutions (see [2]_ + pp 21-22), furthermore the YT algorithm supports complex poles whereas KNV + does not in its original version. Only update method 0 proposed by KNV has + been implemented here, hence the name ``'KNV0'``. + + KNV extended to complex poles is used in Matlab's ``place`` function, YT is + distributed under a non-free licence by Slicot under the name ``robpole``. 
+ It is unclear and undocumented how KNV0 has been extended to complex poles + (Tits and Yang claim on page 14 of their paper that their method can not be + used to extend KNV to complex poles), therefore only YT supports them in + this implementation. + + As the solution to the problem of pole placement is not unique for MIMO + systems, both methods start with a tentative transfer matrix which is + altered in various way to increase its determinant. Both methods have been + proven to converge to a stable solution, however depending on the way the + initial transfer matrix is chosen they will converge to different + solutions and therefore there is absolutely no guarantee that using + ``'KNV0'`` will yield results similar to Matlab's or any other + implementation of these algorithms. + + Using the default method ``'YT'`` should be fine in most cases; ``'KNV0'`` + is only provided because it is needed by ``'YT'`` in some specific cases. + Furthermore ``'YT'`` gives on average more robust results than ``'KNV0'`` + when ``abs(det(X))`` is used as a robustness indicator. + + [2]_ is available as a technical report on the following URL: + https://hdl.handle.net/1903/5598 + + See Also + -------- + scipy.signal.place_poles + + References + ---------- + .. [1] J. Kautsky, N.K. Nichols and P. van Dooren, "Robust pole assignment + in linear state feedback", International Journal of Control, Vol. 41 + pp. 1129-1155, 1985. + .. [2] A.L. Tits and Y. Yang, "Globally convergent algorithms for robust + pole assignment by state feedback", IEEE Transactions on Automatic + Control, Vol. 41, pp. 1432-1452, 1996. 
+ """ + # Move away all the inputs checking, it only adds noise to the code + update_loop, poles = _valid_inputs(A, B, poles, method, rtol, maxiter) + + # The current value of the relative tolerance we achieved + cur_rtol = 0 + # The number of iterations needed before converging + nb_iter = 0 + + # Step A: QR decomposition of B page 1132 KN + # to debug with numpy qr uncomment the line below + # u, z = np.linalg.qr(B, mode="complete") + # u, z = s_qr(B, mode="full") + u, z = cupy.linalg.qr(B, mode='complete') + rankB = cupy.linalg.matrix_rank(B) + + u0 = u[:, :rankB] + u1 = u[:, rankB:] + z = z[:rankB, :] + + # If we can use the identity matrix as X the solution is obvious + if B.shape[0] == rankB: + # if B is square and full rank there is only one solution + # such as (A+BK)=inv(X)*diag(P)*X with X=eye(A.shape[0]) + # i.e K=inv(B)*(diag(P)-A) + # if B has as many lines as its rank (but not square) there are many + # solutions and we can choose one using least squares + # => use lstsq in both cases. 
+ # In both cases the transfer matrix X will be eye(A.shape[0]) and I + # can hardly think of a better one so there is nothing to optimize + # + # for complex poles we use the following trick + # + # |a -b| has for eigenvalues a+b and a-b + # |b a| + # + # |a+bi 0| has the obvious eigenvalues a+bi and a-bi + # |0 a-bi| + # + # e.g solving the first one in R gives the solution + # for the second one in C + diag_poles = cupy.zeros(A.shape) + idx = 0 + while idx < poles.shape[0]: + p = poles[idx] + diag_poles[idx, idx] = cupy.real(p) + if ~cupy.isreal(p): + diag_poles[idx, idx+1] = -cupy.imag(p) + diag_poles[idx+1, idx+1] = cupy.real(p) + diag_poles[idx+1, idx] = cupy.imag(p) + idx += 1 # skip next one + idx += 1 + gain_matrix = cupy.linalg.lstsq(B, diag_poles-A, rcond=-1)[0] + transfer_matrix = cupy.eye(A.shape[0]) + cur_rtol = cupy.nan + nb_iter = cupy.nan + else: + # step A (p1144 KNV) and beginning of step F: decompose + # dot(U1.T, A-P[i]*I).T and build our set of transfer_matrix vectors + # in the same loop + ker_pole = [] + + # flag to skip the conjugate of a complex pole + skip_conjugate = False + # select orthonormal base ker_pole for each Pole and vectors for + # transfer_matrix + for j in range(B.shape[0]): + if skip_conjugate: + skip_conjugate = False + continue + pole_space_j = cupy.dot(u1.T, A-poles[j]*cupy.eye(B.shape[0])).T + + # after QR Q=Q0|Q1 + # only Q0 is used to reconstruct the qr'ed (dot Q, R) matrix. + # Q1 is orthogonnal to Q0 and will be multiplied by the zeros in + # R when using mode "complete". 
In default mode Q1 and the zeros + # in R are not computed + + # To debug with numpy qr uncomment the line below + # Q, _ = np.linalg.qr(pole_space_j, mode="complete") + # Q, _ = s_qr(pole_space_j, mode="full") + Q, _ = cupy.linalg.qr(pole_space_j, mode="complete") + + ker_pole_j = Q[:, pole_space_j.shape[1]:] + + # We want to select one vector in ker_pole_j to build the transfer + # matrix, however qr returns sometimes vectors with zeros on the + # same line for each pole and this yields very long convergence + # times. + # Or some other times a set of vectors, one with zero imaginary + # part and one (or several) with imaginary parts. After trying + # many ways to select the best possible one (eg ditch vectors + # with zero imaginary part for complex poles) I ended up summing + # all vectors in ker_pole_j, this solves 100% of the problems and + # is a valid choice for transfer_matrix. + # This way for complex poles we are sure to have a non zero + # imaginary part that way, and the problem of lines full of zeros + # in transfer_matrix is solved too as when a vector from + # ker_pole_j has a zero the other one(s) when + # ker_pole_j.shape[1]>1) for sure won't have a zero there. 
+ + transfer_matrix_j = cupy.sum(ker_pole_j, axis=1)[:, None] + transfer_matrix_j = (transfer_matrix_j / + cupy.linalg.norm(transfer_matrix_j)) + if ~cupy.isreal(poles[j]): # complex pole + transfer_matrix_j = cupy.hstack([cupy.real(transfer_matrix_j), + cupy.imag(transfer_matrix_j)]) + ker_pole.extend([ker_pole_j, ker_pole_j]) + + # Skip next pole as it is the conjugate + skip_conjugate = True + else: # real pole, nothing to do + ker_pole.append(ker_pole_j) + + if j == 0: + transfer_matrix = transfer_matrix_j + else: + transfer_matrix = cupy.hstack( + (transfer_matrix, transfer_matrix_j)) + + if rankB > 1: # otherwise there is nothing we can optimize + stop, cur_rtol, nb_iter = update_loop(ker_pole, transfer_matrix, + poles, B, maxiter, rtol) + if not stop and rtol > 0: + # if rtol<=0 the user has probably done that on purpose, + # don't annoy him + err_msg = ( + "Convergence was not reached after maxiter iterations.\n" + f"You asked for a tolerance of {rtol}, we got {cur_rtol}." + ) + warnings.warn(err_msg, stacklevel=2) + + # reconstruct transfer_matrix to match complex conjugate pairs, + # ie transfer_matrix_j/transfer_matrix_j+1 are + # Re(Complex_pole), Im(Complex_pole) now and will be Re-Im/Re+Im after + transfer_matrix = transfer_matrix.astype(complex) + idx = 0 + while idx < poles.shape[0]-1: + if ~cupy.isreal(poles[idx]): + rel = transfer_matrix[:, idx].copy() + img = transfer_matrix[:, idx+1] + # rel will be an array referencing a column of transfer_matrix + # if we don't copy() it will changer after the next line and + # and the line after will not yield the correct value + transfer_matrix[:, idx] = rel-1j*img + transfer_matrix[:, idx+1] = rel+1j*img + idx += 1 # skip next one + idx += 1 + + try: + m = cupy.linalg.solve(transfer_matrix.T, cupy.diag( + poles) @ transfer_matrix.T).T + gain_matrix = cupy.linalg.solve(z, u0.T @ (m-A)) + except cupy.linalg.LinAlgError as e: + raise ValueError("The poles you've chosen can't be placed. 
" + "Check the controllability matrix and try " + "another set of poles") from e + + # Beware: Kautsky solves A+BK but the usual form is A-BK + gain_matrix = -gain_matrix + # K still contains complex with ~=0j imaginary parts, get rid of them + gain_matrix = cupy.real(gain_matrix) + + full_state_feedback = Bunch() + full_state_feedback.gain_matrix = gain_matrix + + # XXX: delegate to NumPy + temp = (A - B @ gain_matrix).get() + import numpy as np + poles = np.linalg.eig(temp)[0] + ordered_poles = _order_complex_poles(cupy.asarray(poles)) + + full_state_feedback.computed_poles = ordered_poles + full_state_feedback.requested_poles = poles + full_state_feedback.X = transfer_matrix + full_state_feedback.rtol = cur_rtol + full_state_feedback.nb_iter = nb_iter + + return full_state_feedback + + +# ### dlsim and related functions ### + +def dlsim(system, u, t=None, x0=None): + """ + Simulate output of a discrete-time linear system. + + Parameters + ---------- + system : tuple of array_like or instance of `dlti` + A tuple describing the system. + The following gives the number of elements in the tuple and + the interpretation: + + * 1: (instance of `dlti`) + * 3: (num, den, dt) + * 4: (zeros, poles, gain, dt) + * 5: (A, B, C, D, dt) + + u : array_like + An input array describing the input at each time `t` (interpolation is + assumed between given times). If there are multiple inputs, then each + column of the rank-2 array represents an input. + t : array_like, optional + The time steps at which the input is defined. If `t` is given, it + must be the same length as `u`, and the final value in `t` determines + the number of steps returned in the output. + x0 : array_like, optional + The initial conditions on the state vector (zero by default). + + Returns + ------- + tout : ndarray + Time values for the output, as a 1-D array. + yout : ndarray + System response, as a 1-D array. + xout : ndarray, optional + Time-evolution of the state-vector. 
Only generated if the input is a + `StateSpace` system. + + See Also + -------- + scipy.signal.dlsim + lsim, dstep, dimpulse, cont2discrete + """ + # Convert system to dlti-StateSpace + if isinstance(system, lti): + raise AttributeError('dlsim can only be used with discrete-time dlti ' + 'systems.') + elif not isinstance(system, dlti): + system = dlti(*system[:-1], dt=system[-1]) + + # Condition needed to ensure output remains compatible + is_ss_input = isinstance(system, StateSpace) + system = system._as_ss() + + u = cupy.atleast_1d(u) + + if u.ndim == 1: + u = cupy.atleast_2d(u).T + + if t is None: + out_samples = len(u) + stoptime = (out_samples - 1) * system.dt + else: + stoptime = t[-1] + out_samples = int(cupy.floor(stoptime / system.dt)) + 1 + + # Pre-build output arrays + xout = cupy.zeros((out_samples, system.A.shape[0])) + yout = cupy.zeros((out_samples, system.C.shape[0])) + tout = cupy.linspace(0.0, stoptime, num=out_samples) + + # Check initial condition + if x0 is None: + xout[0, :] = cupy.zeros((system.A.shape[1],)) + else: + xout[0, :] = cupy.asarray(x0) + + # Pre-interpolate inputs into the desired time steps + if t is None: + u_dt = u + else: + if len(u.shape) == 1: + u = u[:, None] + + u_dt = make_interp_spline(t, u, k=1)(tout) + + # Simulate the system + for i in range(0, out_samples - 1): + xout[i+1, :] = system.A @ xout[i, :] + system.B @ u_dt[i, :] + yout[i, :] = system.C @ xout[i, :] + system.D @ u_dt[i, :] + + # Last point + yout[out_samples-1, :] = (system.C @ xout[out_samples-1, :] + + system.D @ u_dt[out_samples-1, :]) + + if is_ss_input: + return tout, yout, xout + else: + return tout, yout + + +def dimpulse(system, x0=None, t=None, n=None): + """ + Impulse response of discrete-time system. + + Parameters + ---------- + system : tuple of array_like or instance of `dlti` + A tuple describing the system. 
+ The following gives the number of elements in the tuple and + the interpretation: + + * 1: (instance of `dlti`) + * 3: (num, den, dt) + * 4: (zeros, poles, gain, dt) + * 5: (A, B, C, D, dt) + + x0 : array_like, optional + Initial state-vector. Defaults to zero. + t : array_like, optional + Time points. Computed if not given. + n : int, optional + The number of time points to compute (if `t` is not given). + + Returns + ------- + tout : ndarray + Time values for the output, as a 1-D array. + yout : tuple of ndarray + Impulse response of system. Each element of the tuple represents + the output of the system based on an impulse in each input. + + See Also + -------- + scipy.signal.dimpulse + impulse, dstep, dlsim, cont2discrete + """ + # Convert system to dlti-StateSpace + if isinstance(system, dlti): + system = system._as_ss() + elif isinstance(system, lti): + raise AttributeError('dimpulse can only be used with discrete-time ' + 'dlti systems.') + else: + system = dlti(*system[:-1], dt=system[-1])._as_ss() + + # Default to 100 samples if unspecified + if n is None: + n = 100 + + # If time is not specified, use the number of samples + # and system dt + if t is None: + t = cupy.linspace(0, n * system.dt, n, endpoint=False) + else: + t = cupy.asarray(t) + + # For each input, implement a step change + yout = None + for i in range(0, system.inputs): + u = cupy.zeros((t.shape[0], system.inputs)) + u[0, i] = 1.0 + + one_output = dlsim(system, u, t=t, x0=x0) + + if yout is None: + yout = (one_output[1],) + else: + yout = yout + (one_output[1],) + + tout = one_output[0] + + return tout, yout + + +def dstep(system, x0=None, t=None, n=None): + """ + Step response of discrete-time system. + + Parameters + ---------- + system : tuple of array_like + A tuple describing the system. 
+ The following gives the number of elements in the tuple and + the interpretation: + + * 1: (instance of `dlti`) + * 3: (num, den, dt) + * 4: (zeros, poles, gain, dt) + * 5: (A, B, C, D, dt) + + x0 : array_like, optional + Initial state-vector. Defaults to zero. + t : array_like, optional + Time points. Computed if not given. + n : int, optional + The number of time points to compute (if `t` is not given). + + Returns + ------- + tout : ndarray + Output time points, as a 1-D array. + yout : tuple of ndarray + Step response of system. Each element of the tuple represents + the output of the system based on a step response to each input. + + See Also + -------- + scipy.signal.dlstep + step, dimpulse, dlsim, cont2discrete + """ + # Convert system to dlti-StateSpace + if isinstance(system, dlti): + system = system._as_ss() + elif isinstance(system, lti): + raise AttributeError('dstep can only be used with discrete-time dlti ' + 'systems.') + else: + system = dlti(*system[:-1], dt=system[-1])._as_ss() + + # Default to 100 samples if unspecified + if n is None: + n = 100 + + # If time is not specified, use the number of samples + # and system dt + if t is None: + t = cupy.linspace(0, n * system.dt, n, endpoint=False) + else: + t = cupy.asarray(t) + + # For each input, implement a step change + yout = None + for i in range(0, system.inputs): + u = cupy.zeros((t.shape[0], system.inputs)) + u[:, i] = cupy.ones((t.shape[0],)) + + one_output = dlsim(system, u, t=t, x0=x0) + + if yout is None: + yout = (one_output[1],) + else: + yout = yout + (one_output[1],) + + tout = one_output[0] + + return tout, yout + + +def dfreqresp(system, w=None, n=10000, whole=False): + r""" + Calculate the frequency response of a discrete-time system. + + Parameters + ---------- + system : an instance of the `dlti` class or a tuple describing the system. 
+ The following gives the number of elements in the tuple and + the interpretation: + + * 1 (instance of `dlti`) + * 2 (numerator, denominator, dt) + * 3 (zeros, poles, gain, dt) + * 4 (A, B, C, D, dt) + + w : array_like, optional + Array of frequencies (in radians/sample). Magnitude and phase data is + calculated for every value in this array. If not given a reasonable + set will be calculated. + n : int, optional + Number of frequency points to compute if `w` is not given. The `n` + frequencies are logarithmically spaced in an interval chosen to + include the influence of the poles and zeros of the system. + whole : bool, optional + Normally, if 'w' is not given, frequencies are computed from 0 to the + Nyquist frequency, pi radians/sample (upper-half of unit-circle). If + `whole` is True, compute frequencies from 0 to 2*pi radians/sample. + + Returns + ------- + w : 1D ndarray + Frequency array [radians/sample] + H : 1D ndarray + Array of complex magnitude values + + See Also + -------- + scipy.signal.dfeqresp + + Notes + ----- + If (num, den) is passed in for ``system``, coefficients for both the + numerator and denominator should be specified in descending exponent + order (e.g. ``z^2 + 3z + 5`` would be represented as ``[1, 3, 5]``). 
+ """ + if not isinstance(system, dlti): + if isinstance(system, lti): + raise AttributeError('dfreqresp can only be used with ' + 'discrete-time systems.') + + system = dlti(*system[:-1], dt=system[-1]) + + if isinstance(system, StateSpace): + # No SS->ZPK code exists right now, just SS->TF->ZPK + system = system._as_tf() + + if not isinstance(system, (TransferFunction, ZerosPolesGain)): + raise ValueError('Unknown system type') + + if system.inputs != 1 or system.outputs != 1: + raise ValueError("dfreqresp requires a SISO (single input, single " + "output) system.") + + if w is not None: + worN = w + else: + worN = n + + if isinstance(system, TransferFunction): + # Convert numerator and denominator from polynomials in the variable + # 'z' to polynomials in the variable 'z^-1', as freqz expects. + num, den = TransferFunction._z_to_zinv(system.num.ravel(), system.den) + w, h = freqz(num, den, worN=worN, whole=whole) + + elif isinstance(system, ZerosPolesGain): + w, h = freqz_zpk(system.zeros, system.poles, system.gain, worN=worN, + whole=whole) + + return w, h + + +def dbode(system, w=None, n=100): + r""" + Calculate Bode magnitude and phase data of a discrete-time system. + + Parameters + ---------- + system : an instance of the LTI class or a tuple describing the system. + The following gives the number of elements in the tuple and + the interpretation: + + * 1 (instance of `dlti`) + * 2 (num, den, dt) + * 3 (zeros, poles, gain, dt) + * 4 (A, B, C, D, dt) + + w : array_like, optional + Array of frequencies (in radians/sample). Magnitude and phase data is + calculated for every value in this array. If not given a reasonable + set will be calculated. + n : int, optional + Number of frequency points to compute if `w` is not given. The `n` + frequencies are logarithmically spaced in an interval chosen to + include the influence of the poles and zeros of the system. 
+ + Returns + ------- + w : 1D ndarray + Frequency array [rad/time_unit] + mag : 1D ndarray + Magnitude array [dB] + phase : 1D ndarray + Phase array [deg] + + See Also + -------- + scipy.signal.dbode + + Notes + ----- + If (num, den) is passed in for ``system``, coefficients for both the + numerator and denominator should be specified in descending exponent + order (e.g. ``z^2 + 3z + 5`` would be represented as ``[1, 3, 5]``). + """ + w, y = dfreqresp(system, w=w, n=n) + + if isinstance(system, dlti): + dt = system.dt + else: + dt = system[-1] + + mag = 20.0 * cupy.log10(abs(y)) + phase = cupy.rad2deg(cupy.unwrap(cupy.angle(y))) + + return w / dt, mag, phase + + +# ### cont2discrete ### + +def cont2discrete(system, dt, method="zoh", alpha=None): + """ + Transform a continuous to a discrete state-space system. + + Parameters + ---------- + system : a tuple describing the system or an instance of `lti` + The following gives the number of elements in the tuple and + the interpretation: + + * 1: (instance of `lti`) + * 2: (num, den) + * 3: (zeros, poles, gain) + * 4: (A, B, C, D) + + dt : float + The discretization time step. 
+ method : str, optional + Which method to use: + + * gbt: generalized bilinear transformation + * bilinear: Tustin's approximation ("gbt" with alpha=0.5) + * euler: Euler (or forward differencing) method + ("gbt" with alpha=0) + * backward_diff: Backwards differencing ("gbt" with alpha=1.0) + * zoh: zero-order hold (default) + * foh: first-order hold (*versionadded: 1.3.0*) + * impulse: equivalent impulse response (*versionadded: 1.3.0*) + + alpha : float within [0, 1], optional + The generalized bilinear transformation weighting parameter, which + should only be specified with method="gbt", and is ignored otherwise + + Returns + ------- + sysd : tuple containing the discrete system + Based on the input type, the output will be of the form + + * (num, den, dt) for transfer function input + * (zeros, poles, gain, dt) for zeros-poles-gain input + * (A, B, C, D, dt) for state-space system input + + Notes + ----- + By default, the routine uses a Zero-Order Hold (zoh) method to perform + the transformation. Alternatively, a generalized bilinear transformation + may be used, which includes the common Tustin's bilinear approximation, + an Euler's method technique, or a backwards differencing technique. 
+ + See Also + -------- + scipy.signal.cont2discrete + + + """ + if len(system) == 1: + return system.to_discrete() + if len(system) == 2: + sysd = cont2discrete(tf2ss(system[0], system[1]), dt, method=method, + alpha=alpha) + return ss2tf(sysd[0], sysd[1], sysd[2], sysd[3]) + (dt,) + elif len(system) == 3: + sysd = cont2discrete(zpk2ss(system[0], system[1], system[2]), dt, + method=method, alpha=alpha) + return ss2zpk(sysd[0], sysd[1], sysd[2], sysd[3]) + (dt,) + elif len(system) == 4: + a, b, c, d = system + else: + raise ValueError("First argument must either be a tuple of 2 (tf), " + "3 (zpk), or 4 (ss) arrays.") + + if method == 'gbt': + if alpha is None: + raise ValueError("Alpha parameter must be specified for the " + "generalized bilinear transform (gbt) method") + elif alpha < 0 or alpha > 1: + raise ValueError("Alpha parameter must be within the interval " + "[0,1] for the gbt method") + + if method == 'gbt': + # This parameter is used repeatedly - compute once here + ima = cupy.eye(a.shape[0]) - alpha*dt*a + rhs = cupy.eye(a.shape[0]) + (1.0 - alpha)*dt*a + ad = cupy.linalg.solve(ima, rhs) + bd = cupy.linalg.solve(ima, dt*b) + + # Similarly solve for the output equation matrices + cd = cupy.linalg.solve(ima.T, c.T) + cd = cd.T + dd = d + alpha*(c @ bd) + + elif method == 'bilinear' or method == 'tustin': + return cont2discrete(system, dt, method="gbt", alpha=0.5) + + elif method == 'euler' or method == 'forward_diff': + return cont2discrete(system, dt, method="gbt", alpha=0.0) + + elif method == 'backward_diff': + return cont2discrete(system, dt, method="gbt", alpha=1.0) + + elif method == 'zoh': + # Build an exponential matrix + em_upper = cupy.hstack((a, b)) + + # Need to stack zeros under the a and b matrices + em_lower = cupy.hstack((cupy.zeros((b.shape[1], a.shape[0])), + cupy.zeros((b.shape[1], b.shape[1])))) + + em = cupy.vstack((em_upper, em_lower)) + ms = expm(dt * em) + + # Dispose of the lower rows + ms = ms[:a.shape[0], :] + + ad = ms[:, 
import cupy


# Feedback tap positions for each supported register length ``nbits``.
# These are the defaults consumed by ``max_len_seq`` when the caller does
# not supply ``taps``; see the reference table cited in its docstring.
_mls_taps = {2: [1], 3: [2], 4: [3], 5: [3], 6: [5], 7: [6], 8: [7, 6, 1],
             9: [5], 10: [7], 11: [9], 12: [11, 10, 4], 13: [12, 11, 8],
             14: [13, 12, 2], 15: [14], 16: [15, 13, 4], 17: [14],
             18: [11], 19: [18, 17, 14], 20: [17], 21: [19], 22: [21],
             23: [18], 24: [23, 22, 17], 25: [22], 26: [25, 24, 20],
             27: [26, 25, 22], 28: [25], 29: [27], 30: [29, 28, 7],
             31: [28], 32: [31, 30, 10]}


# Raw CUDA kernel: one thread per bit of the shift register.  A single
# thread block iterates ``length`` times; on every iteration each thread
# shifts its neighbour's bit into place, while the last thread
# (idx == n_state - 1) also emits the output bit and XORs the feedback
# taps into the value it writes back.
MAX_LEN_SEQ_KERNEL = r"""
#include
#include
#include

extern "C" __global__ void max_len_seq(
    long long length, long long n_taps, int n_state, long long* taps,
    signed char* state, signed char* seq) {

    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    for(long long i = 0; i < length; i++) {
        signed char next_state = state[(idx + 1) % n_state];
        if(idx == n_state - 1) {
            seq[i] = state[0];
            for(int n_tap = 0; n_tap < n_taps; n_tap++) {
                long long tap = taps[n_tap];
                next_state ^= state[tap];
            }
        }
        state[idx] = next_state;
    }
}
"""

_max_len_seq = cupy.RawKernel(MAX_LEN_SEQ_KERNEL, 'max_len_seq')


def max_len_seq(nbits, state=None, length=None, taps=None):
    """
    Maximum length sequence (MLS) generator.

    Parameters
    ----------
    nbits : int
        Number of bits to use. Length of the resulting sequence will
        be ``(2**nbits) - 1``. Note that generating long sequences
        (e.g., greater than ``nbits == 16``) can take a long time.
    state : array_like, optional
        If array, must be of length ``nbits``, and will be cast to binary
        (bool) representation. If None, a seed of ones will be used,
        producing a repeatable representation. If ``state`` is all
        zeros, an error is raised as this is invalid. Default: None.
    length : int, optional
        Number of samples to compute. If None, the entire length
        ``(2**nbits) - 1`` is computed.
    taps : array_like, optional
        Polynomial taps to use (e.g., ``[7, 6, 1]`` for an 8-bit sequence).
        If None, taps will be automatically selected (for up to
        ``nbits == 32``).

    Returns
    -------
    seq : array
        Resulting MLS sequence of 0's and 1's.
    state : array
        The final state of the shift register.

    Notes
    -----
    The algorithm for MLS generation is generically described in:

        https://en.wikipedia.org/wiki/Maximum_length_sequence

    The default values for taps are specifically taken from the first
    option listed for each value of ``nbits`` in:

        https://web.archive.org/web/20181001062252/http://www.newwaveinstruments.com/resources/articles/m_sequence_linear_feedback_shift_register_lfsr.htm

    """  # NOQA
    # Resolve the feedback taps: either look them up in the table above or
    # validate the user-supplied ones (sorted descending, deduplicated).
    if taps is None:
        if nbits not in _mls_taps:
            known_taps = cupy.array(list(_mls_taps.keys()))
            raise ValueError('nbits must be between %s and %s if taps is None'
                             % (known_taps.min(), known_taps.max()))
        taps = cupy.array(_mls_taps[nbits], cupy.int64)
    else:
        taps = cupy.unique(cupy.array(taps, cupy.int64))[::-1]
        if cupy.any(taps < 0) or cupy.any(taps > nbits) or taps.size < 1:
            raise ValueError('taps must be non-empty with values between '
                             'zero and nbits (inclusive)')
        # Re-wrap to guarantee a plain device array (comment in the SciPy
        # original says "needed for Cython and Pythran").
        taps = cupy.array(taps)  # needed for Cython and Pythran

    # A full period of an nbits-wide MLS is 2**nbits - 1 samples.
    n_max = (2 ** nbits) - 1
    if length is None:
        length = n_max
    else:
        length = int(length)
        if length < 0:
            raise ValueError('length must be greater than or equal to 0')

    # We use int8 instead of bool here because NumPy arrays of bools
    # don't seem to work nicely with Cython
    if state is None:
        state = cupy.ones(nbits, dtype=cupy.int8, order='c')
    else:
        # makes a copy if need be, ensuring it's 0's and 1's
        state = cupy.array(state, dtype=bool, order='c').astype(cupy.int8)
    if state.ndim != 1 or state.size != nbits:
        raise ValueError('state must be a 1-D array of size nbits')
    if cupy.all(state == 0):
        raise ValueError('state must not be all zeros')

    seq = cupy.empty(length, dtype=cupy.int8, order='c')
    n_taps = len(taps)

    # Launch one block of ``nbits`` threads; each thread owns one register
    # bit for the whole generation loop.
    # NOTE(review): the kernel has no __syncthreads() between reading the
    # neighbouring state entry and writing the updated one, so it appears
    # to rely on lockstep execution within a single warp (nbits <= 32) --
    # confirm behaviour for user-supplied taps with larger nbits.
    _max_len_seq((1,), (nbits,), (length, n_taps, nbits, taps, state, seq))
    return seq, state
""" A vendored copy of scipy.optimize.fminbound."""

import cupy


# standard status messages of optimizers
_status_message = {'success': 'Optimization terminated successfully.',
                   'maxfev': 'Maximum number of function evaluations has '
                             'been exceeded.',
                   'maxiter': 'Maximum number of iterations has been '
                              'exceeded.',
                   'pr_loss': 'Desired error not necessarily achieved due '
                              'to precision loss.',
                   'nan': 'NaN result encountered.',
                   'out_of_bounds': 'The result is outside of the provided '
                                    'bounds.'}


class OptimizeResult(dict):
    """ Represents the optimization result.

    A plain ``dict`` whose entries are additionally reachable as
    attributes, e.g. ``res.x`` is equivalent to ``res['x']``.
    """

    def __getattr__(self, name):
        # Mirror item access so missing keys surface as AttributeError,
        # which is what attribute-style callers expect.
        try:
            return self[name]
        except KeyError as e:
            raise AttributeError(name) from e


def _endprint(x, flag, fval, maxfun, xtol, disp):
    """Print a diagnostic message for the termination status ``flag``.

    ``flag`` is 0 on convergence, 1 when ``maxfun`` was exhausted and
    2 when a NaN was encountered; ``disp`` gates verbosity.
    """
    if flag == 0:
        if disp > 1:
            print("\nOptimization terminated successfully;\n"
                  "The returned value satisfies the termination criteria\n"
                  "(using xtol = ", xtol, ")")
    if flag == 1:
        if disp:
            print("\nMaximum number of function evaluations exceeded --- "
                  "increase maxfun argument.\n")
    if flag == 2:
        if disp:
            print("\n{}".format(_status_message['nan']))
    return


def fminbound(func, x1, x2, args=(), xtol=1e-5, maxfun=500,
              full_output=0, disp=1):
    """Bounded minimization for scalar functions.

    Parameters
    ----------
    func : callable f(x,*args)
        Objective function to be minimized (must accept and return scalars).
    x1, x2 : float or array scalar
        Finite optimization bounds.
    args : tuple, optional
        Extra arguments passed to function.
    xtol : float, optional
        The convergence tolerance.
    maxfun : int, optional
        Maximum number of function evaluations allowed.
    full_output : bool, optional
        If True, return optional outputs.
    disp : int, optional
        If non-zero, print messages.
            0 : no message printing.
            1 : non-convergence notification messages only.
            2 : print a message on convergence too.
            3 : print iteration results.

    Returns
    -------
    xopt : ndarray
        Parameters (over given interval) which minimize the
        objective function.
    fval : number
        (Only returned if ``full_output`` is true.) The function value
        evaluated at the minimizer.
    ierr : int
        (Only returned if ``full_output`` is true.) An error flag
        (0 if converged, 1 if maximum number of function calls reached).
    numfunc : int
        (Only returned if ``full_output`` is true.) The number of
        function calls made.

    See also
    --------
    scipy.optimize.fminbound

    Notes
    -----
    Finds a local minimizer of the scalar function `func` in the
    interval x1 < xopt < x2 using Brent's method. (See `brent`
    for auto-bracketing.)

    References
    ----------
    .. [1] Forsythe, G.E., M. A. Malcolm, and C. B. Moler. "Computer Methods
       for Mathematical Computations." Prentice-Hall Series in Automatic
       Computation 259 (1977).
    .. [2] Brent, Richard P. Algorithms for Minimization Without Derivatives.
       Courier Corporation, 2013.

    """
    # BUG FIX: forward ``disp`` to the solver.  Previously it was omitted
    # from ``options``, so the documented ``disp`` argument was silently
    # ignored (the solver always ran with its default ``disp=0``).
    options = {'xatol': xtol,
               'maxiter': maxfun,
               'disp': disp,
               }

    res = _minimize_scalar_bounded(func, (x1, x2), args, **options)
    if full_output:
        return res['x'], res['fun'], res['status'], res['nfev']
    else:
        return res['x']


def _minimize_scalar_bounded(func, bounds, args=(),
                             xatol=1e-5, maxiter=500, disp=0,
                             **unknown_options):
    """Brent's bounded scalar minimization (vendored from SciPy).

    Options
    -------
    maxiter : int
        Maximum number of iterations to perform.
    disp: int, optional
        If non-zero, print messages.
            0 : no message printing.
            1 : non-convergence notification messages only.
            2 : print a message on convergence too.
            3 : print iteration results.
    xatol : float
        Absolute error in solution `xopt` acceptable for convergence.

    """
    maxfun = maxiter
    # Test bounds are of correct form
    if len(bounds) != 2:
        raise ValueError('bounds must have two elements.')
    x1, x2 = bounds

    if x1 > x2:
        raise ValueError("The lower bound exceeds the upper bound.")

    flag = 0
    header = ' Func-count     x          f(x)          Procedure'
    step = '       initial'

    # Machine-precision based tolerances and the golden-section ratio.
    sqrt_eps = cupy.sqrt(2.2e-16)
    golden_mean = 0.5 * (3.0 - cupy.sqrt(5.0))
    a, b = x1, x2
    fulc = a + golden_mean * (b - a)
    nfc, xf = fulc, fulc
    rat = e = 0.0
    x = xf
    fx = func(x, *args)
    num = 1
    fmin_data = (1, xf, fx)
    fu = cupy.inf

    ffulc = fnfc = fx
    xm = 0.5 * (a + b)
    tol1 = sqrt_eps * cupy.abs(xf) + xatol / 3.0
    tol2 = 2.0 * tol1

    if disp > 2:
        print(" ")
        print(header)
        print("%5.0f   %12.6g %12.6g %s" % (fmin_data + (step,)))

    while (cupy.abs(xf - xm) > (tol2 - 0.5 * (b - a))):
        golden = 1
        # Check for parabolic fit
        if cupy.abs(e) > tol1:
            golden = 0
            r = (xf - nfc) * (fx - ffulc)
            q = (xf - fulc) * (fx - fnfc)
            p = (xf - fulc) * q - (xf - nfc) * r
            q = 2.0 * (q - r)
            if q > 0.0:
                p = -p
            q = cupy.abs(q)
            r = e
            e = rat

            # Check for acceptability of parabola
            if ((cupy.abs(p) < cupy.abs(0.5*q*r)) and (p > q*(a - xf)) and
                    (p < q * (b - xf))):
                rat = (p + 0.0) / q
                x = xf + rat
                step = '       parabolic'

                if ((x - a) < tol2) or ((b - x) < tol2):
                    si = cupy.sign(xm - xf) + ((xm - xf) == 0)
                    rat = tol1 * si
            else:      # do a golden-section step
                golden = 1

        if golden:  # do a golden-section step
            if xf >= xm:
                e = a - xf
            else:
                e = b - xf
            rat = golden_mean*e
            step = '       golden'

        # Never evaluate closer than tol1 to the current best point.
        si = cupy.sign(rat) + (rat == 0)
        x = xf + si * cupy.maximum(cupy.abs(rat), tol1)
        fu = func(x, *args)
        num += 1
        fmin_data = (num, x, fu)
        if disp > 2:
            print("%5.0f   %12.6g %12.6g %s" % (fmin_data + (step,)))

        # Bookkeeping: maintain the best (xf), second best (nfc) and third
        # best (fulc) points together with their function values.
        if fu <= fx:
            if x >= xf:
                a = xf
            else:
                b = xf
            fulc, ffulc = nfc, fnfc
            nfc, fnfc = xf, fx
            xf, fx = x, fu
        else:
            if x < xf:
                a = x
            else:
                b = x
            if (fu <= fnfc) or (nfc == xf):
                fulc, ffulc = nfc, fnfc
                nfc, fnfc = x, fu
            elif (fu <= ffulc) or (fulc == xf) or (fulc == nfc):
                fulc, ffulc = x, fu

        xm = 0.5 * (a + b)
        tol1 = sqrt_eps * cupy.abs(xf) + xatol / 3.0
        tol2 = 2.0 * tol1

        if num >= maxfun:
            flag = 1
            break

    if cupy.isnan(xf) or cupy.isnan(fx) or cupy.isnan(fu):
        flag = 2

    fval = fx
    if disp > 0:
        _endprint(x, flag, fval, maxfun, xatol, disp)

    result = OptimizeResult(fun=fval, status=flag, success=(flag == 0),
                            message={0: 'Solution found.',
                                     1: 'Maximum number of function calls '
                                        'reached.',
                                     2: _status_message['nan']}.get(flag, ''),
                            x=xf, nfev=num, nit=num)

    return result
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. +""" + +import math +import cupy + +from cupy._core._scalar import get_typename +from cupy_backends.cuda.api import runtime + +from cupyx import jit + + +def _get_typename(dtype): + typename = get_typename(dtype) + if cupy.dtype(dtype).kind == 'c': + typename = 'thrust::' + typename + elif typename == 'float16': + if runtime.is_hip: + # 'half' in name_expressions weirdly raises + # HIPRTC_ERROR_NAME_EXPRESSION_NOT_VALID in getLoweredName() on + # ROCm + typename = '__half' + else: + typename = 'half' + return typename + + +FLOAT_TYPES = [cupy.float16, cupy.float32, cupy.float64] +INT_TYPES = [cupy.int8, cupy.int16, cupy.int32, cupy.int64] +UNSIGNED_TYPES = [cupy.uint8, cupy.uint16, cupy.uint32, cupy.uint64] +FLOAT_INT_TYPES = FLOAT_TYPES + INT_TYPES # type: ignore +TYPES = FLOAT_INT_TYPES + UNSIGNED_TYPES # type: ignore +TYPE_NAMES = [_get_typename(t) for t in TYPES] +FLOAT_INT_NAMES = [_get_typename(t) for t in FLOAT_INT_TYPES] + +_modedict = { + cupy.less: 0, + cupy.greater: 1, + cupy.less_equal: 2, + cupy.greater_equal: 3, + cupy.equal: 4, + cupy.not_equal: 5, +} + +PEAKS_KERNEL = r""" +#include +#include +#include + +template +__global__ void local_maxima_1d( + const int n, const T* __restrict__ x, long long* midpoints, + long long* left_edges, long long* right_edges) { + + const int orig_idx = blockDim.x * blockIdx.x + threadIdx.x; + const int idx = orig_idx + 1; + + if(idx >= n - 1) { + return; + } + + long long midpoint = -1; + long long left = -1; + long long right = -1; + + if(x[idx - 1] < 
x[idx]) { + int i_ahead = idx + 1; + + while(i_ahead < n - 1 && x[i_ahead] == x[idx]) { + i_ahead++; + } + + if(x[i_ahead] < x[idx]) { + left = idx; + right = i_ahead - 1; + midpoint = (left + right) / 2; + } + } + + midpoints[orig_idx] = midpoint; + left_edges[orig_idx] = left; + right_edges[orig_idx] = right; +} + +template +__global__ void peak_prominences( + const int n, const int n_peaks, const T* __restrict__ x, + const long long* __restrict__ peaks, const long long wlen, + T* prominences, long long* left_bases, long long* right_bases) { + + const int idx = blockDim.x * blockIdx.x + threadIdx.x; + if(idx >= n_peaks) { + return; + } + + const long long peak = peaks[idx]; + long long i_min = 0; + long long i_max = n - 1; + + if(wlen >= 2) { + i_min = max(peak - wlen / 2, i_min); + i_max = min(peak + wlen / 2, i_max); + } + + left_bases[idx] = peak; + long long i = peak; + T left_min = x[peak]; + + while(i_min <= i && x[i] <= x[peak]) { + if(x[i] < left_min) { + left_min = x[i]; + left_bases[idx] = i; + } + i--; + } + + right_bases[idx] = peak; + i = peak; + T right_min = x[peak]; + + while(i <= i_max && x[i] <= x[peak]) { + if(x[i] < right_min) { + right_min = x[i]; + right_bases[idx] = i; + } + i++; + } + + prominences[idx] = x[peak] - max(left_min, right_min); +} + +template<> +__global__ void peak_prominences( + const int n, const int n_peaks, const half* __restrict__ x, + const long long* __restrict__ peaks, const long long wlen, + half* prominences, long long* left_bases, long long* right_bases) { + + const int idx = blockDim.x * blockIdx.x + threadIdx.x; + if(idx >= n_peaks) { + return; + } + + const long long peak = peaks[idx]; + long long i_min = 0; + long long i_max = n - 1; + + if(wlen >= 2) { + i_min = max(peak - wlen / 2, i_min); + i_max = min(peak + wlen / 2, i_max); + } + + left_bases[idx] = peak; + long long i = peak; + half left_min = x[peak]; + + while(i_min <= i && x[i] <= x[peak]) { + if(x[i] < left_min) { + left_min = x[i]; + left_bases[idx] 
= i; + } + i--; + } + + right_bases[idx] = peak; + i = peak; + half right_min = x[peak]; + + while(i <= i_max && x[i] <= x[peak]) { + if(x[i] < right_min) { + right_min = x[i]; + right_bases[idx] = i; + } + i++; + } + + prominences[idx] = x[peak] - __hmax(left_min, right_min); +} + +template +__global__ void peak_widths( + const int n, const T* __restrict__ x, + const long long* __restrict__ peaks, + const double rel_height, + const T* __restrict__ prominences, + const long long* __restrict__ left_bases, + const long long* __restrict__ right_bases, + double* widths, double* width_heights, + double* left_ips, double* right_ips) { + + const int idx = blockDim.x * blockIdx.x + threadIdx.x; + if(idx >= n) { + return; + } + + long long i_min = left_bases[idx]; + long long i_max = right_bases[idx]; + long long peak = peaks[idx]; + + double height = x[peak] - prominences[idx] * rel_height; + width_heights[idx] = height; + + // Find intersection point on left side + long long i = peak; + while (i_min < i && height < x[i]) { + i--; + } + + double left_ip = (double) i; + if(x[i] < height) { + // Interpolate if true intersection height is between samples + left_ip += (height - x[i]) / (x[i + 1] - x[i]); + } + + // Find intersection point on right side + i = peak; + while(i < i_max && height < x[i]) { + i++; + } + + double right_ip = (double) i; + if(x[i] < height) { + // Interpolate if true intersection height is between samples + right_ip -= (height - x[i]) / (x[i - 1] - x[i]); + } + + widths[idx] = right_ip - left_ip; + left_ips[idx] = left_ip; + right_ips[idx] = right_ip; +} + +template<> +__global__ void peak_widths( + const int n, const half* __restrict__ x, + const long long* __restrict__ peaks, + const double rel_height, + const half* __restrict__ prominences, + const long long* __restrict__ left_bases, + const long long* __restrict__ right_bases, + double* widths, double* width_heights, + double* left_ips, double* right_ips) { + + const int idx = blockDim.x * 
blockIdx.x + threadIdx.x; + if(idx >= n) { + return; + } + + long long i_min = left_bases[idx]; + long long i_max = right_bases[idx]; + long long peak = peaks[idx]; + + double height = ((double) x[peak]) - ((double) prominences[idx]) * rel_height; + width_heights[idx] = height; + + // Find intersection point on left side + long long i = peak; + while (i_min < i && height < ((double) x[i])) { + i--; + } + + double left_ip = (double) i; + if(((double) x[i]) < height) { + // Interpolate if true intersection height is between samples + left_ip += (height - ((double) x[i])) / ((double) (x[i + 1] - x[i])); + } + + // Find intersection point on right side + i = peak; + while(i < i_max && height < ((double) x[i])) { + i++; + } + + double right_ip = (double) i; + if(((double) x[i]) < height) { + // Interpolate if true intersection height is between samples + right_ip -= (height - ((double) x[i])) / ((double) (x[i - 1] - x[i])); + } + + widths[idx] = right_ip - left_ip; + left_ips[idx] = left_ip; + right_ips[idx] = right_ip; +} +""" # NOQA + +PEAKS_MODULE = cupy.RawModule( + code=PEAKS_KERNEL, options=('-std=c++11',), + name_expressions=[f'local_maxima_1d<{x}>' for x in TYPE_NAMES] + + [f'peak_prominences<{x}>' for x in TYPE_NAMES] + + [f'peak_widths<{x}>' for x in TYPE_NAMES]) + + +ARGREL_KERNEL = r""" +#include +#include +#include + +template +__device__ __forceinline__ bool less( const T &a, const T &b ) { + return ( a < b ); +} + +template +__device__ __forceinline__ bool greater( const T &a, const T &b ) { + return ( a > b ); +} + +template +__device__ __forceinline__ bool less_equal( const T &a, const T &b ) { + return ( a <= b ); +} + +template +__device__ __forceinline__ bool greater_equal( const T &a, const T &b ) { + return ( a >= b ); +} + +template +__device__ __forceinline__ bool equal( const T &a, const T &b ) { + return ( a == b ); +} + +template +__device__ __forceinline__ bool not_equal( const T &a, const T &b ) { + return ( a != b ); +} + +__device__ 
__forceinline__ void clip_plus( + const bool &clip, const int &n, int &plus ) { + if ( clip ) { + if ( plus >= n ) { + plus = n - 1; + } + } else { + if ( plus >= n ) { + plus -= n; + } + } +} + +__device__ __forceinline__ void clip_minus( + const bool &clip, const int &n, int &minus ) { + if ( clip ) { + if ( minus < 0 ) { + minus = 0; + } + } else { + if ( minus < 0 ) { + minus += n; + } + } +} + +template +__device__ bool compare(const int comp, const T &a, const T &b) { + if(comp == 0) { + return less(a, b); + } else if(comp == 1) { + return greater(a, b); + } else if(comp == 2) { + return less_equal(a, b); + } else if(comp == 3) { + return greater_equal(a, b); + } else if(comp == 4) { + return equal(a, b); + } else { + return not_equal(a, b); + } +} + +template +__global__ void boolrelextrema_1D( const int n, + const int order, + const bool clip, + const int comp, + const T *__restrict__ inp, + bool *__restrict__ results) { + + const int tx { static_cast( blockIdx.x * blockDim.x + threadIdx.x ) }; + const int stride { static_cast( blockDim.x * gridDim.x ) }; + + for ( int tid = tx; tid < n; tid += stride ) { + + const T data { inp[tid] }; + bool temp { true }; + + for ( int o = 1; o < ( order + 1 ); o++ ) { + int plus { tid + o }; + int minus { tid - o }; + + clip_plus( clip, n, plus ); + clip_minus( clip, n, minus ); + + temp &= compare( comp, data, inp[plus] ); + temp &= compare( comp, data, inp[minus] ); + } + results[tid] = temp; + } +} + +template +__global__ void boolrelextrema_2D( const int in_x, + const int in_y, + const int order, + const bool clip, + const int comp, + const int axis, + const T *__restrict__ inp, + bool *__restrict__ results) { + + const int ty { static_cast( blockIdx.x * blockDim.x + threadIdx.x ) }; + const int tx { static_cast( blockIdx.y * blockDim.y + threadIdx.y ) }; + + if ( ( tx < in_y ) && ( ty < in_x ) ) { + int tid { tx * in_x + ty }; + + const T data { inp[tid] }; + bool temp { true }; + + for ( int o = 1; o < ( order + 1 
); o++ ) { + + int plus {}; + int minus {}; + + if ( axis == 0 ) { + plus = tx + o; + minus = tx - o; + + clip_plus( clip, in_y, plus ); + clip_minus( clip, in_y, minus ); + + plus = plus * in_x + ty; + minus = minus * in_x + ty; + } else { + plus = ty + o; + minus = ty - o; + + clip_plus( clip, in_x, plus ); + clip_minus( clip, in_x, minus ); + + plus = tx * in_x + plus; + minus = tx * in_x + minus; + } + + temp &= compare( comp, data, inp[plus] ); + temp &= compare( comp, data, inp[minus] ); + } + results[tid] = temp; + } +} +""" + + +ARGREL_MODULE = cupy.RawModule( + code=ARGREL_KERNEL, options=('-std=c++11',), + name_expressions=[f'boolrelextrema_1D<{x}>' for x in FLOAT_INT_NAMES] + + [f'boolrelextrema_2D<{x}>' for x in FLOAT_INT_NAMES]) + + +def _get_module_func(module, func_name, *template_args): + args_dtypes = [_get_typename(arg.dtype) for arg in template_args] + template = ', '.join(args_dtypes) + kernel_name = f'{func_name}<{template}>' if template_args else func_name + kernel = module.get_function(kernel_name) + return kernel + + +def _local_maxima_1d(x): + samples = x.shape[0] - 2 + block_sz = 128 + n_blocks = (samples + block_sz - 1) // block_sz + + midpoints = cupy.empty(samples, dtype=cupy.int64) + left_edges = cupy.empty(samples, dtype=cupy.int64) + right_edges = cupy.empty(samples, dtype=cupy.int64) + + local_max_kernel = _get_module_func(PEAKS_MODULE, 'local_maxima_1d', x) + local_max_kernel((n_blocks,), (block_sz,), + (x.shape[0], x, midpoints, left_edges, right_edges)) + + pos_idx = midpoints > 0 + midpoints = midpoints[pos_idx] + left_edges = left_edges[pos_idx] + right_edges = right_edges[pos_idx] + + return midpoints, left_edges, right_edges + + +def _unpack_condition_args(interval, x, peaks): + """ + Parse condition arguments for `find_peaks`. + + Parameters + ---------- + interval : number or ndarray or sequence + Either a number or ndarray or a 2-element sequence of the former. 
The + first value is always interpreted as `imin` and the second, + if supplied, as `imax`. + x : ndarray + The signal with `peaks`. + peaks : ndarray + An array with indices used to reduce `imin` and / or `imax` if those + are arrays. + + Returns + ------- + imin, imax : number or ndarray or None + Minimal and maximal value in `argument`. + + Raises + ------ + ValueError : + If interval border is given as array and its size does not match the + size of `x`. + """ + try: + imin, imax = interval + except (TypeError, ValueError): + imin, imax = (interval, None) + + # Reduce arrays if arrays + if isinstance(imin, cupy.ndarray): + if imin.size != x.size: + raise ValueError( + 'array size of lower interval border must match x') + imin = imin[peaks] + if isinstance(imax, cupy.ndarray): + if imax.size != x.size: + raise ValueError( + 'array size of upper interval border must match x') + imax = imax[peaks] + + return imin, imax + + +def _select_by_property(peak_properties, pmin, pmax): + """ + Evaluate where the generic property of peaks confirms to an interval. + + Parameters + ---------- + peak_properties : ndarray + An array with properties for each peak. + pmin : None or number or ndarray + Lower interval boundary for `peak_properties`. ``None`` + is interpreted as an open border. + pmax : None or number or ndarray + Upper interval boundary for `peak_properties`. ``None`` + is interpreted as an open border. + + Returns + ------- + keep : bool + A boolean mask evaluating to true where `peak_properties` confirms + to the interval. + + See Also + -------- + find_peaks + + """ + keep = cupy.ones(peak_properties.size, dtype=bool) + if pmin is not None: + keep &= (pmin <= peak_properties) + if pmax is not None: + keep &= (peak_properties <= pmax) + return keep + + +def _select_by_peak_threshold(x, peaks, tmin, tmax): + """ + Evaluate which peaks fulfill the threshold condition. + + Parameters + ---------- + x : ndarray + A 1-D array which is indexable by `peaks`. 
+ peaks : ndarray + Indices of peaks in `x`. + tmin, tmax : scalar or ndarray or None + Minimal and / or maximal required thresholds. If supplied as ndarrays + their size must match `peaks`. ``None`` is interpreted as an open + border. + + Returns + ------- + keep : bool + A boolean mask evaluating to true where `peaks` fulfill the threshold + condition. + left_thresholds, right_thresholds : ndarray + Array matching `peak` containing the thresholds of each peak on + both sides. + + """ + # Stack thresholds on both sides to make min / max operations easier: + # tmin is compared with the smaller, and tmax with the greater threshold to + # each peak's side + stacked_thresholds = cupy.vstack([x[peaks] - x[peaks - 1], + x[peaks] - x[peaks + 1]]) + keep = cupy.ones(peaks.size, dtype=bool) + if tmin is not None: + min_thresholds = cupy.min(stacked_thresholds, axis=0) + keep &= (tmin <= min_thresholds) + if tmax is not None: + max_thresholds = cupy.max(stacked_thresholds, axis=0) + keep &= (max_thresholds <= tmax) + + return keep, stacked_thresholds[0], stacked_thresholds[1] + + +def _select_by_peak_distance(peaks, priority, distance): + """ + Evaluate which peaks fulfill the distance condition. + + Parameters + ---------- + peaks : ndarray + Indices of peaks in `vector`. + priority : ndarray + An array matching `peaks` used to determine priority of each peak. A + peak with a higher priority value is kept over one with a lower one. + distance : np.float64 + Minimal distance that peaks must be spaced. + + Returns + ------- + keep : ndarray[bool] + A boolean mask evaluating to true where `peaks` fulfill the distance + condition. + + Notes + ----- + Declaring the input arrays as C-contiguous doesn't seem to have performance + advantages. 
+ """ + peaks_size = peaks.shape[0] + # Round up because actual peak distance can only be natural number + distance_ = cupy.ceil(distance) + keep = cupy.ones(peaks_size, dtype=cupy.bool_) # Prepare array of flags + + # Create map from `i` (index for `peaks` sorted by `priority`) to `j` + # (index for `peaks` sorted by position). This allows to iterate `peaks` + # and `keep` with `j` by order of `priority` while still maintaining the + # ability to step to neighbouring peaks with (`j` + 1) or (`j` - 1). + priority_to_position = cupy.argsort(priority) + + # Highest priority first -> iterate in reverse order (decreasing) + + # NOTE: There's not an alternative way to do this procedure in a parallel + # fashion, since discarding a peak requires to know if there's a valid + # neighbour that subsumes it, which in turn requires to know + # if that neighbour is valid. If it was to done in parallel, there would be + # tons of repeated computations per peak, thus increasing the total runtime + # per peak compared to a sequential implementation. + for i in range(peaks_size - 1, -1, -1): + # "Translate" `i` to `j` which points to current peak whose + # neighbours are to be evaluated + j = priority_to_position[i] + if keep[j] == 0: + # Skip evaluation for peak already marked as "don't keep" + continue + + k = j - 1 + # Flag "earlier" peaks for removal until minimal distance is exceeded + while 0 <= k and peaks[j] - peaks[k] < distance_: + keep[k] = 0 + k -= 1 + + k = j + 1 + # Flag "later" peaks for removal until minimal distance is exceeded + while k < peaks_size and peaks[k] - peaks[j] < distance_: + keep[k] = 0 + k += 1 + return keep + + +def _arg_x_as_expected(value): + """Ensure argument `x` is a 1-D C-contiguous array. + + Returns + ------- + value : ndarray + A 1-D C-contiguous array. 
+ """ + value = cupy.asarray(value, order='C') + if value.ndim != 1: + raise ValueError('`x` must be a 1-D array') + return value + + +def _arg_wlen_as_expected(value): + """Ensure argument `wlen` is of type `np.intp` and larger than 1. + + Used in `peak_prominences` and `peak_widths`. + + Returns + ------- + value : np.intp + The original `value` rounded up to an integer or -1 if `value` was + None. + """ + if value is None: + # _peak_prominences expects an intp; -1 signals that no value was + # supplied by the user + value = -1 + elif 1 < value: + # Round up to a positive integer + if not cupy.can_cast(value, cupy.int64, "safe"): + value = math.ceil(value) + value = int(value) + else: + raise ValueError('`wlen` must be larger than 1, was {}' + .format(value)) + return value + + +def _arg_peaks_as_expected(value): + """Ensure argument `peaks` is a 1-D C-contiguous array of dtype('int64'). + + Used in `peak_prominences` and `peak_widths` to make `peaks` compatible + with the signature of the wrapped Cython functions. + + Returns + ------- + value : ndarray + A 1-D C-contiguous array with dtype('int64'). 
+ """ + value = cupy.asarray(value) + if value.size == 0: + # Empty arrays default to cupy.float64 but are valid input + value = cupy.array([], dtype=cupy.int64) + try: + # Safely convert to C-contiguous array of type cupy.int64 + value = value.astype(cupy.int64, order='C', copy=False) + except TypeError as e: + raise TypeError("cannot safely cast `peaks` to dtype('intp')") from e + if value.ndim != 1: + raise ValueError('`peaks` must be a 1-D array') + return value + + +@jit.rawkernel() +def _check_prominence_invalid(n, peaks, left_bases, right_bases, out): + tid = jit.blockIdx.x * jit.blockDim.x + jit.threadIdx.x + i_min = left_bases[tid] + i_max = right_bases[tid] + peak = peaks[tid] + valid = 0 <= i_min and i_min <= peak and peak <= i_max and i_max < n + out[tid] = not valid + + +def _peak_prominences(x, peaks, wlen=None, check=False): + if check and cupy.any(cupy.logical_or(peaks < 0, peaks > x.shape[0] - 1)): + raise ValueError('peaks are not a valid index') + + prominences = cupy.empty(peaks.shape[0], dtype=x.dtype) + left_bases = cupy.empty(peaks.shape[0], dtype=cupy.int64) + right_bases = cupy.empty(peaks.shape[0], dtype=cupy.int64) + + n = peaks.shape[0] + block_sz = 128 + n_blocks = (n + block_sz - 1) // block_sz + + peak_prom_kernel = _get_module_func(PEAKS_MODULE, 'peak_prominences', x) + peak_prom_kernel( + (n_blocks,), (block_sz,), + (x.shape[0], n, x, peaks, wlen, prominences, left_bases, right_bases)) + + return prominences, left_bases, right_bases + + +def _peak_widths(x, peaks, rel_height, prominences, left_bases, right_bases, + check=False): + if rel_height < 0: + raise ValueError('`rel_height` must be greater or equal to 0.0') + if prominences is None: + raise TypeError('prominences must not be None') + if left_bases is None: + raise TypeError('left_bases must not be None') + if right_bases is None: + raise TypeError('right_bases must not be None') + if not (peaks.shape[0] == prominences.shape[0] == left_bases.shape[0] + == 
right_bases.shape[0]): + raise ValueError("arrays in `prominence_data` must have the same " + "shape as `peaks`") + + n = peaks.shape[0] + block_sz = 128 + n_blocks = (n + block_sz - 1) // block_sz + + if check and n > 0: + invalid = cupy.zeros(n, dtype=cupy.bool_) + _check_prominence_invalid( + (n_blocks,), (block_sz,), + (x.shape[0], peaks, left_bases, right_bases, invalid)) + if cupy.any(invalid): + raise ValueError("prominence data is invalid") + + widths = cupy.empty(peaks.shape[0], dtype=cupy.float64) + width_heights = cupy.empty(peaks.shape[0], dtype=cupy.float64) + left_ips = cupy.empty(peaks.shape[0], dtype=cupy.float64) + right_ips = cupy.empty(peaks.shape[0], dtype=cupy.float64) + + peak_widths_kernel = _get_module_func(PEAKS_MODULE, 'peak_widths', x) + peak_widths_kernel( + (n_blocks,), (block_sz,), + (n, x, peaks, rel_height, prominences, left_bases, right_bases, + widths, width_heights, left_ips, right_ips)) + return widths, width_heights, left_ips, right_ips + + +def peak_prominences(x, peaks, wlen=None): + """ + Calculate the prominence of each peak in a signal. + + The prominence of a peak measures how much a peak stands out from the + surrounding baseline of the signal and is defined as the vertical distance + between the peak and its lowest contour line. + + Parameters + ---------- + x : sequence + A signal with peaks. + peaks : sequence + Indices of peaks in `x`. + wlen : int, optional + A window length in samples that optionally limits the evaluated area + for each peak to a subset of `x`. The peak is always placed in the + middle of the window therefore the given length is rounded up to the + next odd integer. This parameter can speed up the calculation + (see Notes). + + Returns + ------- + prominences : ndarray + The calculated prominences for each peak in `peaks`. + left_bases, right_bases : ndarray + The peaks' bases as indices in `x` to the left and right of each peak. + The higher base of each pair is a peak's lowest contour line. 

    Raises
    ------
    ValueError
        If a value in `peaks` is an invalid index for `x`.

    Warns
    -----
    PeakPropertyWarning
        For indices in `peaks` that don't point to valid local maxima in `x`,
        the returned prominence will be 0 and this warning is raised. This
        also happens if `wlen` is smaller than the plateau size of a peak.

    Warnings
    --------
    This function may return unexpected results for data containing NaNs. To
    avoid this, NaNs should either be removed or replaced.

    See Also
    --------
    find_peaks
        Find peaks inside a signal based on peak properties.
    peak_widths
        Calculate the width of peaks.

    Notes
    -----
    Strategy to compute a peak's prominence:

    1. Extend a horizontal line from the current peak to the left and right
       until the line either reaches the window border (see `wlen`) or
       intersects the signal again at the slope of a higher peak. An
       intersection with a peak of the same height is ignored.
    2. On each side find the minimal signal value within the interval defined
       above. These points are the peak's bases.
    3. The higher one of the two bases marks the peak's lowest contour line.
       The prominence can then be calculated as the vertical difference
       between the peaks height itself and its lowest contour line.

    Searching for the peak's bases can be slow for large `x` with periodic
    behavior because large chunks or even the full signal need to be evaluated
    for the first algorithmic step. This evaluation area can be limited with
    the parameter `wlen` which restricts the algorithm to a window around the
    current peak and can shorten the calculation time if the window length is
    short in relation to `x`.
    However, this may stop the algorithm from finding the true global contour
    line if the peak's true bases are outside this window. Instead, a higher
    contour line is found within the restricted window leading to a smaller
    calculated prominence. In practice, this is only relevant for the highest
    set of peaks in `x`. This behavior may even be used intentionally to
    calculate "local" prominences.

    """
    # Normalize inputs to device arrays of the expected dtype/layout, then
    # delegate to the kernel-backed implementation with index checking on.
    x = _arg_x_as_expected(x)
    peaks = _arg_peaks_as_expected(peaks)
    wlen = _arg_wlen_as_expected(wlen)
    return _peak_prominences(x, peaks, wlen, check=True)


def peak_widths(x, peaks, rel_height=0.5, prominence_data=None, wlen=None):
    """
    Calculate the width of each peak in a signal.

    This function calculates the width of a peak in samples at a relative
    distance to the peak's height and prominence.

    Parameters
    ----------
    x : sequence
        A signal with peaks.
    peaks : sequence
        Indices of peaks in `x`.
    rel_height : float, optional
        Chooses the relative height at which the peak width is measured as a
        percentage of its prominence. 1.0 calculates the width of the peak at
        its lowest contour line while 0.5 evaluates at half the prominence
        height. Must be at least 0. See notes for further explanation.
    prominence_data : tuple, optional
        A tuple of three arrays matching the output of `peak_prominences` when
        called with the same arguments `x` and `peaks`. This data are
        calculated internally if not provided.
    wlen : int, optional
        A window length in samples passed to `peak_prominences` as an optional
        argument for internal calculation of `prominence_data`. This argument
        is ignored if `prominence_data` is given.

    Returns
    -------
    widths : ndarray
        The widths for each peak in samples.
    width_heights : ndarray
        The height of the contour lines at which the `widths` where evaluated.
    left_ips, right_ips : ndarray
        Interpolated positions of left and right intersection points of a
        horizontal line at the respective evaluation height.

    Raises
    ------
    ValueError
        If `prominence_data` is supplied but doesn't satisfy the condition
        ``0 <= left_base <= peak <= right_base < x.shape[0]`` for each peak,
        has the wrong dtype, is not C-contiguous or does not have the same
        shape.

    Warns
    -----
    PeakPropertyWarning
        Raised if any calculated width is 0. This may stem from the supplied
        `prominence_data` or if `rel_height` is set to 0.

    Warnings
    --------
    This function may return unexpected results for data containing NaNs. To
    avoid this, NaNs should either be removed or replaced.

    See Also
    --------
    find_peaks
        Find peaks inside a signal based on peak properties.
    peak_prominences
        Calculate the prominence of peaks.

    Notes
    -----
    The basic algorithm to calculate a peak's width is as follows:

    * Calculate the evaluation height :math:`h_{eval}` with the formula
      :math:`h_{eval} = h_{Peak} - P \\cdot R`, where :math:`h_{Peak}` is the
      height of the peak itself, :math:`P` is the peak's prominence and
      :math:`R` a positive ratio specified with the argument `rel_height`.
    * Draw a horizontal line at the evaluation height to both sides, starting
      at the peak's current vertical position until the lines either intersect
      a slope, the signal border or cross the vertical position of the peak's
      base (see `peak_prominences` for an definition). For the first case,
      intersection with the signal, the true intersection point is estimated
      with linear interpolation.
    * Calculate the width as the horizontal distance between the chosen
      endpoints on both sides. As a consequence of this the maximal possible
      width for each peak is the horizontal distance between its bases.

    As shown above to calculate a peak's width its prominence and bases must be
    known. You can supply these yourself with the argument `prominence_data`.
    Otherwise, they are internally calculated (see `peak_prominences`).

    """
    x = _arg_x_as_expected(x)
    peaks = _arg_peaks_as_expected(peaks)
    if prominence_data is None:
        # Calculate prominence if not supplied and use wlen if supplied.
        wlen = _arg_wlen_as_expected(wlen)
        prominence_data = _peak_prominences(x, peaks, wlen, check=True)
    # `check=True` re-validates caller-supplied prominence data on device.
    return _peak_widths(x, peaks, rel_height, *prominence_data, check=True)


def find_peaks(x, height=None, threshold=None, distance=None,
               prominence=None, width=None, wlen=None, rel_height=0.5,
               plateau_size=None):
    """
    Find peaks inside a signal based on peak properties.

    This function takes a 1-D array and finds all local maxima by
    simple comparison of neighboring values. Optionally, a subset of these
    peaks can be selected by specifying conditions for a peak's properties.

    Parameters
    ----------
    x : sequence
        A signal with peaks.
    height : number or ndarray or sequence, optional
        Required height of peaks. Either a number, ``None``, an array matching
        `x` or a 2-element sequence of the former. The first element is
        always interpreted as the minimal and the second, if supplied, as the
        maximal required height.
    threshold : number or ndarray or sequence, optional
        Required threshold of peaks, the vertical distance to its neighboring
        samples. Either a number, ``None``, an array matching `x` or a
        2-element sequence of the former. The first element is always
        interpreted as the minimal and the second, if supplied, as the maximal
        required threshold.
    distance : number, optional
        Required minimal horizontal distance (>= 1) in samples between
        neighbouring peaks. Smaller peaks are removed first until the condition
        is fulfilled for all remaining peaks.
    prominence : number or ndarray or sequence, optional
        Required prominence of peaks. Either a number, ``None``, an array
        matching `x` or a 2-element sequence of the former. The first
        element is always interpreted as the minimal and the second, if
        supplied, as the maximal required prominence.
    width : number or ndarray or sequence, optional
        Required width of peaks in samples. Either a number, ``None``, an
        array matching `x` or a 2-element sequence of the former. The first
        element is always interpreted as the minimal and the second, if
        supplied, as the maximal required width.
    wlen : int, optional
        Used for calculation of the peaks prominences, thus it is only used if
        one of the arguments `prominence` or `width` is given. See argument
        `wlen` in `peak_prominences` for a full description of its effects.
    rel_height : float, optional
        Used for calculation of the peaks width, thus it is only used if
        `width` is given. See argument `rel_height` in `peak_widths` for
        a full description of its effects.
    plateau_size : number or ndarray or sequence, optional
        Required size of the flat top of peaks in samples. Either a number,
        ``None``, an array matching `x` or a 2-element sequence of the former.
        The first element is always interpreted as the minimal and the second,
        if supplied as the maximal required plateau size.

        .. versionadded:: 1.2.0

    Returns
    -------
    peaks : ndarray
        Indices of peaks in `x` that satisfy all given conditions.
    properties : dict
        A dictionary containing properties of the returned peaks which were
        calculated as intermediate results during evaluation of the specified
        conditions:

        * 'peak_heights'
              If `height` is given, the height of each peak in `x`.
        * 'left_thresholds', 'right_thresholds'
              If `threshold` is given, these keys contain a peaks vertical
              distance to its neighbouring samples.
        * 'prominences', 'right_bases', 'left_bases'
              If `prominence` is given, these keys are accessible. See
              `peak_prominences` for a description of their content.
        * 'width_heights', 'left_ips', 'right_ips'
              If `width` is given, these keys are accessible. See
              `peak_widths` for a description of their content.
        * 'plateau_sizes', left_edges', 'right_edges'
              If `plateau_size` is given, these keys are accessible and
              contain the indices of a peak's edges (edges are still part of
              the plateau) and the calculated plateau sizes.

        To calculate and return properties without excluding peaks, provide
        the open interval ``(None, None)`` as a value to the appropriate
        argument (excluding `distance`).

    Warns
    -----
    PeakPropertyWarning
        Raised if a peak's properties have unexpected values (see
        `peak_prominences` and `peak_widths`).

    Warnings
    --------
    This function may return unexpected results for data containing NaNs. To
    avoid this, NaNs should either be removed or replaced.

    See Also
    --------
    find_peaks_cwt
        Find peaks using the wavelet transformation.
    peak_prominences
        Directly calculate the prominence of peaks.
    peak_widths
        Directly calculate the width of peaks.

    Notes
    -----
    In the context of this function, a peak or local maximum is defined as any
    sample whose two direct neighbours have a smaller amplitude. For flat
    peaks (more than one sample of equal amplitude wide) the index of the
    middle sample is returned (rounded down in case the number of samples is
    even). For noisy signals the peak locations can be off because the noise
    might change the position of local maxima. In those cases consider
    smoothing the signal before searching for peaks or use other peak finding
    and fitting methods (like `find_peaks_cwt`).

    Some additional comments on specifying conditions:

    * Almost all conditions (excluding `distance`) can be given as half-open
      or closed intervals, e.g., ``1`` or ``(1, None)`` defines the half-open
      interval :math:`[1, \\infty]` while ``(None, 1)`` defines the interval
      :math:`[-\\infty, 1]`. The open interval ``(None, None)`` can be
      specified as well, which returns the matching properties without
      exclusion of peaks.
    * The border is always included in the interval used to select valid
      peaks.
    * For several conditions the interval borders can be specified with
      arrays matching `x` in shape which enables dynamic constrains based on
      the sample position.
    * The conditions are evaluated in the following order: `plateau_size`,
      `height`, `threshold`, `distance`, `prominence`, `width`. In most cases
      this order is the fastest one because faster operations are applied
      first to reduce the number of peaks that need to be evaluated later.
    * While indices in `peaks` are guaranteed to be at least `distance`
      samples apart, edges of flat peaks may be closer than the allowed
      `distance`.
    * Use `wlen` to reduce the time it takes to evaluate the conditions for
      `prominence` or `width` if `x` is large or has many local maxima
      (see `peak_prominences`).
    """  # NOQA

    x = _arg_x_as_expected(x)
    if distance is not None and distance < 1:
        raise ValueError('`distance` must be greater or equal to 1')

    # All local maxima (with plateau edges) are found first; each condition
    # below then shrinks `peaks` and filters every accumulated property with
    # the same `keep` mask so indices stay aligned.
    peaks, left_edges, right_edges = _local_maxima_1d(x)
    properties = {}

    if plateau_size is not None:
        # Evaluate plateau size
        plateau_sizes = right_edges - left_edges + 1
        pmin, pmax = _unpack_condition_args(plateau_size, x, peaks)
        keep = _select_by_property(plateau_sizes, pmin, pmax)
        peaks = peaks[keep]
        properties["plateau_sizes"] = plateau_sizes
        properties["left_edges"] = left_edges
        properties["right_edges"] = right_edges
        properties = {key: array[keep] for key, array in properties.items()}

    if height is not None:
        # Evaluate height condition
        peak_heights = x[peaks]
        hmin, hmax = _unpack_condition_args(height, x, peaks)
        keep = _select_by_property(peak_heights, hmin, hmax)
        peaks = peaks[keep]
        properties["peak_heights"] = peak_heights
        properties = {key: array[keep] for key, array in properties.items()}

    if threshold is not None:
        # Evaluate threshold condition
        tmin, tmax = _unpack_condition_args(threshold, x, peaks)
        keep, left_thresholds, right_thresholds = _select_by_peak_threshold(
            x, peaks, tmin, tmax)
        peaks = peaks[keep]
        properties["left_thresholds"] = left_thresholds
        properties["right_thresholds"] = right_thresholds
        properties = {key: array[keep] for key, array in properties.items()}

    if distance is not None:
        # Evaluate distance condition
        keep = _select_by_peak_distance(peaks, x[peaks], distance)  # NOQA
        peaks = peaks[keep]
        properties = {key: array[keep] for key, array in properties.items()}

    if prominence is not None or width is not None:
        # Calculate prominence (required for both conditions)
        wlen = _arg_wlen_as_expected(wlen)  # NOQA
        properties.update(zip(
            ['prominences', 'left_bases', 'right_bases'],
            _peak_prominences(x, peaks, wlen=wlen)  # NOQA
        ))

    if prominence is not None:
        # Evaluate prominence condition
        pmin, pmax = _unpack_condition_args(prominence, x, peaks)  # NOQA
        keep = _select_by_property(properties['prominences'], pmin, pmax)  # NOQA
        peaks = peaks[keep]
        properties = {key: array[keep] for key, array in properties.items()}

    if width is not None:
        # Calculate widths
        properties.update(zip(
            ['widths', 'width_heights', 'left_ips', 'right_ips'],
            _peak_widths(x, peaks, rel_height, properties['prominences'],  # NOQA
                         properties['left_bases'], properties['right_bases'])
        ))
        # Evaluate width condition
        wmin, wmax = _unpack_condition_args(width, x, peaks)  # NOQA
        keep = _select_by_property(properties['widths'], wmin, wmax)  # NOQA
        peaks = peaks[keep]
        properties = {key: array[keep] for key, array in properties.items()}

    return peaks, properties


def _peak_finding(data, comparator, axis, order, mode, results):
    # Launch the relextrema CUDA kernel for 1-D or 2-D `data`, writing the
    # boolean extrema mask into `results` in place.
    comp = _modedict[comparator]
    clip = mode == 'clip'

    device_id = cupy.cuda.Device()
    # 1-D launch: a fixed grid sized from the device's SM count.
    num_blocks = (device_id.attributes["MultiProcessorCount"] * 20,)
    block_sz = (512,)
    call_args = data.shape[axis], order, clip, comp, data, results

    kernel_name = "boolrelextrema_1D"
    if data.ndim > 1:
        # 2-D launch: one thread per element with 16x16 blocks.
        kernel_name = "boolrelextrema_2D"
        block_sz_x, block_sz_y = 16, 16
        n_blocks_x = (data.shape[1] + block_sz_x - 1) // block_sz_x
        n_blocks_y = (data.shape[0] + block_sz_y - 1) // block_sz_y
        block_sz = (block_sz_x, block_sz_y)
        num_blocks = (n_blocks_x, n_blocks_y)
        call_args = (data.shape[1], data.shape[0], order, clip, comp, axis,
                     data, results)

    boolrelextrema = _get_module_func(ARGREL_MODULE, kernel_name, data)
    boolrelextrema(num_blocks, block_sz, call_args)


def _boolrelextrema(data, comparator, axis=0, order=1, mode="clip"):
    """
    Calculate the relative extrema of `data`.

    Relative extrema are calculated by finding locations where
    ``comparator(data[n], data[n+1:n+order+1])`` is True.

    Parameters
    ----------
    data : ndarray
        Array in which to find the relative extrema.
    comparator : callable
        Function to use to compare two data points.
        Should take two arrays as arguments.
    axis : int, optional
        Axis over which to select from `data`. Default is 0.
    order : int, optional
        How many points on each side to use for the comparison
        to consider ``comparator(n,n+x)`` to be True.
    mode : str, optional
        How the edges of the vector are treated. 'wrap' (wrap around) or
        'clip' (treat overflow as the same as the last (or first) element).
        Default 'clip'. See cupy.take.

    Returns
    -------
    extrema : ndarray
        Boolean array of the same shape as `data` that is True at an extrema,
        False otherwise.

    See also
    --------
    argrelmax, argrelmin
    """
    if (int(order) != order) or (order < 1):
        raise ValueError("Order must be an int >= 1")

    if data.ndim < 3:
        # Fast path: dedicated 1-D/2-D CUDA kernels fill `results` in place.
        results = cupy.empty(data.shape, dtype=bool)
        _peak_finding(data, comparator, axis, order, mode, results)
    else:
        # Generic path for >= 3-D data: compare against shifted copies along
        # `axis`, one shift per iteration.
        datalen = data.shape[axis]
        locs = cupy.arange(0, datalen)
        results = cupy.ones(data.shape, dtype=bool)
        main = cupy.take(data, locs, axis=axis)
        for shift in cupy.arange(1, order + 1):
            if mode == "clip":
                p_locs = cupy.clip(locs + shift, a_min=None,
                                   a_max=(datalen - 1))
                m_locs = cupy.clip(locs - shift, a_min=0, a_max=None)
            else:
                p_locs = locs + shift
                m_locs = locs - shift
            plus = cupy.take(data, p_locs, axis=axis)
            minus = cupy.take(data, m_locs, axis=axis)
            results &= comparator(main, plus)
            results &= comparator(main, minus)

            # Early exit: once nothing qualifies, larger shifts cannot
            # resurrect any candidate.
            if ~results.any():
                return results

    return results


def argrelmin(data, axis=0, order=1, mode="clip"):
    """
    Calculate the relative minima of `data`.

    Parameters
    ----------
    data : ndarray
        Array in which to find the relative minima.
    axis : int, optional
        Axis over which to select from `data`. Default is 0.
    order : int, optional
        How many points on each side to use for the comparison
        to consider ``comparator(n, n+x)`` to be True.
    mode : str, optional
        How the edges of the vector are treated.
        Available options are 'wrap' (wrap around) or 'clip' (treat overflow
        as the same as the last (or first) element).
        Default 'clip'. See cupy.take.


    Returns
    -------
    extrema : tuple of ndarrays
        Indices of the minima in arrays of integers. ``extrema[k]`` is
        the array of indices of axis `k` of `data`. Note that the
        return value is a tuple even when `data` is one-dimensional.

    See Also
    --------
    argrelextrema, argrelmax, find_peaks

    Notes
    -----
    This function uses `argrelextrema` with cupy.less as comparator. Therefore
    it requires a strict inequality on both sides of a value to consider it a
    minimum. This means flat minima (more than one sample wide) are not
    detected. In case of one-dimensional `data` `find_peaks` can be used to
    detect all local minima, including flat ones, by calling it with negated
    `data`.

    Examples
    --------
    >>> from cupyx.scipy.signal import argrelmin
    >>> import cupy
    >>> x = cupy.array([2, 1, 2, 3, 2, 0, 1, 0])
    >>> argrelmin(x)
    (array([1, 5]),)
    >>> y = cupy.array([[1, 2, 1, 2],
    ...                 [2, 2, 0, 0],
    ...                 [5, 3, 4, 4]])
    ...
    >>> argrelmin(y, axis=1)
    (array([0, 2]), array([2, 1]))

    """
    data = cupy.asarray(data)
    return argrelextrema(data, cupy.less, axis, order, mode)


def argrelmax(data, axis=0, order=1, mode="clip"):
    """
    Calculate the relative maxima of `data`.

    Parameters
    ----------
    data : ndarray
        Array in which to find the relative maxima.
    axis : int, optional
        Axis over which to select from `data`. Default is 0.
    order : int, optional
        How many points on each side to use for the comparison
        to consider ``comparator(n, n+x)`` to be True.
    mode : str, optional
        How the edges of the vector are treated.
        Available options are 'wrap' (wrap around) or 'clip' (treat overflow
        as the same as the last (or first) element).
        Default 'clip'. See cupy.take.

    Returns
    -------
    extrema : tuple of ndarrays
        Indices of the maxima in arrays of integers. ``extrema[k]`` is
        the array of indices of axis `k` of `data`. Note that the
        return value is a tuple even when `data` is one-dimensional.

    See Also
    --------
    argrelextrema, argrelmin, find_peaks

    Notes
    -----
    This function uses `argrelextrema` with cupy.greater as comparator.
    Therefore it requires a strict inequality on both sides of a value to
    consider it a maximum. This means flat maxima (more than one sample wide)
    are not detected. In case of one-dimensional `data` `find_peaks` can be
    used to detect all local maxima, including flat ones.

    Examples
    --------
    >>> from cupyx.scipy.signal import argrelmax
    >>> import cupy
    >>> x = cupy.array([2, 1, 2, 3, 2, 0, 1, 0])
    >>> argrelmax(x)
    (array([3, 6]),)
    >>> y = cupy.array([[1, 2, 1, 2],
    ...                 [2, 2, 0, 0],
    ...                 [5, 3, 4, 4]])
    ...
    >>> argrelmax(y, axis=1)
    (array([0]), array([1]))
    """
    data = cupy.asarray(data)
    return argrelextrema(data, cupy.greater, axis, order, mode)


def argrelextrema(data, comparator, axis=0, order=1, mode="clip"):
    """
    Calculate the relative extrema of `data`.

    Parameters
    ----------
    data : ndarray
        Array in which to find the relative extrema.
    comparator : callable
        Function to use to compare two data points.
        Should take two arrays as arguments.
    axis : int, optional
        Axis over which to select from `data`. Default is 0.
    order : int, optional
        How many points on each side to use for the comparison
        to consider ``comparator(n, n+x)`` to be True.
    mode : str, optional
        How the edges of the vector are treated.
        Available options are 'wrap' (wrap around) or 'clip' (treat overflow
        as the same as the last (or first) element).
        Default 'clip'. See cupy.take.

    Returns
    -------
    extrema : tuple of ndarrays
        Indices of the maxima in arrays of integers. ``extrema[k]`` is
        the array of indices of axis `k` of `data`. Note that the
        return value is a tuple even when `data` is one-dimensional.

    See Also
    --------
    argrelmin, argrelmax

    Examples
    --------
    >>> from cupyx.scipy.signal import argrelextrema
    >>> import cupy
    >>> x = cupy.array([2, 1, 2, 3, 2, 0, 1, 0])
    >>> argrelextrema(x, cupy.greater)
    (array([3, 6]),)
    >>> y = cupy.array([[1, 2, 1, 2],
    ...                 [2, 2, 0, 0],
    ...                 [5, 3, 4, 4]])
    ...
+ >>> argrelextrema(y, cupy.less, axis=1) + (array([0, 2]), array([2, 1])) + + """ + data = cupy.asarray(data) + results = _boolrelextrema(data, comparator, axis, order, mode) + + if mode == "raise": + raise NotImplementedError( + "CuPy `take` doesn't support `mode='raise'`.") + + return cupy.nonzero(results) diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/_polyutils.py b/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/_polyutils.py new file mode 100644 index 0000000000000000000000000000000000000000..9c1cab13a1ced32785d283e209311d681cd2afa2 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/_polyutils.py @@ -0,0 +1,577 @@ +""" +Routines for manipulating partial fraction expansions. +""" + +import cupy + + +def roots(arr): + """np.roots replacement. XXX: calls into NumPy, then converts back. + """ + import numpy as np + + arr = cupy.asarray(arr).get() + return cupy.asarray(np.roots(arr)) + + +def poly(A): + """np.poly replacement for 2D A. Otherwise, use cupy.poly.""" + sh = A.shape + if not (len(sh) == 2 and sh[0] == sh[1] and sh[0] != 0): + raise ValueError("input must be a non-empty square 2d array.") + + import numpy as np + + seq_of_zeros = np.linalg.eigvals(A.get()) + + dt = seq_of_zeros.dtype + a = np.ones((1,), dtype=dt) + for zero in seq_of_zeros: + a = np.convolve(a, np.r_[1, -zero], mode='full') + + if issubclass(a.dtype.type, cupy.complexfloating): + # if complex roots are all complex conjugates, the roots are real. + roots = np.asarray(seq_of_zeros, dtype=complex) + if np.all(np.sort(roots) == np.sort(roots.conjugate())): + a = a.real.copy() + + return cupy.asarray(a) + + +def _cmplx_sort(p): + """Sort roots based on magnitude. + """ + indx = cupy.argsort(cupy.abs(p)) + return cupy.take(p, indx, 0), indx + + +# np.polydiv clone +def _polydiv(u, v): + u = cupy.atleast_1d(u) + 0.0 + v = cupy.atleast_1d(v) + 0.0 + # w has the common type + w = u[0] + v[0] + m = len(u) - 1 + n = len(v) - 1 + scale = 1. 
/ v[0] + q = cupy.zeros((max(m - n + 1, 1),), w.dtype) + r = u.astype(w.dtype) + for k in range(0, m-n+1): + d = scale * r[k] + q[k] = d + r[k:k + n + 1] -= d * v + while cupy.allclose(r[0], 0, rtol=1e-14) and (r.shape[-1] > 1): + r = r[1:] + return q, r + + +def unique_roots(p, tol=1e-3, rtype='min'): + """Determine unique roots and their multiplicities from a list of roots. + + Parameters + ---------- + p : array_like + The list of roots. + tol : float, optional + The tolerance for two roots to be considered equal in terms of + the distance between them. Default is 1e-3. Refer to Notes about + the details on roots grouping. + rtype : {'max', 'maximum', 'min', 'minimum', 'avg', 'mean'}, optional + How to determine the returned root if multiple roots are within + `tol` of each other. + + - 'max', 'maximum': pick the maximum of those roots + - 'min', 'minimum': pick the minimum of those roots + - 'avg', 'mean': take the average of those roots + + When finding minimum or maximum among complex roots they are compared + first by the real part and then by the imaginary part. + + Returns + ------- + unique : ndarray + The list of unique roots. + multiplicity : ndarray + The multiplicity of each root. + + See Also + -------- + scipy.signal.unique_roots + + Notes + ----- + If we have 3 roots ``a``, ``b`` and ``c``, such that ``a`` is close to + ``b`` and ``b`` is close to ``c`` (distance is less than `tol`), then it + doesn't necessarily mean that ``a`` is close to ``c``. It means that roots + grouping is not unique. In this function we use "greedy" grouping going + through the roots in the order they are given in the input `p`. + + This utility function is not specific to roots but can be used for any + sequence of values for which uniqueness and multiplicity has to be + determined. For a more general routine, see `numpy.unique`. 
+ + """ + if rtype in ['max', 'maximum']: + reduce = cupy.max + elif rtype in ['min', 'minimum']: + reduce = cupy.min + elif rtype in ['avg', 'mean']: + reduce = cupy.mean + else: + raise ValueError("`rtype` must be one of " + "{'max', 'maximum', 'min', 'minimum', 'avg', 'mean'}") + + points = cupy.empty((p.shape[0], 2)) + points[:, 0] = cupy.real(p) + points[:, 1] = cupy.imag(p) + + # Replacement for dist = cdist(points, points) to avoid needing `pylibraft` + dist = cupy.linalg.norm(points[:, None, :] - points[None, :, :], axis=-1) + + p_unique = [] + p_multiplicity = [] + used = cupy.zeros(p.shape[0], dtype=bool) + + for i, ds in enumerate(dist): + if used[i]: + continue + + mask = (ds < tol) & ~used + group = ds[mask] + if group.size > 0: + # print(j, ' : ', group, p[mask]) + p_unique.append(reduce(p[mask])) + p_multiplicity.append(group.shape[0]) + used[mask] = True + + return cupy.asarray(p_unique), cupy.asarray(p_multiplicity) + + +def _compute_factors(roots, multiplicity, include_powers=False): + """Compute the total polynomial divided by factors for each root.""" + current = cupy.array([1]) + suffixes = [current] + for pole, mult in zip(roots[-1:0:-1], multiplicity[-1:0:-1]): + monomial = cupy.r_[1, -pole] + for _ in range(int(mult)): + current = cupy.polymul(current, monomial) + suffixes.append(current) + suffixes = suffixes[::-1] + + factors = [] + current = cupy.array([1]) + for pole, mult, suffix in zip(roots, multiplicity, suffixes): + monomial = cupy.r_[1, -pole] + block = [] + for i in range(int(mult)): + if i == 0 or include_powers: + block.append(cupy.polymul(current, suffix)) + current = cupy.polymul(current, monomial) + factors.extend(reversed(block)) + + return factors, current + + +def _compute_residues(poles, multiplicity, numerator): + denominator_factors, _ = _compute_factors(poles, multiplicity) + numerator = numerator.astype(poles.dtype) + + residues = [] + for pole, mult, factor in zip(poles, multiplicity, + denominator_factors): + if 
mult == 1: + residues.append(cupy.polyval(numerator, pole) / + cupy.polyval(factor, pole)) + else: + numer = numerator.copy() + monomial = cupy.r_[1, -pole] + factor, d = _polydiv(factor, monomial) + + block = [] + for _ in range(int(mult)): + numer, n = _polydiv(numer, monomial) + r = n[0] / d[0] + numer = cupy.polysub(numer, r * factor) + block.append(r) + + residues.extend(reversed(block)) + + return cupy.asarray(residues) + + +def invres(r, p, k, tol=1e-3, rtype='avg'): + """Compute b(s) and a(s) from partial fraction expansion. + + If `M` is the degree of numerator `b` and `N` the degree of denominator + `a`:: + + b(s) b[0] s**(M) + b[1] s**(M-1) + ... + b[M] + H(s) = ------ = ------------------------------------------ + a(s) a[0] s**(N) + a[1] s**(N-1) + ... + a[N] + + then the partial-fraction expansion H(s) is defined as:: + + r[0] r[1] r[-1] + = -------- + -------- + ... + --------- + k(s) + (s-p[0]) (s-p[1]) (s-p[-1]) + + If there are any repeated roots (closer together than `tol`), then H(s) + has terms like:: + + r[i] r[i+1] r[i+n-1] + -------- + ----------- + ... + ----------- + (s-p[i]) (s-p[i])**2 (s-p[i])**n + + This function is used for polynomials in positive powers of s or z, + such as analog filters or digital filters in controls engineering. For + negative powers of z (typical for digital filters in DSP), use `invresz`. + + Parameters + ---------- + r : array_like + Residues corresponding to the poles. For repeated poles, the residues + must be ordered to correspond to ascending by power fractions. + p : array_like + Poles. Equal poles must be adjacent. + k : array_like + Coefficients of the direct polynomial term. + tol : float, optional + The tolerance for two roots to be considered equal in terms of + the distance between them. Default is 1e-3. See `unique_roots` + for further details. + rtype : {'avg', 'min', 'max'}, optional + Method for computing a root to represent a group of identical roots. + Default is 'avg'. 
See `unique_roots` for further details. + + Returns + ------- + b : ndarray + Numerator polynomial coefficients. + a : ndarray + Denominator polynomial coefficients. + + See Also + -------- + scipy.signal.invres + residue, invresz, unique_roots + + """ + r = cupy.atleast_1d(r) + p = cupy.atleast_1d(p) + k = cupy.trim_zeros(cupy.atleast_1d(k), 'f') + + unique_poles, multiplicity = unique_roots(p, tol, rtype) + factors, denominator = _compute_factors(unique_poles, multiplicity, + include_powers=True) + + if len(k) == 0: + numerator = 0 + else: + numerator = cupy.polymul(k, denominator) + + for residue, factor in zip(r, factors): + numerator = cupy.polyadd(numerator, residue * factor) + + return numerator, denominator + + +def invresz(r, p, k, tol=1e-3, rtype='avg'): + """Compute b(z) and a(z) from partial fraction expansion. + + If `M` is the degree of numerator `b` and `N` the degree of denominator + `a`:: + + b(z) b[0] + b[1] z**(-1) + ... + b[M] z**(-M) + H(z) = ------ = ------------------------------------------ + a(z) a[0] + a[1] z**(-1) + ... + a[N] z**(-N) + + then the partial-fraction expansion H(z) is defined as:: + + r[0] r[-1] + = --------------- + ... + ---------------- + k[0] + k[1]z**(-1) ... + (1-p[0]z**(-1)) (1-p[-1]z**(-1)) + + If there are any repeated roots (closer than `tol`), then the partial + fraction expansion has terms like:: + + r[i] r[i+1] r[i+n-1] + -------------- + ------------------ + ... + ------------------ + (1-p[i]z**(-1)) (1-p[i]z**(-1))**2 (1-p[i]z**(-1))**n + + This function is used for polynomials in negative powers of z, + such as digital filters in DSP. For positive powers, use `invres`. + + Parameters + ---------- + r : array_like + Residues corresponding to the poles. For repeated poles, the residues + must be ordered to correspond to ascending by power fractions. + p : array_like + Poles. Equal poles must be adjacent. + k : array_like + Coefficients of the direct polynomial term. 
def residue(b, a, tol=1e-3, rtype='avg'):
    """Compute partial-fraction expansion of b(s) / a(s).

    Decomposes the rational function ``H(s) = b(s) / a(s)`` (positive
    powers of s, as for analog filters) into residues `r` over poles `p`
    plus a direct polynomial term `k`, using "deflation through
    subtraction" (method 6 in [1]_). For negative powers of z use
    `residuez`.

    Parameters
    ----------
    b : array_like
        Numerator polynomial coefficients.
    a : array_like
        Denominator polynomial coefficients.
    tol : float, optional
        The tolerance for two roots to be considered equal in terms of
        the distance between them. Default is 1e-3. See `unique_roots`
        for further details.
    rtype : {'avg', 'min', 'max'}, optional
        Method for computing a root to represent a group of identical
        roots. Default is 'avg'. See `unique_roots` for further details.

    Returns
    -------
    r : ndarray
        Residues corresponding to the poles. For repeated poles, the
        residues are ordered to correspond to ascending by power
        fractions.
    p : ndarray
        Poles ordered by magnitude in ascending order.
    k : ndarray
        Coefficients of the direct polynomial term.

    Warning
    -------
    This function may synchronize the device.

    See Also
    --------
    scipy.signal.residue
    invres, residuez, numpy.poly, unique_roots

    Notes
    -----
    Root multiplicity cannot be determined exactly in floating point, so
    the expansion reflects the multiplicities found empirically with the
    given `tol`; close poles make the result sensitive to `tol`.

    References
    ----------
    .. [1] J. F. Mahoney, B. D. Sivazlian, "Partial fractions expansion: a
       review of computational methodology and efficiency", Journal of
       Computational and Applied Mathematics, Vol. 9, 1983.
    """
    # Promote both polynomials to a common real or complex float dtype.
    wants_complex = (cupy.issubdtype(b.dtype, cupy.complexfloating)
                     or cupy.issubdtype(a.dtype, cupy.complexfloating))
    target = complex if wants_complex else float
    b = b.astype(target)
    a = a.astype(target)

    b = cupy.trim_zeros(cupy.atleast_1d(b), 'f')
    a = cupy.trim_zeros(cupy.atleast_1d(a), 'f')

    if a.size == 0:
        raise ValueError("Denominator `a` is zero.")

    poles = roots(a)
    if b.size == 0:
        # Zero numerator: every residue vanishes and there is no direct
        # polynomial term.
        return cupy.zeros(poles.shape), _cmplx_sort(poles)[0], cupy.array([])

    if len(b) < len(a):
        # Proper fraction: no direct polynomial term.
        k = cupy.empty(0)
    else:
        k, b = _polydiv(b, a)

    distinct_poles, counts = unique_roots(poles, tol=tol, rtype=rtype)
    distinct_poles, order = _cmplx_sort(distinct_poles)
    counts = counts[order]

    residues = _compute_residues(distinct_poles, counts, b)

    # Overwrite `poles` so each repeated group carries its representative
    # (sorted) value, aligned with the residue ordering.
    offset = 0
    for pole, mult in zip(distinct_poles, counts):
        poles[offset:offset + mult] = pole
        offset += mult

    return residues / a[0], poles, k
+ + Parameters + ---------- + b : array_like + Numerator polynomial coefficients. + a : array_like + Denominator polynomial coefficients. + tol : float, optional + The tolerance for two roots to be considered equal in terms of + the distance between them. Default is 1e-3. See `unique_roots` + for further details. + rtype : {'avg', 'min', 'max'}, optional + Method for computing a root to represent a group of identical roots. + Default is 'avg'. See `unique_roots` for further details. + + Returns + ------- + r : ndarray + Residues corresponding to the poles. For repeated poles, the residues + are ordered to correspond to ascending by power fractions. + p : ndarray + Poles ordered by magnitude in ascending order. + k : ndarray + Coefficients of the direct polynomial term. + + Warning + ------- + This function may synchronize the device. + + See Also + -------- + scipy.signal.residuez + invresz, residue, unique_roots + """ + + if (cupy.issubdtype(b.dtype, cupy.complexfloating) + or cupy.issubdtype(a.dtype, cupy.complexfloating)): + b = b.astype(complex) + a = a.astype(complex) + else: + b = b.astype(float) + a = a.astype(float) + + b = cupy.trim_zeros(cupy.atleast_1d(b), 'b') + a = cupy.trim_zeros(cupy.atleast_1d(a), 'b') + + if a.size == 0: + raise ValueError("Denominator `a` is zero.") + elif a[0] == 0: + raise ValueError("First coefficient of determinant `a` must be " + "non-zero.") + + poles = roots(a) + if b.size == 0: + return cupy.zeros(poles.shape), _cmplx_sort(poles)[0], cupy.array([]) + + b_rev = b[::-1] + a_rev = a[::-1] + + if len(b_rev) < len(a_rev): + k_rev = cupy.empty(0) + else: + k_rev, b_rev = _polydiv(b_rev, a_rev) + + unique_poles, multiplicity = unique_roots(poles, tol=tol, rtype=rtype) + unique_poles, order = _cmplx_sort(unique_poles) + multiplicity = multiplicity[order] + + residues = _compute_residues(1 / unique_poles, multiplicity, b_rev) + + index = 0 + powers = cupy.empty(len(residues), dtype=int) + for pole, mult in zip(unique_poles, 
multiplicity): + poles[index:index + mult] = pole + powers[index:index + mult] = 1 + cupy.arange(int(mult)) + index += mult + + residues *= (-poles) ** powers / a_rev[0] + + return residues, poles, k_rev[::-1] diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/_resample.py b/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/_resample.py new file mode 100644 index 0000000000000000000000000000000000000000..54c89e1ad8000cadf729205db0a37b5e8c04ae04 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/_resample.py @@ -0,0 +1,556 @@ + +""" +Signal sampling functions. + +Some of the functions defined here were ported directly from CuSignal under +terms of the MIT license, under the following notice: + +Copyright (c) 2019-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + +Permission is hereby granted, free of charge, to any person obtaining a +copy of this software and associated documentation files (the "Software"), +to deal in the Software without restriction, including without limitation +the rights to use, copy, modify, merge, publish, distribute, sublicense, +and/or sell copies of the Software, and to permit persons to whom the +Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. 
+""" + +import operator +from math import gcd + +import cupy +from cupyx.scipy.fft import fft, rfft, fftfreq, ifft, irfft, ifftshift +from cupyx.scipy.signal._iir_filter_design import cheby1 +from cupyx.scipy.signal._fir_filter_design import firwin +from cupyx.scipy.signal._iir_filter_conversions import zpk2sos +from cupyx.scipy.signal._ltisys import dlti +from cupyx.scipy.signal._upfirdn import upfirdn, _output_len +from cupyx.scipy.signal._signaltools import ( + sosfiltfilt, filtfilt, sosfilt, lfilter) +from cupyx.scipy.signal.windows._windows import get_window + + +def _design_resample_poly(up, down, window): + """ + Design a prototype FIR low-pass filter using the window method + for use in polyphase rational resampling. + + Parameters + ---------- + up : int + The upsampling factor. + down : int + The downsampling factor. + window : string or tuple + Desired window to use to design the low-pass filter. + See below for details. + + Returns + ------- + h : array + The computed FIR filter coefficients. + + See Also + -------- + resample_poly : Resample up or down using the polyphase method. + + Notes + ----- + The argument `window` specifies the FIR low-pass filter design. + The functions `cusignal.get_window` and `cusignal.firwin` + are called to generate the appropriate filter coefficients. + + The returned array of coefficients will always be of data type + `complex128` to maintain precision. For use in lower-precision + filter operations, this array should be converted to the desired + data type before providing it to `cusignal.resample_poly`. + + """ + + # Determine our up and down factors + # Use a rational approximation to save computation time on really long + # signals + g_ = gcd(up, down) + up //= g_ + down //= g_ + + # Design a linear-phase low-pass FIR filter + max_rate = max(up, down) + f_c = 1.0 / max_rate # cutoff of FIR filter (rel. 
def decimate(x, q, n=None, ftype='iir', axis=-1, zero_phase=True):
    """
    Downsample the signal after applying an anti-aliasing filter.

    By default, an order 8 Chebyshev type I filter is used. A 30 point FIR
    filter with Hamming window is used if `ftype` is 'fir'.

    Parameters
    ----------
    x : array_like
        The signal to be downsampled, as an N-dimensional array.
    q : int
        The downsampling factor. When using IIR downsampling, it is
        recommended to call `decimate` multiple times for downsampling
        factors higher than 13.
    n : int, optional
        The order of the filter (1 less than the length for 'fir').
        Defaults to 8 for 'iir' and 20 times the downsampling factor for
        'fir'.
    ftype : str {'iir', 'fir'} or ``dlti`` instance, optional
        If 'iir' or 'fir', specifies the type of lowpass filter. If an
        instance of an `dlti` object, uses that object to filter before
        downsampling.
    axis : int, optional
        The axis along which to decimate.
    zero_phase : bool, optional
        Prevent phase shift by filtering with `filtfilt` instead of
        `lfilter` when using an IIR filter, and shifting the outputs back
        by the filter's group delay when using an FIR filter. The default
        value of ``True`` is recommended, since a phase shift is generally
        not desired.

    Returns
    -------
    y : ndarray
        The down-sampled signal.

    See Also
    --------
    resample : Resample up or down using the FFT method.
    resample_poly : Resample using polyphase filtering and an FIR filter.
    """
    x = cupy.asarray(x)
    q = operator.index(q)

    if n is not None:
        n = operator.index(n)

    result_type = x.dtype
    if not cupy.issubdtype(result_type, cupy.inexact) \
            or result_type.type == cupy.float16:
        # upcast integers and float16 to float64
        result_type = cupy.float64

    if ftype == 'fir':
        if n is None:
            half_len = 10 * q  # reasonable cutoff for our sinc-like function
            n = 2 * half_len
        b, a = firwin(n + 1, 1. / q, window='hamming'), 1.
        b = cupy.asarray(b, dtype=result_type)
        a = cupy.asarray(a, dtype=result_type)
    elif ftype == 'iir':
        iir_use_sos = True
        if n is None:
            n = 8
        sos = cheby1(n, 0.05, 0.8 / q, output='sos')
        sos = cupy.asarray(sos, dtype=result_type)
    elif isinstance(ftype, dlti):
        system = ftype._as_zpk()
        if system.poles.shape[0] == 0:
            # FIR
            system = ftype._as_tf()
            b, a = system.num, system.den
            ftype = 'fir'
        elif (any(cupy.iscomplex(system.poles))
              or any(cupy.iscomplex(system.zeros))
              or cupy.iscomplex(system.gain)):
            # sosfilt & sosfiltfilt don't handle complex coeffs, so fall
            # back to transfer-function filtering.
            # BUGFIX: the second condition previously re-tested
            # `system.poles`; it must test `system.zeros` (as in
            # scipy.signal.decimate), or systems with complex zeros were
            # routed through the real-only sos path.
            iir_use_sos = False
            system = ftype._as_tf()
            b, a = system.num, system.den
        else:
            iir_use_sos = True
            sos = zpk2sos(system.zeros, system.poles, system.gain)
            sos = cupy.asarray(sos, dtype=result_type)
    else:
        raise ValueError('invalid ftype')

    sl = [slice(None)] * x.ndim

    if ftype == 'fir':
        b = b / a
        if zero_phase:
            y = resample_poly(x, 1, q, axis=axis, window=b)
        else:
            # upfirdn is generally faster than lfilter by a factor equal
            # to the downsampling factor, since it only calculates the
            # needed outputs
            n_out = x.shape[axis] // q + bool(x.shape[axis] % q)
            y = upfirdn(b, x, up=1, down=q, axis=axis)
            sl[axis] = slice(None, n_out, None)
    else:  # IIR case
        if zero_phase:
            if iir_use_sos:
                y = sosfiltfilt(sos, x, axis=axis)
            else:
                y = filtfilt(b, a, x, axis=axis)
        else:
            if iir_use_sos:
                y = sosfilt(sos, x, axis=axis)
            else:
                y = lfilter(b, a, x, axis=axis)
        sl[axis] = slice(None, None, q)

    return y[tuple(sl)]
+ + If `window` is an array of the same length as `x.shape[axis]` it is + assumed to be the window to be applied directly in the Fourier + domain (with dc and low-frequency first). + + For any other type of `window`, the function `cusignal.get_window` + is called to generate the window. + + The first sample of the returned vector is the same as the first + sample of the input vector. The spacing between samples is changed + from ``dx`` to ``dx * len(x) / num``. + + If `t` is not None, then it represents the old sample positions, + and the new sample positions will be returned as well as the new + samples. + + As noted, `resample` uses FFT transformations, which can be very + slow if the number of input or output samples is large and prime; + see `scipy.fftpack.fft`. + + Examples + -------- + Note that the end of the resampled data rises to meet the first + sample of the next cycle: + + >>> import cupy as cp + >>> import cupyx.scipy.signal import resample + + >>> x = cupy.linspace(0, 10, 20, endpoint=False) + >>> y = cupy.cos(-x**2/6.0) + >>> f = resample(y, 100) + >>> xnew = cupy.linspace(0, 10, 100, endpoint=False) + + >>> import matplotlib.pyplot as plt + >>> plt.plot(cupy.asnumpy(x), cupy.asnumpy(y), 'go-', cupy.asnumpy(xnew), \ + cupy.asnumpy(f), '.-', 10, cupy.asnumpy(y[0]), 'ro') + >>> plt.legend(['data', 'resampled'], loc='best') + >>> plt.show() + """ + if domain not in ('time', 'freq'): + raise ValueError("Acceptable domain flags are 'time' or" + " 'freq', not domain={}".format(domain)) + + x = cupy.asarray(x) + Nx = x.shape[axis] + + # Check if we can use faster real FFT + real_input = cupy.isrealobj(x) + + if domain == 'time': + # Forward transform + if real_input: + X = rfft(x, axis=axis) + else: # Full complex FFT + X = fft(x, axis=axis) + else: # domain == 'freq' + X = x + + # Apply window to spectrum + if window is not None: + if callable(window): + W = window(fftfreq(Nx)) + elif isinstance(window, cupy.ndarray): + if window.shape != (Nx,): + raise 
ValueError('window must have the same length as data') + W = window + else: + W = ifftshift(get_window(window, Nx)) + + newshape_W = [1] * x.ndim + newshape_W[axis] = X.shape[axis] + if real_input: + # Fold the window back on itself to mimic complex behavior + W_real = W.copy() + W_real[1:] += W_real[-1:0:-1] + W_real[1:] *= 0.5 + X *= W_real[:newshape_W[axis]].reshape(newshape_W) + else: + X *= W.reshape(newshape_W) + + # Copy each half of the original spectrum to the output spectrum, either + # truncating high frequencies (downsampling) or zero-padding them + # (upsampling) + + # Placeholder array for output spectrum + newshape = list(x.shape) + if real_input: + newshape[axis] = num // 2 + 1 + else: + newshape[axis] = num + Y = cupy.zeros(newshape, X.dtype) + + # Copy positive frequency components (and Nyquist, if present) + N = min(num, Nx) + nyq = N // 2 + 1 # Slice index that includes Nyquist if present + sl = [slice(None)] * x.ndim + sl[axis] = slice(0, nyq) + Y[tuple(sl)] = X[tuple(sl)] + if not real_input: + # Copy negative frequency components + if N > 2: # (slice expression doesn't collapse to empty array) + sl[axis] = slice(nyq - N, None) + Y[tuple(sl)] = X[tuple(sl)] + + # Split/join Nyquist component(s) if present + # So far we have set Y[+N/2]=X[+N/2] + if N % 2 == 0: + if num < Nx: # downsampling + if real_input: + sl[axis] = slice(N//2, N//2 + 1) + Y[tuple(sl)] *= 2. 
def resample_poly(x, up, down, axis=0, window=("kaiser", 5.0),
                  padtype='constant', cval=None):
    """
    Resample `x` along the given axis using polyphase filtering.

    The signal is upsampled by `up`, passed through a zero-phase low-pass
    FIR filter, and downsampled by `down`, giving a sample rate of
    ``up / down`` times the original. Values beyond the signal boundary
    are treated as zero during filtering.

    Parameters
    ----------
    x : array_like
        The data to be resampled.
    up : int
        The upsampling factor.
    down : int
        The downsampling factor.
    axis : int, optional
        The axis of `x` that is resampled. Default is 0.
    window : string, tuple, or array_like, optional
        Desired window to use to design the low-pass filter, or the FIR
        filter coefficients to employ (applied at the upsampled rate; a
        symmetric odd-length filter is recommended for zero phase).
    padtype : string, optional
        Only ``'constant'`` is supported by `upfirdn`; any other value
        raises ``ValueError``.
    cval : float, optional
        Value to use if ``padtype='constant'``; only the default (zero,
        i.e. ``None``) is supported.

    Returns
    -------
    resampled_x : array
        The resampled array.

    See Also
    --------
    decimate : Downsample the signal after applying an FIR or IIR filter.
    resample : Resample up or down using the FFT method.
    """
    if padtype != 'constant' or cval is not None:
        raise ValueError(
            'padtype and cval arguments are not supported by upfirdn')

    x = cupy.asarray(x)
    up, down = int(up), int(down)
    if up < 1 or down < 1:
        raise ValueError("up and down must be >= 1")

    # Reduce the ratio so both the designed filter and the polyphase work
    # are as small as possible; a 1:1 ratio is a plain copy.
    common = gcd(up, down)
    up //= common
    down //= common
    if up == down == 1:
        return x.copy()

    n_in = x.shape[axis]
    n_out = n_in * up
    n_out = n_out // down + bool(n_out % down)

    if isinstance(window, (list, cupy.ndarray)):
        # Caller supplied the FIR taps directly.
        window = cupy.asarray(window)
        if window.ndim > 1:
            raise ValueError("window must be 1-D")
        half_len = (window.size - 1) // 2
        h = up * window
    else:
        half_len = 10 * max(up, down)
        h = up * _design_resample_poly(up, down, window)

    # Zero-pad our filter to put the output samples at the center.
    n_pre_pad = down - half_len % down
    n_post_pad = 0
    n_pre_remove = (half_len + n_pre_pad) // down
    # We should rarely need to do this given our filter lengths...
    while (_output_len(len(h) + n_pre_pad + n_post_pad, n_in, up, down)
            < n_out + n_pre_remove):
        n_post_pad += 1

    h = cupy.concatenate(
        (cupy.zeros(n_pre_pad, h.dtype), h, cupy.zeros(n_post_pad, h.dtype)))

    # Filter, then trim the filter transient from both ends.
    y = upfirdn(h, x, up, down, axis)
    keep = [slice(None)] * x.ndim
    keep[axis] = slice(n_pre_remove, n_pre_remove + n_out)
    return y[tuple(keep)]
+ `polyorder` must be less than `window_length`. + deriv : int, optional + The order of the derivative to compute. This must be a + nonnegative integer. The default is 0, which means to filter + the data without differentiating. + delta : float, optional + The spacing of the samples to which the filter will be applied. + This is only used if deriv > 0. + pos : int or None, optional + If pos is not None, it specifies evaluation position within the + window. The default is the middle of the window. + use : str, optional + Either 'conv' or 'dot'. This argument chooses the order of the + coefficients. The default is 'conv', which means that the + coefficients are ordered to be used in a convolution. With + use='dot', the order is reversed, so the filter is applied by + dotting the coefficients with the data set. + + Returns + ------- + coeffs : 1-D ndarray + The filter coefficients. + + See Also + -------- + scipy.signal.savgol_coeffs + savgol_filter + + + References + ---------- + A. Savitzky, M. J. E. Golay, Smoothing and Differentiation of Data by + Simplified Least Squares Procedures. Analytical Chemistry, 1964, 36 (8), + pp 1627-1639. + Jianwen Luo, Kui Ying, and Jing Bai. 2005. Savitzky-Golay smoothing and + differentiation filter for even number data. Signal Process. + 85, 7 (July 2005), 1429-1434. + + Examples + -------- + >>> import numpy as np + >>> from scipy.signal import savgol_coeffs + >>> savgol_coeffs(5, 2) + array([-0.08571429, 0.34285714, 0.48571429, 0.34285714, -0.08571429]) + >>> savgol_coeffs(5, 2, deriv=1) + array([ 2.00000000e-01, 1.00000000e-01, 2.07548111e-16, -1.00000000e-01, + -2.00000000e-01]) + + Note that use='dot' simply reverses the coefficients. 
+ + >>> savgol_coeffs(5, 2, pos=3) + array([ 0.25714286, 0.37142857, 0.34285714, 0.17142857, -0.14285714]) + >>> savgol_coeffs(5, 2, pos=3, use='dot') + array([-0.14285714, 0.17142857, 0.34285714, 0.37142857, 0.25714286]) + >>> savgol_coeffs(4, 2, pos=3, deriv=1, use='dot') + array([0.45, -0.85, -0.65, 1.05]) + + `x` contains data from the parabola x = t**2, sampled at + t = -1, 0, 1, 2, 3. `c` holds the coefficients that will compute the + derivative at the last position. When dotted with `x` the result should + be 6. + + >>> x = np.array([1, 0, 1, 4, 9]) + >>> c = savgol_coeffs(5, 2, pos=4, deriv=1, use='dot') + >>> c.dot(x) + 6.0 + """ + + # An alternative method for finding the coefficients when deriv=0 is + # t = np.arange(window_length) + # unit = (t == pos).astype(int) + # coeffs = np.polyval(np.polyfit(t, unit, polyorder), t) + # The method implemented here is faster. + + # To recreate the table of sample coefficients shown in the chapter on + # the Savitzy-Golay filter in the Numerical Recipes book, use + # window_length = nL + nR + 1 + # pos = nL + 1 + # c = savgol_coeffs(window_length, M, pos=pos, use='dot') + + if polyorder >= window_length: + raise ValueError("polyorder must be less than window_length.") + + halflen, rem = divmod(window_length, 2) + + if pos is None: + if rem == 0: + pos = halflen - 0.5 + else: + pos = halflen + + if not (0 <= pos < window_length): + raise ValueError("pos must be nonnegative and less than " + "window_length.") + + if use not in ['conv', 'dot']: + raise ValueError("`use` must be 'conv' or 'dot'") + + if deriv > polyorder: + coeffs = cupy.zeros(window_length) + return coeffs + + # Form the design matrix A. The columns of A are powers of the integers + # from -pos to window_length - pos - 1. The powers (i.e., rows) range + # from 0 to polyorder. (That is, A is a vandermonde matrix, but not + # necessarily square.) 
def _polyder(p, m):
    """Differentiate polynomial coefficients ``m`` times.

    ``p`` is a 1-D or 2-D array of coefficients with the highest power in
    the first row; in the 2-D case every column is a separate polynomial.
    ``m`` must be a nonnegative integer.  (``numpy.polyder`` cannot handle
    the 2-D layout, hence this helper.)
    """
    if m == 0:
        return p
    n = len(p)
    if n <= m:
        # Differentiating more times than the degree leaves zero.
        return cupy.zeros_like(p[:1, ...])
    dp = p[:-m].copy()
    for k in range(m):
        powers = cupy.arange(n - k - 1, m - k - 1, -1)
        dp *= powers.reshape((n - m,) + (1,) * (p.ndim - 1))
    return dp


def _fit_edge(x, window_start, window_stop, interp_start, interp_stop,
              axis, polyorder, deriv, delta, y):
    """Fit a polynomial to one edge window of `x` along `axis` and write
    the evaluated values into the ``interp_start:interp_stop`` slice of
    `y` (in place).
    """
    # Pull the edge window out and flatten it to (window_length, -1).
    x_edge = axis_slice(x, start=window_start, stop=window_stop, axis=axis)
    needs_swap = not (axis == 0 or axis == -x.ndim)
    flat_edge = x_edge.swapaxes(axis, 0) if needs_swap else x_edge
    flat_edge = flat_edge.reshape(flat_edge.shape[0], -1)

    # Least-squares polynomial fit of every 1-D slice at once.
    # poly_coeffs has shape (polyorder + 1, -1), '-1' as in flat_edge.
    poly_coeffs = cupy.polyfit(cupy.arange(0, window_stop - window_start),
                               flat_edge, polyorder)
    if deriv > 0:
        poly_coeffs = _polyder(poly_coeffs, deriv)

    # Evaluate the (derivative of the) fit on the requested sub-slice.
    t = cupy.arange(interp_start - window_start, interp_stop - window_start)
    values = _polyval(poly_coeffs, t.reshape(-1, 1)) / (delta ** deriv)

    # Restore the original axis layout and store the result into y.
    shp = list(y.shape)
    shp[0], shp[axis] = shp[axis], shp[0]
    values = values.reshape(interp_stop - interp_start, *shp[1:])
    if needs_swap:
        values = values.swapaxes(0, axis)
    y_edge = axis_slice(y, start=interp_start, stop=interp_stop, axis=axis)
    y_edge[...] = values


def _fit_edges_polyfit(x, window_length, polyorder, deriv, delta, axis, y):
    """Fill the first and last ``window_length // 2`` samples of `y`
    along `axis` with polynomial fits of the corresponding edges of `x`.

    Calls :func:`_fit_edge` once per end of the axis.
    """
    halflen = window_length // 2
    n = x.shape[axis]
    # (window_start, window_stop, interp_start, interp_stop) per edge.
    for win_lo, win_hi, out_lo, out_hi in (
            (0, window_length, 0, halflen),
            (n - window_length, n, n - halflen, n)):
        _fit_edge(x, win_lo, win_hi, out_lo, out_hi, axis,
                  polyorder, deriv, delta, y)


def savgol_filter(x, window_length, polyorder, deriv=0, delta=1.0,
                  axis=-1, mode='interp', cval=0.0):
    """Apply a Savitzky-Golay filter to an array.

    This is a 1-D filter; for arrays with more than one dimension the
    filter is applied along `axis`.

    Parameters
    ----------
    x : array_like
        The data to be filtered. Converted to ``float64`` unless it is
        already single or double precision floating point.
    window_length : int
        Length of the filter window (number of coefficients). With
        ``mode='interp'`` it must not exceed ``x.shape[axis]``.
    polyorder : int
        Order of the fitted polynomial; must be less than
        `window_length`.
    deriv : int, optional
        Order of the derivative to compute. Default 0 (smooth only).
    delta : float, optional
        Sample spacing, used only when ``deriv > 0``. Default 1.0.
    axis : int, optional
        Axis along which to filter. Default -1.
    mode : str, optional
        'mirror', 'constant', 'nearest', 'wrap' or 'interp' (default).
        'interp' uses no padding: a degree-`polyorder` polynomial is fit
        to the last `window_length` values at each edge and used to
        produce the last ``window_length // 2`` outputs. The other modes
        are forwarded to ``convolve1d`` (with `cval` used by 'constant').
    cval : scalar, optional
        Fill value for ``mode='constant'``. Default 0.0.

    Returns
    -------
    y : ndarray, same shape as `x`
        The filtered data.

    See Also
    --------
    savgol_coeffs
    """
    if mode not in ["mirror", "constant", "nearest", "interp", "wrap"]:
        raise ValueError("mode must be 'mirror', 'constant', 'nearest' "
                         "'wrap' or 'interp'.")

    x = cupy.asarray(x)
    # Work in single or double precision floating point only.
    if x.dtype != cupy.float64 and x.dtype != cupy.float32:
        x = x.astype(cupy.float64)

    coeffs = savgol_coeffs(window_length, polyorder, deriv=deriv, delta=delta)

    if mode != "interp":
        # Padding behavior is handled entirely by ndimage.convolve1d.
        return convolve1d(x, coeffs, axis=axis, mode=mode, cval=cval)

    if window_length > x.shape[axis]:
        raise ValueError("If mode is 'interp', window_length must be less "
                         "than or equal to the size of x.")
    # No padding: filter the interior, then overwrite the edge samples
    # with values from polynomials fitted to the edge windows.
    y = convolve1d(x, coeffs, axis=axis, mode="constant")
    _fit_edges_polyfit(x, window_length, polyorder, deriv, delta, axis, y)
    return y
import warnings

import cupy
from cupy._core import internal
from cupy.linalg import lstsq

import cupyx.scipy.fft as sp_fft
from cupyx.scipy.ndimage import _util
from cupyx.scipy.ndimage import _filters
from cupyx.scipy.signal import _signaltools_core as _st_core
from cupyx.scipy.signal._arraytools import (
    const_ext, even_ext, odd_ext, axis_reverse, axis_slice, axis_assign)
from cupyx.scipy.signal._iir_utils import (
    apply_iir, apply_iir_sos, compute_correction_factors,
    compute_correction_factors_sos)


def convolve(in1, in2, mode='full', method='auto'):
    """Convolve two N-dimensional arrays.

    Convolve ``in1`` and ``in2``, with the output size determined by the
    ``mode`` argument.

    Args:
        in1 (cupy.ndarray): First input.
        in2 (cupy.ndarray): Second input, with the same number of
            dimensions as ``in1``.
        mode (str): Size of the output: ``'full'`` (default, the full
            discrete linear convolution), ``'valid'`` (only elements not
            relying on zero-padding; one input must be at least as large
            as the other in every dimension) or ``'same'`` (same size as
            ``in1``, centered on the ``'full'`` output).
        method (str): ``'direct'`` (sum-based definition), ``'fft'``
            (via :func:`fftconvolve`) or ``'auto'`` (default), which
            estimates the faster of the two.

    Returns:
        cupy.ndarray: the result of convolution.

    .. seealso:: :func:`cupyx.scipy.signal.choose_conv_method`
    .. seealso:: :func:`cupyx.scipy.signal.fftconvolve`
    .. seealso:: :func:`cupyx.scipy.signal.oaconvolve`
    .. seealso:: :func:`scipy.signal.convolve`
    .. note::
        With ``method='auto'`` CuPy may not pick the same method SciPy
        would for identical inputs.
    """
    return _correlate(in1, in2, mode, method, convolution=True)


def correlate(in1, in2, mode='full', method='auto'):
    """Cross-correlate two N-dimensional arrays.

    Cross-correlate ``in1`` and ``in2``, with the output size determined
    by the ``mode`` argument.

    Args:
        in1 (cupy.ndarray): First input.
        in2 (cupy.ndarray): Second input, with the same number of
            dimensions as ``in1``.
        mode (str): Size of the output: ``'full'`` (default), ``'valid'``
            or ``'same'`` — see :func:`convolve` for details.
        method (str): ``'direct'``, ``'fft'`` or ``'auto'`` (default),
            which estimates the faster of the two.

    Returns:
        cupy.ndarray: the result of correlation.

    .. seealso:: :func:`cupyx.scipy.signal.choose_conv_method`
    .. seealso:: :func:`cupyx.scipy.signal.convolve`
    .. seealso:: :func:`cupyx.scipy.signal.fftconvolve`
    .. seealso:: :func:`scipy.signal.correlate`
    .. note::
        With ``method='auto'`` CuPy may not pick the same method SciPy
        would for identical inputs.
    """
    return _correlate(in1, in2, mode, method, convolution=False)
+ """ + return _correlate(in1, in2, mode, method, False) + + +def _correlate(in1, in2, mode='full', method='auto', convolution=False): + quick_out = _st_core._check_conv_inputs(in1, in2, mode, convolution) + if quick_out is not None: + return quick_out + if method not in ('auto', 'direct', 'fft'): + raise ValueError('acceptable methods are "auto", "direct", or "fft"') + + if method == 'auto': + method = choose_conv_method(in1, in2, mode=mode) + + if method == 'direct': + return _st_core._direct_correlate(in1, in2, mode, in1.dtype, + convolution) + + # if method == 'fft': + if not convolution: + in2 = _st_core._reverse(in2).conj() + inputs_swapped = _st_core._inputs_swap_needed(mode, in1.shape, in2.shape) + if inputs_swapped: + in1, in2 = in2, in1 + out = fftconvolve(in1, in2, mode) + result_type = cupy.result_type(in1, in2) + if result_type.kind in 'ui': + out = out.round() + out = out.astype(result_type, copy=False) + return out + + +def fftconvolve(in1, in2, mode='full', axes=None): + """Convolve two N-dimensional arrays using FFT. + + Convolve ``in1`` and ``in2`` using the fast Fourier transform method, with + the output size determined by the ``mode`` argument. + + This is generally much faster than the ``'direct'`` method of ``convolve`` + for large arrays, but can be slower when only a few output values are + needed, and can only output float arrays (int or object array inputs will + be cast to float). + + Args: + in1 (cupy.ndarray): First input. + in2 (cupy.ndarray): Second input. Should have the same number of + dimensions as ``in1``. + mode (str): Indicates the size of the output: + + - ``'full'``: output is the full discrete linear \ + cross-correlation (default) + - ``'valid'``: output consists only of those elements that do \ + not rely on the zero-padding. Either ``in1`` or \ + ``in2`` must be at least as large as the other in \ + every dimension. 
+ - ``'same'``: output is the same size as ``in1``, centered \ + with respect to the 'full' output + + axes (scalar or tuple of scalar or None): Axes over which to compute + the convolution. The default is over all axes. + + Returns: + cupy.ndarray: the result of convolution + + .. seealso:: :func:`cupyx.scipy.signal.choose_conv_method` + .. seealso:: :func:`cupyx.scipy.signal.correlation` + .. seealso:: :func:`cupyx.scipy.signal.convolve` + .. seealso:: :func:`cupyx.scipy.signal.oaconvolve` + .. seealso:: :func:`cupyx.scipy.ndimage.convolve` + .. seealso:: :func:`scipy.signal.correlation` + """ + out = _st_core._check_conv_inputs(in1, in2, mode) + if out is not None: + return out + in1, in2, axes = _st_core._init_freq_conv_axes(in1, in2, mode, axes, False) + shape = [max(x1, x2) if a not in axes else x1 + x2 - 1 + for a, (x1, x2) in enumerate(zip(in1.shape, in2.shape))] + out = _st_core._freq_domain_conv(in1, in2, axes, shape, calc_fast_len=True) + return _st_core._apply_conv_mode(out, in1.shape, in2.shape, mode, axes) + + +def choose_conv_method(in1, in2, mode='full'): + """Find the fastest convolution/correlation method. + + Args: + in1 (cupy.ndarray): first input. + in2 (cupy.ndarray): second input. + mode (str, optional): ``'valid'``, ``'same'``, ``'full'``. + + Returns: + str: A string indicating which convolution method is fastest, + either ``'direct'`` or ``'fft'``. + + .. warning:: + This function currently doesn't support measure option, + nor multidimensional inputs. It does not guarantee + the compatibility of the return value to SciPy's one. + + .. seealso:: :func:`scipy.signal.choose_conv_method` + + """ + return cupy._math.misc._choose_conv_method(in1, in2, mode) + + +def oaconvolve(in1, in2, mode="full", axes=None): + """Convolve two N-dimensional arrays using the overlap-add method. + + Convolve ``in1`` and ``in2`` using the overlap-add method, with the output + size determined by the ``mode`` argument. 
This is generally faster than + ``convolve`` for large arrays, and generally faster than ``fftconvolve`` + when one array is much larger than the other, but can be slower when only a + few output values are needed or when the arrays are very similar in shape, + and can only output float arrays (int or object array inputs will be cast + to float). + + Args: + in1 (cupy.ndarray): First input. + in2 (cupy.ndarray): Second input. Should have the same number of + dimensions as ``in1``. + mode (str): Indicates the size of the output: + + - ``'full'``: output is the full discrete linear \ + cross-correlation (default) + - ``'valid'``: output consists only of those elements that do \ + not rely on the zero-padding. Either ``in1`` or \ + ``in2`` must be at least as large as the other in \ + every dimension. + - ``'same'``: output is the same size as ``in1``, centered \ + with respect to the ``'full'`` output + + axes (scalar or tuple of scalar or None): Axes over which to compute + the convolution. The default is over all axes. + + Returns: + cupy.ndarray: the result of convolution + + .. seealso:: :func:`cupyx.scipy.signal.convolve` + .. seealso:: :func:`cupyx.scipy.signal.fftconvolve` + .. seealso:: :func:`cupyx.scipy.ndimage.convolve` + .. seealso:: :func:`scipy.signal.oaconvolve` + """ + out = _st_core._check_conv_inputs(in1, in2, mode) + if out is not None: + return out + if in1.shape == in2.shape: # Equivalent to fftconvolve + return fftconvolve(in1, in2, mode=mode, axes=axes) + + in1, in2, axes = _st_core._init_freq_conv_axes(in1, in2, mode, axes, + sorted_axes=True) + s1, s2 = in1.shape, in2.shape + if not axes: + return _st_core._apply_conv_mode(in1*in2, s1, s2, mode, axes) + + # Calculate the block sizes for the output, steps, first and second inputs. + # It is simpler to calculate them all together than doing them in separate + # loops due to all the special cases that need to be handled. 
def convolve2d(in1, in2, mode='full', boundary='fill', fillvalue=0):
    """Convolve two 2-dimensional arrays.

    Convolve ``in1`` and ``in2`` with output size determined by ``mode``
    and boundary conditions determined by ``boundary`` and ``fillvalue``.

    Args:
        in1 (cupy.ndarray): First input.
        in2 (cupy.ndarray): Second input, with the same number of
            dimensions as ``in1``.
        mode (str): Size of the output: ``'full'`` (default), ``'valid'``
            or ``'same'`` — see :func:`convolve` for details.
        boundary (str): How to handle boundaries: ``fill`` (pad with
            `fillvalue`, default), ``wrap`` (circular) or ``symm``
            (symmetrical).
        fillvalue (scalar): Value to pad input arrays with. Default is 0.

    Returns:
        cupy.ndarray: A 2-dimensional array containing a subset of the
        discrete linear convolution of ``in1`` with ``in2``.

    .. seealso:: :func:`cupyx.scipy.signal.correlate2d`
    .. seealso:: :func:`scipy.signal.convolve2d`
    """
    return _correlate2d(in1, in2, mode, boundary, fillvalue,
                        convolution=True)


def correlate2d(in1, in2, mode='full', boundary='fill', fillvalue=0):
    """Cross-correlate two 2-dimensional arrays.

    Cross correlate ``in1`` and ``in2`` with output size determined by
    ``mode`` and boundary conditions determined by ``boundary`` and
    ``fillvalue``.

    Args:
        in1 (cupy.ndarray): First input.
        in2 (cupy.ndarray): Second input, with the same number of
            dimensions as ``in1``.
        mode (str): Size of the output: ``'full'`` (default), ``'valid'``
            or ``'same'`` — see :func:`convolve` for details.
        boundary (str): How to handle boundaries: ``fill`` (pad with
            `fillvalue`, default), ``wrap`` (circular) or ``symm``
            (symmetrical).
        fillvalue (scalar): Value to pad input arrays with. Default is 0.

    Returns:
        cupy.ndarray: A 2-dimensional array containing a subset of the
        discrete linear cross-correlation of ``in1`` with ``in2``.

    Note:
        When using ``"same"`` mode with even-length inputs, the outputs
        of ``correlate`` and ``correlate2d`` differ: there is a 1-index
        offset between them.

    .. seealso:: :func:`cupyx.scipy.signal.convolve2d`
    .. seealso:: :func:`scipy.signal.correlate2d`
    """
    return _correlate2d(in1, in2, mode, boundary, fillvalue,
                        convolution=False)


def _correlate2d(in1, in2, mode, boundary, fillvalue, convolution=False):
    """Shared implementation behind `convolve2d` and `correlate2d`."""
    if not (in1.ndim == in2.ndim == 2):
        raise ValueError('{} inputs must both be 2-D arrays'.format(
            'convolve2d' if convolution else 'correlate2d'))
    # Map the SciPy-style boundary names onto ndimage extension modes.
    _boundaries = {
        'fill': 'constant', 'pad': 'constant',
        'wrap': 'wrap', 'circular': 'wrap',
        'symm': 'reflect', 'symmetric': 'reflect',
    }
    boundary = _boundaries.get(boundary)
    if boundary is None:
        raise ValueError('Acceptable boundary flags are "fill" (or "pad"), '
                         '"circular" (or "wrap"), and '
                         '"symmetric" (or "symm").')
    quick_out = _st_core._check_conv_inputs(in1, in2, mode, convolution)
    if quick_out is not None:
        return quick_out
    return _st_core._direct_correlate(in1, in2, mode, in1.dtype, convolution,
                                      boundary, fillvalue, not convolution)


def correlation_lags(in1_len, in2_len, mode='full'):
    r"""
    Calculates the lag / displacement indices array for 1D
    cross-correlation.

    Parameters
    ----------
    in1_len : int
        First input size.
    in2_len : int
        Second input size.
    mode : str {'full', 'valid', 'same'}, optional
        A string indicating the size of the output.
        See the documentation `correlate` for more information.

    Returns
    -------
    lags : array
        Array of cross-correlation lag/displacement indices. Index it
        with the ``argmax`` of the correlation to recover the lag.

    Raises
    ------
    ValueError
        If `mode` is not 'full', 'same' or 'valid'.

    See Also
    --------
    correlate : Compute the N-dimensional cross-correlation.
    scipy.signal.correlation_lags
    """
    if mode == "full":
        # Lags of the full discrete linear correlation of the inputs.
        lags = cupy.arange(-in2_len + 1, in1_len)
    elif mode == "same":
        # Take the center of the 'full' lag range, sized like `in1`.
        lags = cupy.arange(-in2_len + 1, in1_len)
        mid = lags.size // 2
        lag_bound = in1_len // 2
        if in1_len % 2 == 0:
            lags = lags[(mid - lag_bound):(mid + lag_bound)]
        else:
            lags = lags[(mid - lag_bound):(mid + lag_bound) + 1]
    elif mode == "valid":
        # The sign of lag_bound tells on which side the lags lie.
        lag_bound = in1_len - in2_len
        if lag_bound >= 0:
            lags = cupy.arange(lag_bound + 1)
        else:
            lags = cupy.arange(lag_bound, 1)
    else:
        # Previously an invalid mode fell through every branch and
        # surfaced as an incidental UnboundLocalError on `return lags`;
        # raise a clear error instead (matches scipy.signal behavior).
        raise ValueError("mode must be 'full', 'same' or 'valid'.")
    return lags
def wiener(im, mysize=None, noise=None):
    """Perform a Wiener filter on an N-dimensional array.

    Apply a Wiener filter to the N-dimensional array `im`.

    Args:
        im (cupy.ndarray): An N-dimensional array.
        mysize (int or cupy.ndarray, optional): A scalar or an N-length
            list giving the size of the Wiener filter window in each
            dimension. Elements of mysize should be odd. If mysize is a
            scalar, then this scalar is used as the size in each
            dimension.
        noise (float, optional): The noise-power to use. If None, then
            noise is estimated as the average of the local variance of
            the input.

    Returns:
        cupy.ndarray: Wiener filtered result with the same shape as `im`.

    .. seealso:: :func:`scipy.signal.wiener`
    """
    if mysize is None:
        mysize = 3
    mysize = _util._fix_sequence_arg(mysize, im.ndim, 'mysize', int)
    # Work in double (or complex-double) precision.
    im = im.astype(cupy.complex128 if im.dtype.kind == 'c' else cupy.float64,
                   copy=False)

    # Local first moment over the filter window.
    local_mean = _filters.uniform_filter(im, mysize, mode='constant')

    # Local variance: E[x^2] - E[x]^2 over the filter window.
    local_var = _filters.uniform_filter(im * im, mysize, mode='constant')
    local_var -= local_mean * local_mean

    # Estimate the noise power as the mean local variance if not given.
    if noise is None:
        noise = local_var.mean()

    # Wiener estimate: mean + (1 - noise/var) * (signal - mean).
    res = im - local_mean
    res *= 1 - noise / local_var
    res += local_mean
    # Where the local variance falls below the noise power, fall back to
    # the local mean.
    return cupy.where(local_var < noise, local_mean, res)


def order_filter(a, domain, rank):
    """Perform an order filter on an N-D array.

    The domain argument acts as a mask centered over each pixel. The
    non-zero elements of domain select the neighbors of each input
    pixel; these are sorted and the output pixel is the element at
    position `rank` in the sorted list.

    Args:
        a (cupy.ndarray): The N-dimensional input array.
        domain (cupy.ndarray): A mask array with the same number of
            dimensions as `a`. Each dimension should have an odd number
            of elements.
        rank (int): A non-negative integer which selects the element
            from the sorted list (0 corresponds to the smallest element).

    Returns:
        cupy.ndarray: The results of the order filter in an array with
        the same shape as `a`.

    .. seealso:: :func:`cupyx.scipy.ndimage.rank_filter`
    .. seealso:: :func:`scipy.signal.order_filter`
    """
    if a.dtype.kind in 'bc' or a.dtype == cupy.float16:
        # scipy doesn't support these types
        raise ValueError("data type not supported")
    if any(x % 2 != 1 for x in domain.shape):
        raise ValueError("Each dimension of domain argument "
                         " should have an odd number of elements.")
    return _filters.rank_filter(a, rank, footprint=domain, mode='constant')


def medfilt(volume, kernel_size=None):
    """Perform a median filter on an N-dimensional array.

    Apply a median filter to the input array using a local window-size
    given by `kernel_size`. The array is automatically zero-padded.

    Args:
        volume (cupy.ndarray): An N-dimensional input array.
        kernel_size (int or list of ints): Gives the size of the median
            filter window in each dimension. Elements of `kernel_size`
            should be odd. If `kernel_size` is a scalar, then this
            scalar is used as the size in each dimension. Default size
            is 3 for each dimension.

    Returns:
        cupy.ndarray: An array the same size as input containing the
        median filtered result.

    Raises:
        ValueError: If `volume` has a float16, bool or complex dtype
            (unsupported, for parity with SciPy).

    .. seealso:: :func:`cupyx.scipy.ndimage.median_filter`
    .. seealso:: :func:`scipy.signal.medfilt`
    """
    # Validate the dtype up front; all unsupported dtypes raise
    # ValueError consistently (previously complex64 alone raised
    # TypeError via a duplicated `dtype == 'F'` check).
    if volume.dtype.char == 'e':
        # scipy doesn't support float16
        raise ValueError("float16 type not supported")
    if volume.dtype.kind == 'b':
        # scipy doesn't support bool
        raise ValueError("bool type not supported")
    if volume.dtype.kind == 'c':
        # scipy doesn't support complex
        raise ValueError("complex types not supported")
    kernel_size = _get_kernel_size(kernel_size, volume.ndim)
    if any(k > s for k, s in zip(kernel_size, volume.shape)):
        warnings.warn('kernel_size exceeds volume extent: '
                      'volume will be zero-padded')

    # The median is the middle-ranked element of the window.
    size = internal.prod(kernel_size)
    return _filters.rank_filter(volume, size // 2, size=kernel_size,
                                mode='constant')


def medfilt2d(input, kernel_size=3):
    """Median filter a 2-dimensional array.

    Apply a median filter to the `input` array using a local window-size
    given by `kernel_size` (must be odd). The array is zero-padded
    automatically.

    Args:
        input (cupy.ndarray): A 2-dimensional input array.
        kernel_size (int of list of ints of length 2): Gives the size of
            the median filter window in each dimension. Elements of
            `kernel_size` should be odd. If `kernel_size` is a scalar,
            then this scalar is used as the size in each dimension.
            Default is a kernel of size (3, 3).

    Returns:
        cupy.ndarray: An array the same size as input containing the
        median filtered result.

    Raises:
        ValueError: If `input` is not 2-D, or has a float16, bool or
            complex dtype (unsupported, for parity with SciPy).

    .. seealso:: :func:`cupyx.scipy.ndimage.median_filter`
    .. seealso:: :func:`cupyx.scipy.signal.medfilt`
    .. seealso:: :func:`scipy.signal.medfilt2d`
    """
    # Same validation policy as `medfilt`: every unsupported dtype
    # raises ValueError (the duplicated `dtype == 'F'` TypeError branch
    # was removed for consistency).
    if input.dtype.char == 'e':
        # scipy doesn't support float16
        raise ValueError("float16 type not supported")
    if input.dtype.kind == 'b':
        # scipy doesn't support bool
        raise ValueError("bool type not supported")
    if input.dtype.kind == 'c':
        # scipy doesn't support complex
        raise ValueError("complex types not supported")
    if input.ndim != 2:
        raise ValueError('input must be 2d')
    kernel_size = _get_kernel_size(kernel_size, input.ndim)
    # The median is the middle-ranked element of the window.
    order = kernel_size[0] * kernel_size[1] // 2
    return _filters.rank_filter(
        input, order, size=kernel_size, mode='constant')
seealso:: :func:`scipy.signal.medfilt2d` + """ + if input.dtype.char == 'e': + # scipy doesn't support float16 + raise ValueError("float16 type not supported") + if input.dtype.kind == 'b': + # scipy doesn't support bool + raise ValueError("bool type not supported") + if input.ndim != 2: + raise ValueError('input must be 2d') + kernel_size = _get_kernel_size(kernel_size, input.ndim) + if input.dtype == 'F': + raise TypeError("complex types not supported") + if input.dtype.kind == 'c': + # scipy doesn't support complex + raise ValueError("complex types not supported") + order = kernel_size[0] * kernel_size[1] // 2 + return _filters.rank_filter( + input, order, size=kernel_size, mode='constant') + + +def lfilter(b, a, x, axis=-1, zi=None): + """ + Filter data along one-dimension with an IIR or FIR filter. + + Filter a data sequence, `x`, using a digital filter. This works for many + fundamental data types (including Object type). The filter is a direct + form II transposed implementation of the standard difference equation + (see Notes). + + The function `sosfilt` (and filter design using ``output='sos'``) should be + preferred over `lfilter` for most filtering tasks, as second-order sections + have fewer numerical problems. + + Parameters + ---------- + b : array_like + The numerator coefficient vector in a 1-D sequence. + a : array_like + The denominator coefficient vector in a 1-D sequence. If ``a[0]`` + is not 1, then both `a` and `b` are normalized by ``a[0]``. + x : array_like + An N-dimensional input array. + axis : int, optional + The axis of the input data array along which to apply the + linear filter. The filter is applied to each subarray along + this axis. Default is -1. + zi : array_like, optional + Initial conditions for the filter delays. It is a vector + (or array of vectors for an N-dimensional input) of length + ``len(b) + len(a) - 2``. 
The first ``len(b)`` numbers correspond to the + last elements of the previous input and the last ``len(a)`` to the last + elements of the previous output. If `zi` is None or is not given then + initial rest is assumed. See `lfiltic` for more information. + + **Note**: This argument differs from dimensions from the SciPy + implementation! However, as long as they are chained from the same + library, the output result will be the same. Please make sure to use + the `zi` from CuPy calls and not from SciPy. This due to the parallel + nature of this implementation as opposed to the serial one in SciPy. + + Returns + ------- + y : array + The output of the digital filter. + zf : array, optional + If `zi` is None, this is not returned, otherwise, `zf` holds the + final filter delay values. + + See Also + -------- + lfiltic : Construct initial conditions for `lfilter`. + lfilter_zi : Compute initial state (steady state of step response) for + `lfilter`. + filtfilt : A forward-backward filter, to obtain a filter with zero phase. + savgol_filter : A Savitzky-Golay filter. + sosfilt: Filter data using cascaded second-order sections. + sosfiltfilt: A forward-backward filter using second-order sections. + + Notes + ----- + The filter function is implemented as a direct II transposed structure. + This means that the filter implements:: + + a[0]*y[n] = b[0]*x[n] + b[1]*x[n-1] + ... + b[M]*x[n-M] + - a[1]*y[n-1] - ... - a[N]*y[n-N] + + where `M` is the degree of the numerator, `N` is the degree of the + denominator, `n` is the sample number and `L` denotes the length of the + input. It is implemented by computing first the FIR part and then + computing the IIR part from it:: + + a[0] * y = r(f(x, b), a) + f(x, b)[n] = b[0]*x[n] + b[1]*x[n-1] + ... + b[M]*x[n-M] + r(y, a)[n] = - a[1]*y[n-1] - ... - a[N]*y[n-N] + + The IIR result is computed in parallel by first dividing the input signal + into chunks (`g_i`) of size `m`. 
For each chunk, the IIR recurrence + equation is applied to each chunk (in parallel). Then the chunks are merged + based on the last N values of the last chunk:: + + nc = L/m + x = [g_0, g_1, ..., g_nc] + + g_i = [x[i * m], ..., x[i * m + m - 1]] + p_i = r(g_i, a) + + o_i = r(p_i, c(p_{i - 1})) if i > 1, + r(p_i, zi) otherwise + + y = [o_0, o_1, ..., o_nc] + + where `c` denotes a function that takes a chunk, slices the last `N` values + and adjust them using a correction factor table computed using the + (1, 2, ..., N)-fibonacci sequence. For more information see [1]_. + + The rational transfer function describing this filter in the + z-transform domain is:: + + -1 -M + b[0] + b[1]z + ... + b[M] z + Y(z) = -------------------------------- X(z) + -1 -N + a[0] + a[1]z + ... + a[N] z + + References + ---------- + .. [1] Sepideh Maleki and Martin Burtscher. + 2018. Automatic Hierarchical Parallelization of Linear Recurrences. + SIGPLAN Not. 53, 2 (February 2018), 128-138. + `10.1145/3173162.3173168 `_ + """ + a0 = a[0] + a_r = - a[1:] / a0 + b = b / a0 + + num_b = b.size - 1 + num_a = a_r.size + x_ndim = x.ndim + axis = internal._normalize_axis_index(axis, x_ndim) + n = x.shape[axis] + fir_dtype = cupy.result_type(x, b) + + prev_in = None + prev_out = None + pad_shape = list(x.shape) + pad_shape[axis] += num_b + + x_full = cupy.zeros(pad_shape, dtype=fir_dtype) + if zi is not None: + zi = cupy.atleast_1d(zi) + if num_b > 0: + prev_in = axis_slice(zi, 0, num_b, axis=axis) + if num_a > 0: + prev_out = axis_slice( + zi, zi.shape[axis] - num_a, zi.shape[axis], axis=axis) + + if prev_in is not None: + x_full = axis_assign(x_full, prev_in, 0, num_b, axis=axis) + + x_full = axis_assign(x_full, x, num_b, axis=axis) + origin = -num_b // 2 + out = cupy.empty_like(x_full, dtype=fir_dtype) + out = _filters.convolve1d( + x_full, b, axis=axis, mode='constant', origin=origin, output=out) + + if num_b > 0: + out = axis_slice(out, out.shape[axis] - n, out.shape[axis], axis=axis) + + if 
a_r.size > 0: + iir_dtype = cupy.result_type(fir_dtype, a) + const_dtype = cupy.dtype(a.dtype) + if const_dtype.kind == 'u': + const_dtype = cupy.dtype(const_dtype.char.lower()) + a = a.astype(const_dtype) + + out = apply_iir(out, a_r, axis=axis, zi=prev_out, dtype=iir_dtype) + + if zi is not None: + zi = cupy.empty(zi.shape, dtype=out.dtype) + if num_b > 0: + prev_in = axis_slice( + x, x.shape[axis] - num_b, x.shape[axis], axis=axis) + zi = axis_assign(zi, prev_in, 0, num_b, axis=axis) + if num_a > 0: + prev_out = axis_slice( + out, out.shape[axis] - num_a, out.shape[axis], axis=axis) + zi = axis_assign( + zi, prev_out, zi.shape[axis] - num_a, zi.shape[axis], + axis=axis) + return out, zi + else: + return out + + +def lfiltic(b, a, y, x=None): + """ + Construct initial conditions for lfilter given input and output vectors. + + Given a linear filter (b, a) and initial conditions on the output `y` + and the input `x`, return the initial conditions on the state vector zi + which is used by `lfilter` to generate the output given the input. + + Parameters + ---------- + b : array_like + Linear filter term. + a : array_like + Linear filter term. + y : array_like + Initial conditions. + If ``N = len(a) - 1``, then ``y = {y[-1], y[-2], ..., y[-N]}``. + If `y` is too short, it is padded with zeros. + x : array_like, optional + Initial conditions. + If ``M = len(b) - 1``, then ``x = {x[-1], x[-2], ..., x[-M]}``. + If `x` is not given, its initial conditions are assumed zero. + If `x` is too short, it is padded with zeros. + axis: int, optional + The axis to take the initial conditions from, if `x` and `y` are + n-dimensional + + Returns + ------- + zi : ndarray + The state vector ``zi = {z_0[-1], z_1[-1], ..., z_K-1[-1]}``, + where ``K = M + N``. + + See Also + -------- + lfilter, lfilter_zi + """ + # SciPy implementation only supports 1D initial conditions, however, + # lfilter accepts n-dimensional initial conditions. 
If SciPy implementation + # accepts n-dimensional arrays, then axis can be moved to the signature. + axis = -1 + fir_len = b.size - 1 + iir_len = a.size - 1 + + if y is None and x is None: + return None + + ref_ndim = y.ndim if y is not None else x.ndim + axis = internal._normalize_axis_index(axis, ref_ndim) + + zi = cupy.empty(0) + if y is not None and iir_len > 0: + pad_y = cupy.concatenate( + (y, cupy.zeros(max(iir_len - y.shape[axis], 0))), axis=axis) + zi = cupy.take(pad_y, list(range(iir_len)), axis=axis) + zi = cupy.flip(zi, axis) + + if x is not None and fir_len > 0: + pad_x = cupy.concatenate( + (x, cupy.zeros(max(fir_len - x.shape[axis], 0))), axis=axis) + fir_zi = cupy.take(pad_x, list(range(fir_len)), axis=axis) + fir_zi = cupy.flip(fir_zi, axis) + zi = cupy.concatenate((fir_zi, zi), axis=axis) + return zi + + +def lfilter_zi(b, a): + """ + Construct initial conditions for lfilter for step response steady-state. + + Compute an initial state `zi` for the `lfilter` function that corresponds + to the steady state of the step response. + + A typical use of this function is to set the initial state so that the + output of the filter starts at the same value as the first element of + the signal to be filtered. + + Parameters + ---------- + b, a : array_like (1-D) + The IIR filter coefficients. See `lfilter` for more + information. + + Returns + ------- + zi : 1-D ndarray + The initial state for the filter. 
+ + See Also + -------- + lfilter, lfiltic, filtfilt + """ + a0 = a[0] + a_r = - a[1:] / a0 + # b = b / a0 + num_b = b.size - 1 + num_a = a_r.size + + # The initial state for a FIR filter will be always one for a step input + zi = cupy.ones(num_b) + if num_a > 0: + zi_t = cupy.r_[zi, cupy.zeros(num_a)] + y, _ = lfilter(b, a, cupy.ones(num_a + 1), zi=zi_t) + y1 = y[:num_a] + y2 = y[-num_a:] + zero_coef = cupy.where(a_r == 0)[0] + + C = compute_correction_factors(a_r, a_r.size + 1, a_r.dtype) + C = C[:, a_r.size:] + C1 = C[:, :a_r.size].T + C2 = C[:, -a_r.size:].T + + # Take the difference between the non-adjusted output values and + # compute which initial output state would cause them to be constant. + if not len(zero_coef): + y_zi = cupy.linalg.solve(C1 - C2, y2 - y1) + else: + # Any zero coefficient would cause the system to be underdetermined + # therefore a least square solution is computed instead. + y_zi, _, _, _ = cupy.linalg.lstsq(C1 - C2, y2 - y1, rcond=None) + + y_zi = cupy.nan_to_num(y_zi, nan=0, posinf=cupy.inf, neginf=-cupy.inf) + zi = cupy.r_[zi, y_zi[::-1]] + return zi + + +def detrend(data, axis=-1, type='linear', bp=0, overwrite_data=False): + """ + Remove linear trend along axis from data. + + Parameters + ---------- + data : array_like + The input data. + axis : int, optional + The axis along which to detrend the data. By default this is the + last axis (-1). + type : {'linear', 'constant'}, optional + The type of detrending. If ``type == 'linear'`` (default), + the result of a linear least-squares fit to `data` is subtracted + from `data`. + If ``type == 'constant'``, only the mean of `data` is subtracted. + bp : array_like of ints, optional + A sequence of break points. If given, an individual linear fit is + performed for each part of `data` between two break points. + Break points are specified as indices into `data`. This parameter + only has an effect when ``type == 'linear'``. 
+ overwrite_data : bool, optional + If True, perform in place detrending and avoid a copy. Default is False + + Returns + ------- + ret : ndarray + The detrended input data. + + See Also + -------- + scipy.signal.detrend + + + """ + if type not in ['linear', 'l', 'constant', 'c']: + raise ValueError("Trend type must be 'linear' or 'constant'.") + data = cupy.asarray(data) + dtype = data.dtype.char + if dtype not in 'dfDF': + dtype = 'd' + if type in ['constant', 'c']: + ret = data - cupy.mean(data, axis, keepdims=True) + return ret + else: + dshape = data.shape + N = dshape[axis] + bp = cupy.sort(cupy.unique(cupy.r_[0, bp, N])) + if cupy.any(bp > N): + raise ValueError("Breakpoints must be less than length " + "of data along given axis.") + bp = bp.tolist() + # Restructure data so that axis is along first dimension and + # all other dimensions are collapsed into second dimension + rnk = len(dshape) + if axis < 0: + axis = axis + rnk + newdata = cupy.moveaxis(data, axis, 0) + newdata_shape = newdata.shape + newdata = newdata.reshape(N, -1) + + if not overwrite_data: + newdata = newdata.copy() # make sure we have a copy + if newdata.dtype.char not in 'dfDF': + newdata = newdata.astype(dtype) + + # Find leastsq fit and remove it for each piece + for m in range(len(bp) - 1): + Npts = bp[m + 1] - bp[m] + A = cupy.ones((Npts, 2), dtype) + A[:, 0] = cupy.arange(1, Npts + 1, dtype=dtype) / Npts + sl = slice(bp[m], bp[m + 1]) + coef, resids, rank, s = lstsq(A, newdata[sl], rcond=None) + newdata[sl] = newdata[sl] - A @ coef + + # Put data back in original shape. + newdata = newdata.reshape(newdata_shape) + ret = cupy.moveaxis(newdata, 0, axis) + return ret + + +def _filtfilt_gust(b, a, x, axis=-1, irlen=None): + """Forward-backward IIR filter that uses Gustafsson's method. + + Apply the IIR filter defined by `(b,a)` to `x` twice, first forward + then backward, using Gustafsson's initial conditions [1]_. 
+ + Let ``y_fb`` be the result of filtering first forward and then backward, + and let ``y_bf`` be the result of filtering first backward then forward. + Gustafsson's method is to compute initial conditions for the forward + pass and the backward pass such that ``y_fb == y_bf``. + + Parameters + ---------- + b : scalar or 1-D ndarray + Numerator coefficients of the filter. + a : scalar or 1-D ndarray + Denominator coefficients of the filter. + x : ndarray + Data to be filtered. + axis : int, optional + Axis of `x` to be filtered. Default is -1. + irlen : int or None, optional + The length of the nonnegligible part of the impulse response. + If `irlen` is None, or if the length of the signal is less than + ``2 * irlen``, then no part of the impulse response is ignored. + + Returns + ------- + y : ndarray + The filtered data. + x0 : ndarray + Initial condition for the forward filter. + x1 : ndarray + Initial condition for the backward filter. + + Notes + ----- + Typically the return values `x0` and `x1` are not needed by the + caller. The intended use of these return values is in unit tests. + + References + ---------- + .. [1] F. Gustaffson. Determining the initial states in forward-backward + filtering. Transactions on Signal Processing, 46(4):988-992, 1996. + """ + # In the comments, "Gustafsson's paper" and [1] refer to the + # paper referenced in the docstring. + + b = cupy.atleast_1d(b) + a = cupy.atleast_1d(a) + + order = max(len(b), len(a)) - 1 + if order == 0: + # The filter is just scalar multiplication, with no state. + scale = (b[0] / a[0]) ** 2 + y = scale * x + return y, cupy.array([]), cupy.array([]) + + if axis != -1 or axis != x.ndim - 1: + # Move the axis containing the data to the end. + x = cupy.swapaxes(x, axis, x.ndim - 1) + + # n is the number of samples in the data to be filtered. + n = x.shape[-1] + + if irlen is None or n <= 2 * irlen: + m = n + else: + m = irlen + + # Create Obs, the observability matrix (called O in the paper). 
+ # This matrix can be interpreted as the operator that propagates + # an arbitrary initial state to the output, assuming the input is + # zero. + # In Gustafsson's paper, the forward and backward filters are not + # necessarily the same, so he has both O_f and O_b. We use the same + # filter in both directions, so we only need O. The same comment + # applies to S below. + Obs = cupy.zeros((m, order)) + x_in = cupy.zeros(m) + x_in[0] = 1 + Obs[:, 0] = lfilter(cupy.ones(1), a, x_in) + for k in range(1, order): + Obs[k:, k] = Obs[:-k, 0] + + # Obsr is O^R (Gustafsson's notation for row-reversed O) + Obsr = Obs[::-1] + + # Create S. S is the matrix that applies the filter to the reversed + # propagated initial conditions. That is, + # out = S.dot(zi) + # is the same as + # tmp, _ = lfilter(b, a, zeros(), zi=zi) # Propagate ICs. + # out = lfilter(b, a, tmp[::-1]) # Reverse and filter. + + # Equations (5) & (6) of [1] + S = lfilter(b, a, Obs[::-1], axis=0) + + # Sr is S^R (row-reversed S) + Sr = S[::-1] + + # M is [(S^R - O), (O^R - S)] + if m == n: + M = cupy.hstack((Sr - Obs, Obsr - S)) + else: + # Matrix described in section IV of [1]. + M = cupy.zeros((2*m, 2*order)) + M[:m, :order] = Sr - Obs + M[m:, order:] = Obsr - S + + # Naive forward-backward and backward-forward filters. + # These have large transients because the filters use zero initial + # conditions. + y_f = lfilter(b, a, x) + y_fb = lfilter(b, a, y_f[..., ::-1])[..., ::-1] + + y_b = lfilter(b, a, x[..., ::-1])[..., ::-1] + y_bf = lfilter(b, a, y_b) + + delta_y_bf_fb = y_bf - y_fb + if m == n: + delta = delta_y_bf_fb + else: + start_m = delta_y_bf_fb[..., :m] + end_m = delta_y_bf_fb[..., -m:] + delta = cupy.concatenate((start_m, end_m), axis=-1) + + # ic_opt holds the "optimal" initial conditions. + # The following code computes the result shown in the formula + # of the paper between equations (6) and (7). 
+ if delta.ndim == 1: + ic_opt = cupy.linalg.lstsq(M, delta, rcond=None)[0] + else: + # Reshape delta so it can be used as an array of multiple + # right-hand-sides in linalg.lstsq. + delta2d = delta.reshape(-1, delta.shape[-1]).T + ic_opt0 = cupy.linalg.lstsq(M, delta2d, rcond=None)[0].T + ic_opt = ic_opt0.reshape(delta.shape[:-1] + (M.shape[-1],)) + + # Now compute the filtered signal using equation (7) of [1]. + # First, form [S^R, O^R] and call it W. + if m == n: + W = cupy.hstack((Sr, Obsr)) + else: + W = cupy.zeros((2*m, 2*order)) + W[:m, :order] = Sr + W[m:, order:] = Obsr + + # Equation (7) of [1] says + # Y_fb^opt = Y_fb^0 + W * [x_0^opt; x_{N-1}^opt] + # `wic` is (almost) the product on the right. + # W has shape (m, 2*order), and ic_opt has shape (..., 2*order), + # so we can't use W.dot(ic_opt). Instead, we dot ic_opt with W.T, + # so wic has shape (..., m). + wic = ic_opt.dot(W.T) + + # `wic` is "almost" the product of W and the optimal ICs in equation + # (7)--if we're using a truncated impulse response (m < n), `wic` + # contains only the adjustments required for the ends of the signal. + # Here we form y_opt, taking this into account if necessary. + y_opt = y_fb + if m == n: + y_opt += wic + else: + y_opt[..., :m] += wic[..., :m] + y_opt[..., -m:] += wic[..., -m:] + + x0 = ic_opt[..., :order] + x1 = ic_opt[..., -order:] + if axis != -1 or axis != x.ndim - 1: + # Restore the data axis to its original position. + x0 = cupy.swapaxes(x0, axis, x.ndim - 1) + x1 = cupy.swapaxes(x1, axis, x.ndim - 1) + y_opt = cupy.swapaxes(y_opt, axis, x.ndim - 1) + + return y_opt, x0, x1 + + +def _validate_pad(padtype, padlen, x, axis, ntaps): + """Helper to validate padding for filtfilt""" + if padtype not in ['even', 'odd', 'constant', None]: + raise ValueError(("Unknown value '%s' given to padtype. 
padtype " + "must be 'even', 'odd', 'constant', or None.") % + padtype) + + if padtype is None: + padlen = 0 + + if padlen is None: + # Original padding; preserved for backwards compatibility. + edge = ntaps * 3 + else: + edge = padlen + + # x's 'axis' dimension must be bigger than edge. + if x.shape[axis] <= edge: + raise ValueError("The length of the input vector x must be greater " + "than padlen, which is %d." % edge) + + if padtype is not None and edge > 0: + # Make an extension of length `edge` at each + # end of the input array. + if padtype == 'even': + ext = even_ext(x, edge, axis=axis) + elif padtype == 'odd': + ext = odd_ext(x, edge, axis=axis) + else: + ext = const_ext(x, edge, axis=axis) + else: + ext = x + return edge, ext + + +def filtfilt(b, a, x, axis=-1, padtype='odd', padlen=None, method='pad', + irlen=None): + """ + Apply a digital filter forward and backward to a signal. + + This function applies a linear digital filter twice, once forward and + once backwards. The combined filter has zero phase and a filter order + twice that of the original. + + The function provides options for handling the edges of the signal. + + The function `sosfiltfilt` (and filter design using ``output='sos'``) + should be preferred over `filtfilt` for most filtering tasks, as + second-order sections have fewer numerical problems. + + Parameters + ---------- + b : (N,) array_like + The numerator coefficient vector of the filter. + a : (N,) array_like + The denominator coefficient vector of the filter. If ``a[0]`` + is not 1, then both `a` and `b` are normalized by ``a[0]``. + x : array_like + The array of data to be filtered. + axis : int, optional + The axis of `x` to which the filter is applied. + Default is -1. + padtype : str or None, optional + Must be 'odd', 'even', 'constant', or None. This determines the + type of extension to use for the padded signal to which the filter + is applied. If `padtype` is None, no padding is used. The default + is 'odd'. 
+ padlen : int or None, optional + The number of elements by which to extend `x` at both ends of + `axis` before applying the filter. This value must be less than + ``x.shape[axis] - 1``. ``padlen=0`` implies no padding. + The default value is ``3 * max(len(a), len(b))``. + method : str, optional + Determines the method for handling the edges of the signal, either + "pad" or "gust". When `method` is "pad", the signal is padded; the + type of padding is determined by `padtype` and `padlen`, and `irlen` + is ignored. When `method` is "gust", Gustafsson's method is used, + and `padtype` and `padlen` are ignored. + irlen : int or None, optional + When `method` is "gust", `irlen` specifies the length of the + impulse response of the filter. If `irlen` is None, no part + of the impulse response is ignored. For a long signal, specifying + `irlen` can significantly improve the performance of the filter. + + Returns + ------- + y : ndarray + The filtered output with the same shape as `x`. + + See Also + -------- + sosfiltfilt, lfilter_zi, lfilter, lfiltic, savgol_filter, sosfilt + + Notes + ----- + When `method` is "pad", the function pads the data along the given axis + in one of three ways: odd, even or constant. The odd and even extensions + have the corresponding symmetry about the end point of the data. The + constant extension extends the data with the values at the end points. On + both the forward and backward passes, the initial condition of the + filter is found by using `lfilter_zi` and scaling it by the end point of + the extended data. + + When `method` is "gust", Gustafsson's method [1]_ is used. Initial + conditions are chosen for the forward and backward passes so that the + forward-backward filter gives the same result as the backward-forward + filter. + + References + ---------- + .. [1] F. Gustaffson, "Determining the initial states in forward-backward + filtering", Transactions on Signal Processing, Vol. 46, pp. 988-992, + 1996. 
+ """ + b = cupy.atleast_1d(b) + a = cupy.atleast_1d(a) + x = cupy.asarray(x) + + if method not in {"pad", "gust"}: + raise ValueError("method must be 'pad' or 'gust'.") + + const_dtype = cupy.dtype(a.dtype) + if const_dtype.kind == 'u': + const_dtype = cupy.dtype(const_dtype.char.lower()) + a = a.astype(const_dtype) + + if method == "gust": + y, z1, z2 = _filtfilt_gust(b, a, x, axis=axis, irlen=irlen) + return y + + # method == "pad" + edge, ext = _validate_pad(padtype, padlen, x, axis, + ntaps=max(len(a), len(b))) + + # Get the steady state of the filter's step response. + zi = lfilter_zi(b, a) + + # Reshape zi and create x0 so that zi*x0 broadcasts + # to the correct value for the 'zi' keyword argument + # to lfilter. + zi_shape = [1] * x.ndim + zi_shape[axis] = zi.size + zi = cupy.reshape(zi, zi_shape) + x0 = axis_slice(ext, stop=1, axis=axis) + + # Forward filter. + (y, zf) = lfilter(b, a, ext, axis=axis, zi=zi * x0) + + # Backward filter. + # Create y0 so zi*y0 broadcasts appropriately. + y0 = axis_slice(y, start=-1, axis=axis) + (y, zf) = lfilter(b, a, axis_reverse(y, axis=axis), axis=axis, zi=zi * y0) + + # Reverse y. + y = axis_reverse(y, axis=axis) + + if edge > 0: + # Slice the actual signal from the extended signal. + y = axis_slice(y, start=edge, stop=-edge, axis=axis) + + return y + + +def deconvolve(signal, divisor): + """Deconvolves ``divisor`` out of ``signal`` using inverse filtering. 
+ + Returns the quotient and remainder such that + ``signal = convolve(divisor, quotient) + remainder`` + + Parameters + ---------- + signal : (N,) array_like + Signal data, typically a recorded signal + divisor : (N,) array_like + Divisor data, typically an impulse response or filter that was + applied to the original signal + + Returns + ------- + quotient : ndarray + Quotient, typically the recovered original signal + remainder : ndarray + Remainder + + See Also + -------- + cupy.polydiv : performs polynomial division (same operation, but + also accepts poly1d objects) + + Examples + -------- + Deconvolve a signal that's been filtered: + + >>> from cupyx.scipy import signal + >>> original = [0, 1, 0, 0, 1, 1, 0, 0] + >>> impulse_response = [2, 1] + >>> recorded = signal.convolve(impulse_response, original) + >>> recorded + array([0, 2, 1, 0, 2, 3, 1, 0, 0]) + >>> recovered, remainder = signal.deconvolve(recorded, impulse_response) + >>> recovered + array([ 0., 1., 0., 0., 1., 1., 0., 0.]) + + """ + num = cupy.atleast_1d(signal) + den = cupy.atleast_1d(divisor) + if num.ndim > 1: + raise ValueError("signal must be 1-D.") + if den.ndim > 1: + raise ValueError("divisor must be 1-D.") + N = len(num) + D = len(den) + if D > N: + quot = [] + rem = num + else: + input = cupy.zeros(N - D + 1, float) + input[0] = 1 + quot = lfilter(num, den, input) + rem = num - convolve(den, quot, mode='full') + return quot, rem + + +def _get_kernel_size(kernel_size, ndim): + if kernel_size is None: + kernel_size = (3,) * ndim + kernel_size = _util._fix_sequence_arg(kernel_size, ndim, + 'kernel_size', int) + if any((k % 2) != 1 for k in kernel_size): + raise ValueError("Each element of kernel_size should be odd") + return kernel_size + + +def _validate_sos(sos): + """Helper to validate a SOS input""" + sos = cupy.atleast_2d(sos) + if sos.ndim != 2: + raise ValueError('sos array must be 2D') + n_sections, m = sos.shape + if m != 6: + raise ValueError('sos array must be shape (n_sections, 
6)') + if not (cupy.abs(sos[:, 3] - 1.0) <= 1e-15).all(): + raise ValueError('sos[:, 3] should be all ones') + return sos, n_sections + + +def _validate_x(x): + x = cupy.asarray(x) + if x.ndim == 0: + raise ValueError('x must be at least 1-D') + return x + + +def sosfilt(sos, x, axis=-1, zi=None): + """ + Filter data along one dimension using cascaded second-order sections. + + Filter a data sequence, `x`, using a digital IIR filter defined by + `sos`. + + Parameters + ---------- + sos : array_like + Array of second-order filter coefficients, must have shape + ``(n_sections, 6)``. Each row corresponds to a second-order + section, with the first three columns providing the numerator + coefficients and the last three providing the denominator + coefficients. + x : array_like + An N-dimensional input array. + axis : int, optional + The axis of the input data array along which to apply the + linear filter. The filter is applied to each subarray along + this axis. Default is -1. + zi : array_like, optional + Initial conditions for the cascaded filter delays. It is a (at + least 2D) vector of shape ``(n_sections, ..., 4, ...)``, where + ``..., 4, ...`` denotes the shape of `x`, but with ``x.shape[axis]`` + replaced by 4. If `zi` is None or is not given then initial rest + (i.e. all zeros) is assumed. + Note that these initial conditions are *not* the same as the initial + conditions given by `lfiltic` or `lfilter_zi`. + + Returns + ------- + y : ndarray + The output of the digital filter. + zf : ndarray, optional + If `zi` is None, this is not returned, otherwise, `zf` holds the + final filter delay values. + + See Also + -------- + zpk2sos, sos2zpk, sosfilt_zi, sosfiltfilt, sosfreqz + """ + x_ndim = x.ndim + axis = internal._normalize_axis_index(axis, x_ndim) + out = x + + out = apply_iir_sos(out, sos, axis, zi) + return out + + +def sosfilt_zi(sos): + """ + Construct initial conditions for sosfilt for step response steady-state. 
+ + Compute an initial state `zi` for the `sosfilt` function that corresponds + to the steady state of the step response. + + A typical use of this function is to set the initial state so that the + output of the filter starts at the same value as the first element of + the signal to be filtered. + + Parameters + ---------- + sos : array_like + Array of second-order filter coefficients, must have shape + ``(n_sections, 6)``. See `sosfilt` for the SOS filter format + specification. + + Returns + ------- + zi : ndarray + Initial conditions suitable for use with ``sosfilt``, shape + ``(n_sections, 4)``. + + See Also + -------- + sosfilt, zpk2sos + """ + n_sections = sos.shape[0] + + C = compute_correction_factors_sos(sos, 3, sos.dtype) + zi = cupy.zeros((sos.shape[0], 4), dtype=sos.dtype) + + # The initial state for a FIR filter will be always one for a step input + x_s = cupy.ones(3, dtype=sos.dtype) + for s in range(n_sections): + zi_s = cupy.atleast_2d(zi[s]) + sos_s = cupy.atleast_2d(sos[s]) + + # The FIR starting value that guarantees a constant output will be + # the same constant input values. + zi_s[0, :2] = x_s[:2] + + # Find the non-adjusted values after applying the IIR filter. + y_s, _ = sosfilt(sos_s, x_s, zi=zi_s) + + C_s = C[s] + y1 = y_s[:2] + y2 = y_s[-2:] + C1 = C_s[:, :2].T + C2 = C_s[:, -2:].T + + zero_iir_coef = cupy.where(sos[s, 3:] == 0)[0] + + # Take the difference between the non-adjusted output values and + # compute which initial output state would cause them to be constant. + if not len(zero_iir_coef): + y_zi = cupy.linalg.solve(C1 - C2, y2 - y1) + else: + # Any zero coefficient would cause the system to be underdetermined + # therefore a least square solution is computed instead. 
+ y_zi, _, _, _ = cupy.linalg.lstsq(C1 - C2, y2 - y1, rcond=None) + + y_zi = cupy.nan_to_num(y_zi, nan=0, posinf=cupy.inf, neginf=-cupy.inf) + zi_s[0, 2:] = y_zi[::-1] + x_s, _ = sosfilt(sos_s, x_s, zi=zi_s) + + return zi + + +def sosfiltfilt(sos, x, axis=-1, padtype='odd', padlen=None): + """ + A forward-backward digital filter using cascaded second-order sections. + + See `filtfilt` for more complete information about this method. + + Parameters + ---------- + sos : array_like + Array of second-order filter coefficients, must have shape + ``(n_sections, 6)``. Each row corresponds to a second-order + section, with the first three columns providing the numerator + coefficients and the last three providing the denominator + coefficients. + x : array_like + The array of data to be filtered. + axis : int, optional + The axis of `x` to which the filter is applied. + Default is -1. + padtype : str or None, optional + Must be 'odd', 'even', 'constant', or None. This determines the + type of extension to use for the padded signal to which the filter + is applied. If `padtype` is None, no padding is used. The default + is 'odd'. + padlen : int or None, optional + The number of elements by which to extend `x` at both ends of + `axis` before applying the filter. This value must be less than + ``x.shape[axis] - 1``. ``padlen=0`` implies no padding. + The default value is:: + + 3 * (2 * len(sos) + 1 - min((sos[:, 2] == 0).sum(), + (sos[:, 5] == 0).sum())) + + The extra subtraction at the end attempts to compensate for poles + and zeros at the origin (e.g. for odd-order filters) to yield + equivalent estimates of `padlen` to those of `filtfilt` for + second-order section filters built with `scipy.signal` functions. + + Returns + ------- + y : ndarray + The filtered output with the same shape as `x`. + + See Also + -------- + filtfilt, sosfilt, sosfilt_zi, sosfreqz + """ + sos, n_sections = _validate_sos(sos) + x = _validate_x(x) + + # `method` is "pad"... 
+ ntaps = 2 * n_sections + 1 + ntaps -= min((sos[:, 2] == 0).sum().item(), (sos[:, 5] == 0).sum().item()) + edge, ext = _validate_pad(padtype, padlen, x, axis, + ntaps=ntaps) + + # These steps follow the same form as filtfilt with modifications + zi = sosfilt_zi(sos) # shape (n_sections, 4) --> (n_sections, ..., 4, ...) + zi_shape = [1] * x.ndim + zi_shape[axis] = 4 + zi.shape = [n_sections] + zi_shape + x_0 = axis_slice(ext, stop=1, axis=axis) + (y, zf) = sosfilt(sos, ext, axis=axis, zi=zi * x_0) + y_0 = axis_slice(y, start=-1, axis=axis) + (y, zf) = sosfilt(sos, axis_reverse(y, axis=axis), axis=axis, zi=zi * y_0) + y = axis_reverse(y, axis=axis) + if edge > 0: + y = axis_slice(y, start=edge, stop=-edge, axis=axis) + return y + + +def hilbert(x, N=None, axis=-1): + """ + Compute the analytic signal, using the Hilbert transform. + + The transformation is done along the last axis by default. + + Parameters + ---------- + x : ndarray + Signal data. Must be real. + N : int, optional + Number of Fourier components. Default: ``x.shape[axis]`` + axis : int, optional + Axis along which to do the transformation. Default: -1. + + Returns + ------- + xa : ndarray + Analytic signal of `x`, of each 1-D array along `axis` + + Notes + ----- + The analytic signal ``x_a(t)`` of signal ``x(t)`` is: + + .. math:: x_a = F^{-1}(F(x) 2U) = x + i y + + where `F` is the Fourier transform, `U` the unit step function, + and `y` the Hilbert transform of `x`. [1]_ + + In other words, the negative half of the frequency spectrum is zeroed + out, turning the real-valued signal into a complex signal. The Hilbert + transformed signal can be obtained from ``np.imag(hilbert(x))``, and the + original signal from ``np.real(hilbert(x))``. + + References + ---------- + .. [1] Wikipedia, "Analytic signal". 
+ https://en.wikipedia.org/wiki/Analytic_signal + + See Also + -------- + scipy.signal.hilbert + + """ + if cupy.iscomplexobj(x): + raise ValueError("x must be real.") + if N is None: + N = x.shape[axis] + if N <= 0: + raise ValueError("N must be positive.") + + Xf = sp_fft.fft(x, N, axis=axis) + h = cupy.zeros(N, dtype=Xf.dtype) + if N % 2 == 0: + h[0] = h[N // 2] = 1 + h[1:N // 2] = 2 + else: + h[0] = 1 + h[1:(N + 1) // 2] = 2 + + if x.ndim > 1: + ind = [cupy.newaxis] * x.ndim + ind[axis] = slice(None) + h = h[tuple(ind)] + x = sp_fft.ifft(Xf * h, axis=axis) + return x + + +def hilbert2(x, N=None): + """ + Compute the '2-D' analytic signal of `x` + + Parameters + ---------- + x : ndarray + 2-D signal data. + N : int or tuple of two ints, optional + Number of Fourier components. Default is ``x.shape`` + + Returns + ------- + xa : ndarray + Analytic signal of `x` taken along axes (0,1). + + See Also + -------- + scipy.signal.hilbert2 + + """ + if x.ndim < 2: + x = cupy.atleast_2d(x) + if x.ndim > 2: + raise ValueError("x must be 2-D.") + if cupy.iscomplexobj(x): + raise ValueError("x must be real.") + if N is None: + N = x.shape + elif isinstance(N, int): + if N <= 0: + raise ValueError("N must be positive.") + N = (N, N) + elif len(N) != 2 or (N[0] <= 0 or N[1] <= 0): + raise ValueError("When given as a tuple, N must hold exactly " + "two positive integers") + + Xf = sp_fft.fft2(x, N, axes=(0, 1)) + h1 = cupy.zeros(N[0], dtype=Xf.dtype) + h2 = cupy.zeros(N[1], dtype=Xf.dtype) + for h in (h1, h1): + N1 = h.shape[0] + if N1 % 2 == 0: + h[0] = h[N1 // 2] = 1 + h[1:N1 // 2] = 2 + else: + h[0] = 1 + h[1:(N1 + 1) // 2] = 2 + + h = h1[:, cupy.newaxis] * h2[cupy.newaxis, :] + k = x.ndim + while k > 2: + h = h[:, cupy.newaxis] + k -= 1 + x = sp_fft.ifft2(Xf * h, axes=(0, 1)) + return x diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/_signaltools_core.py b/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/_signaltools_core.py new file mode 100644 index 
0000000000000000000000000000000000000000..8fce6370a1cb4856aecfd397413bf6671c9014aa --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/_signaltools_core.py @@ -0,0 +1,302 @@ +import math + +import cupy +from cupy._core import internal +from cupyx.scipy import fft +from cupyx.scipy.ndimage import _filters +from cupyx.scipy.ndimage import _util + + +def _check_conv_inputs(in1, in2, mode, convolution=True): + if in1.ndim == in2.ndim == 0: + return in1 * (in2 if convolution else in2.conj()) + if in1.ndim != in2.ndim: + raise ValueError('in1 and in2 should have the same dimensionality') + if in1.size == 0 or in2.size == 0: + return cupy.array([], dtype=in1.dtype) + if mode not in ('full', 'same', 'valid'): + raise ValueError('acceptable modes are "valid", "same", or "full"') + return None + + +def _direct_correlate(in1, in2, mode='full', output=float, convolution=False, + boundary='constant', fillvalue=0.0, shift=False): + if in1.ndim != 1 and (in1.dtype.kind == 'b' or + (in1.dtype.kind == 'f' and in1.dtype.itemsize < 4)): + raise ValueError('unsupported type in SciPy') + + # Swaps inputs so smaller one is in2: + # NOTE: when mode != 'valid' we can only swap with a constant-0 boundary + swapped_inputs = False + orig_in1_shape = in1.shape + if _inputs_swap_needed(mode, in1.shape, in2.shape) or ( + in2.size > in1.size and boundary == 'constant' and fillvalue == 0): + in1, in2 = in2, in1 + swapped_inputs = True + + # Due to several optimizations, the second array can only be 2 GiB + if in2.nbytes >= (1 << 31): + raise RuntimeError('smaller array must be 2 GiB or less, ' + 'use method="fft" instead') + + # At this point, in1.size > in2.size + # (except some cases when boundary != 'constant' or fillvalue != 0) + # Figure out the output shape and the origin of the kernel + if mode == 'full': + out_shape = tuple(x1+x2-1 for x1, x2 in zip(in1.shape, in2.shape)) + offsets = tuple(x-1 for x in in2.shape) + elif mode == 'valid': + out_shape = tuple(x1-x2+1 for 
x1, x2 in zip(in1.shape, in2.shape)) + offsets = (0,) * in1.ndim + else: # mode == 'same': + # In correlate2d: When using "same" mode with even-length inputs, the + # outputs of correlate and correlate2d differ: There is a 1-index + # offset between them. + # This is dealt with by using "shift" parameter. + out_shape = orig_in1_shape + if orig_in1_shape == in1.shape: + offsets = tuple((x-shift)//2 for x in in2.shape) + else: + offsets = tuple((2*x2-x1-(not convolution)+shift)//2 + for x1, x2 in zip(in1.shape, in2.shape)) + + # Check the output + # In SciPy, the output dtype is determined by inputs' dtypes + out_dtype = cupy.promote_types(in1, in2) + if not isinstance(output, cupy.ndarray): + if not cupy.can_cast(output, out_dtype): + raise ValueError('not available for this type') + output = cupy.empty(out_shape, out_dtype) + elif output.shape != out_shape: + raise ValueError('out has wrong shape') + elif output.dtype != out_dtype: + raise ValueError('out has wrong dtype') + + # Check input dtypes + # Internally, the kernel accumulates in in2's type, so if in2 has lower + # precision (can_cast = True!) 
we hit overflow easier + # TODO(leofang): this is a band-aid fix for cupy/cupy#6047 + if cupy.can_cast(in2, in1): + in2 = in2.astype(out_dtype) # make a copy while upcasting + + # Get and run the CuPy kernel + int_type = _util._get_inttype(in1) + kernel = _filters._get_correlate_kernel( + boundary, in2.shape, int_type, offsets, fillvalue) + in2 = _reverse(in2) if convolution else in2.conj() + if not swapped_inputs or convolution: + kernel(in1, in2, output) + elif output.dtype.kind != 'c': + # Avoids one array copy + kernel(in1, in2, _reverse(output)) + else: + kernel(in1, in2, output) + output = cupy.ascontiguousarray(_reverse(output)) + if swapped_inputs and (mode != 'valid' or not shift): + cupy.conjugate(output, out=output) + return output + + +def _reverse(x): + # Reverse array `x` in all dimensions + return x[(slice(None, None, -1),) * x.ndim] + + +def _inputs_swap_needed(mode, shape1, shape2, axes=None): + # See scipy's documentation in scipy.signal._signaltools + if mode != 'valid' or not shape1: + return False + if axes is None: + axes = tuple(range(len(shape1))) + not_ok1 = any(shape1[i] < shape2[i] for i in axes) + not_ok2 = any(shape1[i] > shape2[i] for i in axes) + if not_ok1 and not_ok2: + raise ValueError('For "valid" mode, one must be at least ' + 'as large as the other in every dimension') + return not_ok1 + + +def _init_freq_conv_axes(in1, in2, mode, axes, sorted_axes=False): + # See scipy's documentation in scipy.signal._signaltools + s1, s2 = in1.shape, in2.shape + axes = _init_nd_and_axes(in1, axes) + # Length-1 axes can rely on broadcasting rules, no fft needed + axes = [ax for ax in axes if s1[ax] != 1 and s2[ax] != 1] + if sorted_axes: + axes.sort() + + # Check that unused axes are either 1 (broadcast) or the same length + for ax, (dim1, dim2) in enumerate(zip(s1, s2)): + if ax not in axes and dim1 != dim2 and dim1 != 1 and dim2 != 1: + raise ValueError('incompatible shapes for in1 and in2:' + ' {} and {}'.format(s1, s2)) + + # Check that 
input sizes are compatible with 'valid' mode. + if _inputs_swap_needed(mode, s1, s2, axes=axes): + # Convolution is commutative + in1, in2 = in2, in1 + + return in1, in2, tuple(axes) + + +def _init_nd_and_axes(x, axes): + # See documentation in scipy.fft._helper._init_nd_shape_and_axes + # except shape argument is always None and doesn't return new shape + axes = internal._normalize_axis_indices(axes, x.ndim, sort_axes=False) + if not len(axes): + raise ValueError('when provided, axes cannot be empty') + if any(x.shape[ax] < 1 for ax in axes): + raise ValueError('invalid number of data points specified') + return axes + + +def _freq_domain_conv(in1, in2, axes, shape, calc_fast_len=False): + # See scipy's documentation in scipy.signal._signaltools + if not axes: + # rfftn/irfftn require an axis or more. + return in1 * in2 + + real = (in1.dtype.kind != 'c' and in2.dtype.kind != 'c') + fshape = ([fft.next_fast_len(shape[a], real) for a in axes] + if calc_fast_len else shape) + fftn, ifftn = (fft.rfftn, fft.irfftn) if real else (fft.fftn, fft.ifftn) + + # Perform the convolution + sp1 = fftn(in1, fshape, axes=axes) + sp2 = fftn(in2, fshape, axes=axes) + out = ifftn(sp1 * sp2, fshape, axes=axes) + + return out[tuple(slice(x) for x in shape)] if calc_fast_len else out + + +def _apply_conv_mode(full, s1, s2, mode, axes): + # See scipy's documentation in scipy.signal._signaltools + if mode == 'full': + return cupy.ascontiguousarray(full) + if mode == 'valid': + s1 = [full.shape[a] if a not in axes else s1[a] - s2[a] + 1 + for a in range(full.ndim)] + starts = [(cur-new)//2 for cur, new in zip(full.shape, s1)] + slices = tuple(slice(start, start+length) + for start, length in zip(starts, s1)) + return cupy.ascontiguousarray(full[slices]) + + +__EXP_N1 = 0.36787944117144232159553 # exp(-1) + + +def _optimal_oa_block_size(overlap): + """ + Computes the optimal block size for the OA method given the overlap size. 
+ + Computed as ``ceil(-overlap*W(-1/(2*e*overlap)))`` where ``W(z)`` is the + Lambert W function solved as per ``scipy.special.lambertw(z, -1)`` with a + fixed 4 iterations. + + Returned size should still be given to ``cupyx.scipy.fft.next_fast_len()``. + """ + + # This function is 10x faster in Cython (but only 1.7us in Python). Can be + # easily moved to Cython by: + # * adding `DEF` before `__EXP_N1` + # * changing `import math` to `from libc cimport math` + # * adding `@cython.cdivision(True)` before the function + # * adding `Py_ssize_t` as the type for the `overlap` argument + # * adding a cast `` or `int(...)` to the return value + # * adding the following type declarations: + # cdef double z, w, ew, wew, wewz + # cdef int i + + # Compute W(-1/(2*e*overlap)) + z = -__EXP_N1/(2*overlap) # value to compute for + w = -1 - math.log(2*overlap) # initial guess + for i in range(4): + ew = math.exp(w) + wew = w*ew + wewz = wew - z + w -= wewz/(wew + ew - (w + 2)*wewz/(2*w + 2)) + return math.ceil(-overlap*w) + + +def _calc_oa_lens(s1, s2): + # See scipy's documentation in scipy.signal._signaltools + + # Set up the arguments for the conventional FFT approach. + fallback = (s1+s2-1, None, s1, s2) + + # Use conventional FFT convolve if sizes are same. + if s1 == s2 or s1 == 1 or s2 == 1: + return fallback + + # Make s1 the larger size + swapped = s2 > s1 + if swapped: + s1, s2 = s2, s1 + + # There cannot be a useful block size if s2 is more than half of s1. + if s2 >= s1//2: + return fallback + + # Compute the optimal block size from the overlap + overlap = s2-1 + block_size = fft.next_fast_len(_optimal_oa_block_size(overlap)) + + # Use conventional FFT convolve if there is only going to be one block. 
+ if block_size >= s1: + return fallback + + # Get step size for each of the blocks + in1_step, in2_step = block_size-s2+1, s2 + if swapped: + in1_step, in2_step = in2_step, in1_step + + return block_size, overlap, in1_step, in2_step + + +def _oa_reshape_inputs(in1, in2, axes, shape_final, + block_size, overlaps, in1_step, in2_step): + # Figure out the number of steps and padding. + # This would get too complicated in a list comprehension. + nsteps1 = [] + nsteps2 = [] + pad_size1 = [] + pad_size2 = [] + for i in range(in1.ndim): + if i not in axes: + pad_size1 += [(0, 0)] + pad_size2 += [(0, 0)] + continue + + curnstep1, curpad1, curnstep2, curpad2 = 1, 0, 1, 0 + + if in1.shape[i] > in1_step[i]: + curnstep1 = math.ceil((in1.shape[i]+1)/in1_step[i]) + if (block_size[i] - overlaps[i])*curnstep1 < shape_final[i]: + curnstep1 += 1 + curpad1 = curnstep1*in1_step[i] - in1.shape[i] + if in2.shape[i] > in2_step[i]: + curnstep2 = math.ceil((in2.shape[i]+1)/in2_step[i]) + if (block_size[i] - overlaps[i])*curnstep2 < shape_final[i]: + curnstep2 += 1 + curpad2 = curnstep2*in2_step[i] - in2.shape[i] + + nsteps1 += [curnstep1] + nsteps2 += [curnstep2] + pad_size1 += [(0, curpad1)] + pad_size2 += [(0, curpad2)] + + # Pad array to a size that can be reshaped to desired shape if necessary + if not all(curpad == (0, 0) for curpad in pad_size1): + in1 = cupy.pad(in1, pad_size1, mode='constant', constant_values=0) + if not all(curpad == (0, 0) for curpad in pad_size2): + in2 = cupy.pad(in2, pad_size2, mode='constant', constant_values=0) + + # We need to put each new dimension before the corresponding dimension + # being reshaped in order to get the data in the right layout at the end. 
+ reshape_size1 = list(in1_step) + reshape_size2 = list(in2_step) + for i, iax in enumerate(axes): + reshape_size1.insert(iax+i, nsteps1[i]) + reshape_size2.insert(iax+i, nsteps2[i]) + + return in1.reshape(*reshape_size1), in2.reshape(*reshape_size2) diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/_spectral.py b/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/_spectral.py new file mode 100644 index 0000000000000000000000000000000000000000..5a404fd2519d62ed30a29631916fdd07092a5967 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/_spectral.py @@ -0,0 +1,1582 @@ +""" +Spectral analysis functions and utilities. + +Some of the functions defined here were ported directly from CuSignal under +terms of the MIT license, under the following notice + +Copyright (c) 2019-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + +Permission is hereby granted, free of charge, to any person obtaining a +copy of this software and associated documentation files (the "Software"), +to deal in the Software without restriction, including without limitation +and/or sell copies of the Software, and to permit persons to whom the +Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. 
+""" + +import warnings + +import cupy +from cupyx.scipy.signal.windows._windows import get_window +from cupyx.scipy.signal._spectral_impl import ( + _lombscargle, _spectral_helper, _median_bias, _triage_segments) + + +def lombscargle(x, y, freqs, precenter=False, normalize=False): + """ + lombscargle(x, y, freqs) + + Computes the Lomb-Scargle periodogram. + + The Lomb-Scargle periodogram was developed by Lomb [1]_ and further + extended by Scargle [2]_ to find, and test the significance of weak + periodic signals with uneven temporal sampling. + + When *normalize* is False (default) the computed periodogram + is unnormalized, it takes the value ``(A**2) * N/4`` for a harmonic + signal with amplitude A for sufficiently large N. + + When *normalize* is True the computed periodogram is normalized by + the residuals of the data around a constant reference model (at zero). + + Input arrays should be one-dimensional and will be cast to float64. + + Parameters + ---------- + x : array_like + Sample times. + y : array_like + Measurement values. + freqs : array_like + Angular frequencies for output periodogram. + precenter : bool, optional + Pre-center amplitudes by subtracting the mean. + normalize : bool, optional + Compute normalized periodogram. + + Returns + ------- + pgram : array_like + Lomb-Scargle periodogram. + + Raises + ------ + ValueError + If the input arrays `x` and `y` do not have the same shape. + + Notes + ----- + This subroutine calculates the periodogram using a slightly + modified algorithm due to Townsend [3]_ which allows the + periodogram to be calculated using only a single pass through + the input arrays for each frequency. + The algorithm running time scales roughly as O(x * freqs) or O(N^2) + for a large number of samples and frequencies. + + References + ---------- + .. [1] N.R. Lomb "Least-squares frequency analysis of unequally spaced + data", Astrophysics and Space Science, vol 39, pp. 447-462, 1976 + .. [2] J.D. 
Scargle "Studies in astronomical time series analysis. II - + Statistical aspects of spectral analysis of unevenly spaced data", + The Astrophysical Journal, vol 263, pp. 835-853, 1982 + .. [3] R.H.D. Townsend, "Fast calculation of the Lomb-Scargle + periodogram using graphics processing units.", The Astrophysical + Journal Supplement Series, vol 191, pp. 247-253, 2010 + + See Also + -------- + istft: Inverse Short Time Fourier Transform + check_COLA: Check whether the Constant OverLap Add (COLA) constraint is met + welch: Power spectral density by Welch's method + spectrogram: Spectrogram by Welch's method + csd: Cross spectral density by Welch's method + """ + + x = cupy.asarray(x, dtype=cupy.float64) + y = cupy.asarray(y, dtype=cupy.float64) + freqs = cupy.asarray(freqs, dtype=cupy.float64) + pgram = cupy.empty(freqs.shape[0], dtype=cupy.float64) + + assert x.ndim == 1 + assert y.ndim == 1 + assert freqs.ndim == 1 + + # Check input sizes + if x.shape[0] != y.shape[0]: + raise ValueError("Input arrays do not have the same size.") + + y_dot = cupy.zeros(1, dtype=cupy.float64) + if normalize: + cupy.dot(y, y, out=y_dot) + + if precenter: + y_in = y - y.mean() + else: + y_in = y + + _lombscargle(x, y_in, freqs, pgram, y_dot) + + return pgram + + +def periodogram( + x, + fs=1.0, + window="boxcar", + nfft=None, + detrend="constant", + return_onesided=True, + scaling="density", + axis=-1, +): + """ + Estimate power spectral density using a periodogram. + + Parameters + ---------- + x : array_like + Time series of measurement values + fs : float, optional + Sampling frequency of the `x` time series. Defaults to 1.0. + window : str or tuple or array_like, optional + Desired window to use. If `window` is a string or tuple, it is + passed to `get_window` to generate the window values, which are + DFT-even by default. See `get_window` for a list of windows and + required parameters. 
If `window` is array_like it will be used + directly as the window and its length must be nperseg. Defaults + to 'boxcar'. + nfft : int, optional + Length of the FFT used. If `None` the length of `x` will be + used. + detrend : str or function or `False`, optional + Specifies how to detrend each segment. If `detrend` is a + string, it is passed as the `type` argument to the `detrend` + function. If it is a function, it takes a segment and returns a + detrended segment. If `detrend` is `False`, no detrending is + done. Defaults to 'constant'. + return_onesided : bool, optional + If `True`, return a one-sided spectrum for real data. If + `False` return a two-sided spectrum. Defaults to `True`, but for + complex data, a two-sided spectrum is always returned. + scaling : { 'density', 'spectrum' }, optional + Selects between computing the power spectral density ('density') + where `Pxx` has units of V**2/Hz and computing the power + spectrum ('spectrum') where `Pxx` has units of V**2, if `x` + is measured in V and `fs` is measured in Hz. Defaults to + 'density' + axis : int, optional + Axis along which the periodogram is computed; the default is + over the last axis (i.e. ``axis=-1``). + + Returns + ------- + f : ndarray + Array of sample frequencies. + Pxx : ndarray + Power spectral density or power spectrum of `x`. 
+ + See Also + -------- + welch: Estimate power spectral density using Welch's method + lombscargle: Lomb-Scargle periodogram for unevenly sampled data + """ + x = cupy.asarray(x) + + if x.size == 0: + return cupy.empty(x.shape), cupy.empty(x.shape) + + if window is None: + window = "boxcar" + + if nfft is None: + nperseg = x.shape[axis] + elif nfft == x.shape[axis]: + nperseg = nfft + elif nfft > x.shape[axis]: + nperseg = x.shape[axis] + elif nfft < x.shape[axis]: + # cupy.s_ not implemented + s = [cupy.s_[:]] * len(x.shape) + s[axis] = cupy.s_[:nfft] + x = cupy.asarray(x[tuple(s)]) + nperseg = nfft + nfft = None + + return welch( + x, + fs=fs, + window=window, + nperseg=nperseg, + noverlap=0, + nfft=nfft, + detrend=detrend, + return_onesided=return_onesided, + scaling=scaling, + axis=axis, + ) + + +def welch( + x, + fs=1.0, + window="hann", + nperseg=None, + noverlap=None, + nfft=None, + detrend="constant", + return_onesided=True, + scaling="density", + axis=-1, + average="mean", +): + r""" + Estimate power spectral density using Welch's method. + + Welch's method [1]_ computes an estimate of the power spectral + density by dividing the data into overlapping segments, computing a + modified periodogram for each segment and averaging the + periodograms. + + Parameters + ---------- + x : array_like + Time series of measurement values + fs : float, optional + Sampling frequency of the `x` time series. Defaults to 1.0. + window : str or tuple or array_like, optional + Desired window to use. If `window` is a string or tuple, it is + passed to `get_window` to generate the window values, which are + DFT-even by default. See `get_window` for a list of windows and + required parameters. If `window` is array_like it will be used + directly as the window and its length must be nperseg. Defaults + to a Hann window. + nperseg : int, optional + Length of each segment. 
Defaults to None, but if window is str or + tuple, is set to 256, and if window is array_like, is set to the + length of the window. + noverlap : int, optional + Number of points to overlap between segments. If `None`, + ``noverlap = nperseg // 2``. Defaults to `None`. + nfft : int, optional + Length of the FFT used, if a zero padded FFT is desired. If + `None`, the FFT length is `nperseg`. Defaults to `None`. + detrend : str or function or `False`, optional + Specifies how to detrend each segment. If `detrend` is a + string, it is passed as the `type` argument to the `detrend` + function. If it is a function, it takes a segment and returns a + detrended segment. If `detrend` is `False`, no detrending is + done. Defaults to 'constant'. + return_onesided : bool, optional + If `True`, return a one-sided spectrum for real data. If + `False` return a two-sided spectrum. Defaults to `True`, but for + complex data, a two-sided spectrum is always returned. + scaling : { 'density', 'spectrum' }, optional + Selects between computing the power spectral density ('density') + where `Pxx` has units of V**2/Hz and computing the power + spectrum ('spectrum') where `Pxx` has units of V**2, if `x` + is measured in V and `fs` is measured in Hz. Defaults to + 'density' + axis : int, optional + Axis along which the periodogram is computed; the default is + over the last axis (i.e. ``axis=-1``). + average : { 'mean', 'median' }, optional + Method to use when averaging periodograms. Defaults to 'mean'. + + + Returns + ------- + f : ndarray + Array of sample frequencies. + Pxx : ndarray + Power spectral density or power spectrum of x. + + See Also + -------- + periodogram: Simple, optionally modified periodogram + lombscargle: Lomb-Scargle periodogram for unevenly sampled data + + Notes + ----- + An appropriate amount of overlap will depend on the choice of window + and on your requirements. 
For the default Hann window an overlap of + 50% is a reasonable trade off between accurately estimating the + signal power, while not over counting any of the data. Narrower + windows may require a larger overlap. + + If `noverlap` is 0, this method is equivalent to Bartlett's method + [2]_. + + References + ---------- + .. [1] P. Welch, "The use of the fast Fourier transform for the + estimation of power spectra: A method based on time averaging + over short, modified periodograms", IEEE Trans. Audio + Electroacoust. vol. 15, pp. 70-73, 1967. + .. [2] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra", + Biometrika, vol. 37, pp. 1-16, 1950. + """ + + freqs, Pxx = csd( + x, + x, + fs=fs, + window=window, + nperseg=nperseg, + noverlap=noverlap, + nfft=nfft, + detrend=detrend, + return_onesided=return_onesided, + scaling=scaling, + axis=axis, + average=average, + ) + + return freqs, Pxx.real + + +def csd( + x, + y, + fs=1.0, + window="hann", + nperseg=None, + noverlap=None, + nfft=None, + detrend="constant", + return_onesided=True, + scaling="density", + axis=-1, + average="mean", +): + r""" + Estimate the cross power spectral density, Pxy, using Welch's + method. + + Parameters + ---------- + x : array_like + Time series of measurement values + y : array_like + Time series of measurement values + fs : float, optional + Sampling frequency of the `x` and `y` time series. Defaults + to 1.0. + window : str or tuple or array_like, optional + Desired window to use. If `window` is a string or tuple, it is + passed to `get_window` to generate the window values, which are + DFT-even by default. See `get_window` for a list of windows and + required parameters. If `window` is array_like it will be used + directly as the window and its length must be nperseg. Defaults + to a Hann window. + nperseg : int, optional + Length of each segment. 
Defaults to None, but if window is str or + tuple, is set to 256, and if window is array_like, is set to the + length of the window. + noverlap: int, optional + Number of points to overlap between segments. If `None`, + ``noverlap = nperseg // 2``. Defaults to `None`. + nfft : int, optional + Length of the FFT used, if a zero padded FFT is desired. If + `None`, the FFT length is `nperseg`. Defaults to `None`. + detrend : str or function or `False`, optional + Specifies how to detrend each segment. If `detrend` is a + string, it is passed as the `type` argument to the `detrend` + function. If it is a function, it takes a segment and returns a + detrended segment. If `detrend` is `False`, no detrending is + done. Defaults to 'constant'. + return_onesided : bool, optional + If `True`, return a one-sided spectrum for real data. If + `False` return a two-sided spectrum. Defaults to `True`, but for + complex data, a two-sided spectrum is always returned. + scaling : { 'density', 'spectrum' }, optional + Selects between computing the cross spectral density ('density') + where `Pxy` has units of V**2/Hz and computing the cross spectrum + ('spectrum') where `Pxy` has units of V**2, if `x` and `y` are + measured in V and `fs` is measured in Hz. Defaults to 'density' + axis : int, optional + Axis along which the CSD is computed for both inputs; the + default is over the last axis (i.e. ``axis=-1``). + average : { 'mean', 'median' }, optional + Method to use when averaging periodograms. Defaults to 'mean'. + + + Returns + ------- + f : ndarray + Array of sample frequencies. + Pxy : ndarray + Cross spectral density or cross power spectrum of x,y. + + See Also + -------- + periodogram: Simple, optionally modified periodogram + lombscargle: Lomb-Scargle periodogram for unevenly sampled data + welch: Power spectral density by Welch's method. [Equivalent to + csd(x,x)] + coherence: Magnitude squared coherence by Welch's method. 
+ + Notes + ----- + By convention, Pxy is computed with the conjugate FFT of X + multiplied by the FFT of Y. + + If the input series differ in length, the shorter series will be + zero-padded to match. + + An appropriate amount of overlap will depend on the choice of window + and on your requirements. For the default Hann window an overlap of + 50% is a reasonable trade off between accurately estimating the + signal power, while not over counting any of the data. Narrower + windows may require a larger overlap. + + """ + x = cupy.asarray(x) + y = cupy.asarray(y) + freqs, _, Pxy = _spectral_helper( + x, + y, + fs, + window, + nperseg, + noverlap, + nfft, + detrend, + return_onesided, + scaling, + axis, + mode="psd", + ) + + # Average over windows. + if len(Pxy.shape) >= 2 and Pxy.size > 0: + if Pxy.shape[-1] > 1: + if average == "median": + Pxy = cupy.median(Pxy, axis=-1) / _median_bias(Pxy.shape[-1]) + elif average == "mean": + Pxy = Pxy.mean(axis=-1) + else: + raise ValueError( + 'average must be "median" or "mean", got %s' % (average,) + ) + else: + Pxy = cupy.reshape(Pxy, Pxy.shape[:-1]) + + return freqs, Pxy + + +def check_COLA(window, nperseg, noverlap, tol=1e-10): + r"""Check whether the Constant OverLap Add (COLA) constraint is met. + + Parameters + ---------- + window : str or tuple or array_like + Desired window to use. If `window` is a string or tuple, it is + passed to `get_window` to generate the window values, which are + DFT-even by default. See `get_window` for a list of windows and + required parameters. If `window` is array_like it will be used + directly as the window and its length must be nperseg. + nperseg : int + Length of each segment. + noverlap : int + Number of points to overlap between segments. + tol : float, optional + The allowed variance of a bin's weighted sum from the median bin + sum. 
+ + Returns + ------- + verdict : bool + `True` if chosen combination satisfies COLA within `tol`, + `False` otherwise + + See Also + -------- + check_NOLA: Check whether the Nonzero Overlap Add (NOLA) constraint is met + stft: Short Time Fourier Transform + istft: Inverse Short Time Fourier Transform + + Notes + ----- + In order to enable inversion of an STFT via the inverse STFT in + `istft`, it is sufficient that the signal windowing obeys the constraint of + "Constant OverLap Add" (COLA). This ensures that every point in the input + data is equally weighted, thereby avoiding aliasing and allowing full + reconstruction. + + Some examples of windows that satisfy COLA: + - Rectangular window at overlap of 0, 1/2, 2/3, 3/4, ... + - Bartlett window at overlap of 1/2, 3/4, 5/6, ... + - Hann window at 1/2, 2/3, 3/4, ... + - Any Blackman family window at 2/3 overlap + - Any window with ``noverlap = nperseg-1`` + + A very comprehensive list of other windows may be found in [2]_, + wherein the COLA condition is satisfied when the "Amplitude + Flatness" is unity. See [1]_ for more information. + + References + ---------- + .. [1] Julius O. Smith III, "Spectral Audio Signal Processing", W3K + Publishing, 2011,ISBN 978-0-9745607-3-1. + .. [2] G. Heinzel, A. Ruediger and R. 
Schilling, "Spectrum and + spectral density estimation by the Discrete Fourier transform + (DFT), including a comprehensive list of window functions and + some new at-top windows", 2002, + http://hdl.handle.net/11858/00-001M-0000-0013-557A-5 + + """ + nperseg = int(nperseg) + + if nperseg < 1: + raise ValueError('nperseg must be a positive integer') + + if noverlap >= nperseg: + raise ValueError('noverlap must be less than nperseg.') + noverlap = int(noverlap) + + if isinstance(window, str) or type(window) is tuple: + win = get_window(window, nperseg) + else: + win = cupy.asarray(window) + if len(win.shape) != 1: + raise ValueError('window must be 1-D') + if win.shape[0] != nperseg: + raise ValueError('window must have length of nperseg') + + step = nperseg - noverlap + binsums = sum(win[ii * step:(ii + 1) * step] + for ii in range(nperseg//step)) + + if nperseg % step != 0: + binsums[:nperseg % step] += win[-(nperseg % step):] + + deviation = binsums - cupy.median(binsums) + return cupy.max(cupy.abs(deviation)) < tol + + +def check_NOLA(window, nperseg, noverlap, tol=1e-10): + r"""Check whether the Nonzero Overlap Add (NOLA) constraint is met. + + Parameters + ---------- + window : str or tuple or array_like + Desired window to use. If `window` is a string or tuple, it is + passed to `get_window` to generate the window values, which are + DFT-even by default. See `get_window` for a list of windows and + required parameters. If `window` is array_like it will be used + directly as the window and its length must be nperseg. + nperseg : int + Length of each segment. + noverlap : int + Number of points to overlap between segments. + tol : float, optional + The allowed variance of a bin's weighted sum from the median bin + sum. 
+ + Returns + ------- + verdict : bool + `True` if chosen combination satisfies the NOLA constraint within + `tol`, `False` otherwise + + See Also + -------- + check_COLA: Check whether the Constant OverLap Add (COLA) constraint is met + stft: Short Time Fourier Transform + istft: Inverse Short Time Fourier Transform + + Notes + ----- + In order to enable inversion of an STFT via the inverse STFT in + `istft`, the signal windowing must obey the constraint of "nonzero + overlap add" (NOLA): + + .. math:: \sum_{t}w^{2}[n-tH] \ne 0 + + for all :math:`n`, where :math:`w` is the window function, :math:`t` is the + frame index, and :math:`H` is the hop size (:math:`H` = `nperseg` - + `noverlap`). + + This ensures that the normalization factors in the denominator of the + overlap-add inversion equation are not zero. Only very pathological windows + will fail the NOLA constraint. + + See [1]_, [2]_ for more information. + + References + ---------- + .. [1] Julius O. Smith III, "Spectral Audio Signal Processing", W3K + Publishing, 2011,ISBN 978-0-9745607-3-1. + .. [2] G. Heinzel, A. Ruediger and R. 
Schilling, "Spectrum and + spectral density estimation by the Discrete Fourier transform + (DFT), including a comprehensive list of window functions and + some new at-top windows", 2002, + http://hdl.handle.net/11858/00-001M-0000-0013-557A-5 + + """ + nperseg = int(nperseg) + + if nperseg < 1: + raise ValueError('nperseg must be a positive integer') + + if noverlap >= nperseg: + raise ValueError('noverlap must be less than nperseg') + if noverlap < 0: + raise ValueError('noverlap must be a nonnegative integer') + noverlap = int(noverlap) + + if isinstance(window, str) or type(window) is tuple: + win = get_window(window, nperseg) + else: + win = cupy.asarray(window) + if len(win.shape) != 1: + raise ValueError('window must be 1-D') + if win.shape[0] != nperseg: + raise ValueError('window must have length of nperseg') + + step = nperseg - noverlap + binsums = sum(win[ii * step:(ii + 1) * step] ** 2 + for ii in range(nperseg//step)) + + if nperseg % step != 0: + binsums[:nperseg % step] += win[-(nperseg % step):]**2 + + return cupy.min(binsums) > tol + + +def stft( + x, + fs=1.0, + window="hann", + nperseg=256, + noverlap=None, + nfft=None, + detrend=False, + return_onesided=True, + boundary="zeros", + padded=True, + axis=-1, + scaling='spectrum' +): + r""" + Compute the Short Time Fourier Transform (STFT). + + STFTs can be used as a way of quantifying the change of a + nonstationary signal's frequency and phase content over time. + + Parameters + ---------- + x : array_like + Time series of measurement values + fs : float, optional + Sampling frequency of the `x` time series. Defaults to 1.0. + window : str or tuple or array_like, optional + Desired window to use. If `window` is a string or tuple, it is + passed to `get_window` to generate the window values, which are + DFT-even by default. See `get_window` for a list of windows and + required parameters. If `window` is array_like it will be used + directly as the window and its length must be nperseg. 
Defaults + to a Hann window. + nperseg : int, optional + Length of each segment. Defaults to 256. + noverlap : int, optional + Number of points to overlap between segments. If `None`, + ``noverlap = nperseg // 2``. Defaults to `None`. When + specified, the COLA constraint must be met (see Notes below). + nfft : int, optional + Length of the FFT used, if a zero padded FFT is desired. If + `None`, the FFT length is `nperseg`. Defaults to `None`. + detrend : str or function or `False`, optional + Specifies how to detrend each segment. If `detrend` is a + string, it is passed as the `type` argument to the `detrend` + function. If it is a function, it takes a segment and returns a + detrended segment. If `detrend` is `False`, no detrending is + done. Defaults to `False`. + return_onesided : bool, optional + If `True`, return a one-sided spectrum for real data. If + `False` return a two-sided spectrum. Defaults to `True`, but for + complex data, a two-sided spectrum is always returned. + boundary : str or None, optional + Specifies whether the input signal is extended at both ends, and + how to generate the new values, in order to center the first + windowed segment on the first input point. This has the benefit + of enabling reconstruction of the first input point when the + employed window function starts at zero. Valid options are + ``['even', 'odd', 'constant', 'zeros', None]``. Defaults to + 'zeros', for zero padding extension. I.e. ``[1, 2, 3, 4]`` is + extended to ``[0, 1, 2, 3, 4, 0]`` for ``nperseg=3``. + padded : bool, optional + Specifies whether the input signal is zero-padded at the end to + make the signal fit exactly into an integer number of window + segments, so that all of the signal is included in the output. + Defaults to `True`. Padding occurs after boundary extension, if + `boundary` is not `None`, and `padded` is `True`, as is the + default. + axis : int, optional + Axis along which the STFT is computed; the default is over the + last axis (i.e. 
``axis=-1``). + scaling: {'spectrum', 'psd'} + The default 'spectrum' scaling allows each frequency line of `Zxx` to + be interpreted as a magnitude spectrum. The 'psd' option scales each + line to a power spectral density - it allows to calculate the signal's + energy by numerically integrating over ``abs(Zxx)**2``. + + Returns + ------- + f : ndarray + Array of sample frequencies. + t : ndarray + Array of segment times. + Zxx : ndarray + STFT of `x`. By default, the last axis of `Zxx` corresponds + to the segment times. + + See Also + -------- + welch: Power spectral density by Welch's method. + spectrogram: Spectrogram by Welch's method. + csd: Cross spectral density by Welch's method. + lombscargle: Lomb-Scargle periodogram for unevenly sampled data + + Notes + ----- + In order to enable inversion of an STFT via the inverse STFT in + `istft`, the signal windowing must obey the constraint of "Nonzero + OverLap Add" (NOLA), and the input signal must have complete + windowing coverage (i.e. ``(x.shape[axis] - nperseg) % + (nperseg-noverlap) == 0``). The `padded` argument may be used to + accomplish this. + + Given a time-domain signal :math:`x[n]`, a window :math:`w[n]`, and a hop + size :math:`H` = `nperseg - noverlap`, the windowed frame at time index + :math:`t` is given by + + .. math:: x_{t}[n]=x[n]w[n-tH] + + The overlap-add (OLA) reconstruction equation is given by + + .. math:: x[n]=\frac{\sum_{t}x_{t}[n]w[n-tH]}{\sum_{t}w^{2}[n-tH]} + + The NOLA constraint ensures that every normalization term that appears + in the denomimator of the OLA reconstruction equation is nonzero. Whether a + choice of `window`, `nperseg`, and `noverlap` satisfy this constraint can + be tested with `check_NOLA`. + + See [1]_, [2]_ for more information. + + References + ---------- + .. [1] Oppenheim, Alan V., Ronald W. Schafer, John R. Buck + "Discrete-Time Signal Processing", Prentice Hall, 1999. + .. [2] Daniel W. Griffin, Jae S. 
Lim "Signal Estimation from + Modified Short-Time Fourier Transform", IEEE 1984, + 10.1109/TASSP.1984.1164317 + + Examples + -------- + >>> import cupy + >>> import cupyx.scipy.signal import stft + >>> import matplotlib.pyplot as plt + + Generate a test signal, a 2 Vrms sine wave whose frequency is slowly + modulated around 3kHz, corrupted by white noise of exponentially + decreasing magnitude sampled at 10 kHz. + + >>> fs = 10e3 + >>> N = 1e5 + >>> amp = 2 * cupy.sqrt(2) + >>> noise_power = 0.01 * fs / 2 + >>> time = cupy.arange(N) / float(fs) + >>> mod = 500*cupy.cos(2*cupy.pi*0.25*time) + >>> carrier = amp * cupy.sin(2*cupy.pi*3e3*time + mod) + >>> noise = cupy.random.normal(scale=cupy.sqrt(noise_power), + ... size=time.shape) + >>> noise *= cupy.exp(-time/5) + >>> x = carrier + noise + + Compute and plot the STFT's magnitude. + + >>> f, t, Zxx = stft(x, fs, nperseg=1000) + >>> plt.pcolormesh(cupy.asnumpy(t), cupy.asnumpy(f), + ... cupy.asnumpy(cupy.abs(Zxx)), vmin=0, vmax=amp) + >>> plt.title('STFT Magnitude') + >>> plt.ylabel('Frequency [Hz]') + >>> plt.xlabel('Time [sec]') + >>> plt.show() + """ + if scaling == 'psd': + scaling = 'density' + elif scaling != 'spectrum': + raise ValueError(f"Parameter {scaling=} not in ['spectrum', 'psd']!") + + freqs, time, Zxx = _spectral_helper( + x, + x, + fs, + window, + nperseg, + noverlap, + nfft, + detrend, + return_onesided, + scaling=scaling, + axis=axis, + mode="stft", + boundary=boundary, + padded=padded, + ) + + return freqs, time, Zxx + + +def istft( + Zxx, + fs=1.0, + window="hann", + nperseg=None, + noverlap=None, + nfft=None, + input_onesided=True, + boundary=True, + time_axis=-1, + freq_axis=-2, + scaling='spectrum' +): + r""" + Perform the inverse Short Time Fourier transform (iSTFT). + + Parameters + ---------- + Zxx : array_like + STFT of the signal to be reconstructed. If a purely real array + is passed, it will be cast to a complex data type. + fs : float, optional + Sampling frequency of the time series. 
Defaults to 1.0. + window : str or tuple or array_like, optional + Desired window to use. If `window` is a string or tuple, it is + passed to `get_window` to generate the window values, which are + DFT-even by default. See `get_window` for a list of windows and + required parameters. If `window` is array_like it will be used + directly as the window and its length must be nperseg. Defaults + to a Hann window. Must match the window used to generate the + STFT for faithful inversion. + nperseg : int, optional + Number of data points corresponding to each STFT segment. This + parameter must be specified if the number of data points per + segment is odd, or if the STFT was padded via ``nfft > + nperseg``. If `None`, the value depends on the shape of + `Zxx` and `input_onesided`. If `input_onesided` is `True`, + ``nperseg=2*(Zxx.shape[freq_axis] - 1)``. Otherwise, + ``nperseg=Zxx.shape[freq_axis]``. Defaults to `None`. + noverlap : int, optional + Number of points to overlap between segments. If `None`, half + of the segment length. Defaults to `None`. When specified, the + COLA constraint must be met (see Notes below), and should match + the parameter used to generate the STFT. Defaults to `None`. + nfft : int, optional + Number of FFT points corresponding to each STFT segment. This + parameter must be specified if the STFT was padded via ``nfft > + nperseg``. If `None`, the default values are the same as for + `nperseg`, detailed above, with one exception: if + `input_onesided` is True and + ``nperseg==2*Zxx.shape[freq_axis] - 1``, `nfft` also takes on + that value. This case allows the proper inversion of an + odd-length unpadded STFT using ``nfft=None``. Defaults to + `None`. + input_onesided : bool, optional + If `True`, interpret the input array as one-sided FFTs, such + as is returned by `stft` with ``return_onesided=True`` and + `numpy.fft.rfft`. If `False`, interpret the input as a + two-sided FFT. Defaults to `True`. 
+ boundary : bool, optional + Specifies whether the input signal was extended at its + boundaries by supplying a non-`None` ``boundary`` argument to + `stft`. Defaults to `True`. + time_axis : int, optional + Where the time segments of the STFT is located; the default is + the last axis (i.e. ``axis=-1``). + freq_axis : int, optional + Where the frequency axis of the STFT is located; the default is + the penultimate axis (i.e. ``axis=-2``). + scaling: {'spectrum', 'psd'} + The default 'spectrum' scaling allows each frequency line of `Zxx` to + be interpreted as a magnitude spectrum. The 'psd' option scales each + line to a power spectral density - it allows to calculate the signal's + energy by numerically integrating over ``abs(Zxx)**2``. + + Returns + ------- + t : ndarray + Array of output data times. + x : ndarray + iSTFT of `Zxx`. + + See Also + -------- + stft: Short Time Fourier Transform + check_COLA: Check whether the Constant OverLap Add (COLA) constraint + is met + check_NOLA: Check whether the Nonzero Overlap Add (NOLA) constraint is met + + Notes + ----- + In order to enable inversion of an STFT via the inverse STFT with + `istft`, the signal windowing must obey the constraint of "nonzero + overlap add" (NOLA): + + .. math:: \sum_{t}w^{2}[n-tH] \ne 0 + + This ensures that the normalization factors that appear in the denominator + of the overlap-add reconstruction equation + + .. math:: x[n]=\frac{\sum_{t}x_{t}[n]w[n-tH]}{\sum_{t}w^{2}[n-tH]} + + are not zero. The NOLA constraint can be checked with the `check_NOLA` + function. + + An STFT which has been modified (via masking or otherwise) is not + guaranteed to correspond to a exactly realizible signal. This + function implements the iSTFT via the least-squares estimation + algorithm detailed in [2]_, which produces a signal that minimizes + the mean squared error between the STFT of the returned signal and + the modified STFT. + + See [1]_, [2]_ for more information. + + References + ---------- + .. 
[1] Oppenheim, Alan V., Ronald W. Schafer, John R. Buck + "Discrete-Time Signal Processing", Prentice Hall, 1999. + .. [2] Daniel W. Griffin, Jae S. Lim "Signal Estimation from + Modified Short-Time Fourier Transform", IEEE 1984, + 10.1109/TASSP.1984.1164317 + + Examples + -------- + >>> import cupy + >>> from cupyx.scipy.signal import stft, istft + >>> import matplotlib.pyplot as plt + + Generate a test signal, a 2 Vrms sine wave at 50Hz corrupted by + 0.001 V**2/Hz of white noise sampled at 1024 Hz. + + >>> fs = 1024 + >>> N = 10*fs + >>> nperseg = 512 + >>> amp = 2 * np.sqrt(2) + >>> noise_power = 0.001 * fs / 2 + >>> time = cupy.arange(N) / float(fs) + >>> carrier = amp * cupy.sin(2*cupy.pi*50*time) + >>> noise = cupy.random.normal(scale=cupy.sqrt(noise_power), + ... size=time.shape) + >>> x = carrier + noise + + Compute the STFT, and plot its magnitude + + >>> f, t, Zxx = cusignal.stft(x, fs=fs, nperseg=nperseg) + >>> f = cupy.asnumpy(f) + >>> t = cupy.asnumpy(t) + >>> Zxx = cupy.asnumpy(Zxx) + >>> plt.figure() + >>> plt.pcolormesh(t, f, np.abs(Zxx), vmin=0, vmax=amp, shading='gouraud') + >>> plt.ylim([f[1], f[-1]]) + >>> plt.title('STFT Magnitude') + >>> plt.ylabel('Frequency [Hz]') + >>> plt.xlabel('Time [sec]') + >>> plt.yscale('log') + >>> plt.show() + + Zero the components that are 10% or less of the carrier magnitude, + then convert back to a time series via inverse STFT + + >>> Zxx = cupy.where(cupy.abs(Zxx) >= amp/10, Zxx, 0) + >>> _, xrec = cusignal.istft(Zxx, fs) + >>> xrec = cupy.asnumpy(xrec) + >>> x = cupy.asnumpy(x) + >>> time = cupy.asnumpy(time) + >>> carrier = cupy.asnumpy(carrier) + + Compare the cleaned signal with the original and true carrier signals. 
+ + >>> plt.figure() + >>> plt.plot(time, x, time, xrec, time, carrier) + >>> plt.xlim([2, 2.1])*+ + >>> plt.xlabel('Time [sec]') + >>> plt.ylabel('Signal') + >>> plt.legend(['Carrier + Noise', 'Filtered via STFT', 'True Carrier']) + >>> plt.show() + + Note that the cleaned signal does not start as abruptly as the original, + since some of the coefficients of the transient were also removed: + + >>> plt.figure() + >>> plt.plot(time, x, time, xrec, time, carrier) + >>> plt.xlim([0, 0.1]) + >>> plt.xlabel('Time [sec]') + >>> plt.ylabel('Signal') + >>> plt.legend(['Carrier + Noise', 'Filtered via STFT', 'True Carrier']) + >>> plt.show() + """ + + # Make sure input is an ndarray of appropriate complex dtype + Zxx = cupy.asarray(Zxx) + 0j + freq_axis = int(freq_axis) + time_axis = int(time_axis) + + if Zxx.ndim < 2: + raise ValueError("Input stft must be at least 2d!") + + if freq_axis == time_axis: + raise ValueError("Must specify differing time and frequency axes!") + + nseg = Zxx.shape[time_axis] + + if input_onesided: + # Assume even segment length + n_default = 2 * (Zxx.shape[freq_axis] - 1) + else: + n_default = Zxx.shape[freq_axis] + + # Check windowing parameters + if nperseg is None: + nperseg = n_default + else: + nperseg = int(nperseg) + if nperseg < 1: + raise ValueError("nperseg must be a positive integer") + + if nfft is None: + if (input_onesided) and (nperseg == n_default + 1): + # Odd nperseg, no FFT padding + nfft = nperseg + else: + nfft = n_default + elif nfft < nperseg: + raise ValueError("nfft must be greater than or equal to nperseg.") + else: + nfft = int(nfft) + + if noverlap is None: + noverlap = nperseg // 2 + else: + noverlap = int(noverlap) + if noverlap >= nperseg: + raise ValueError("noverlap must be less than nperseg.") + nstep = nperseg - noverlap + + # Rearrange axes if necessary + if time_axis != Zxx.ndim - 1 or freq_axis != Zxx.ndim - 2: + # Turn negative indices to positive for the call to transpose + if freq_axis < 0: + freq_axis = 
Zxx.ndim + freq_axis + if time_axis < 0: + time_axis = Zxx.ndim + time_axis + zouter = list(range(Zxx.ndim)) + for ax in sorted([time_axis, freq_axis], reverse=True): + zouter.pop(ax) + Zxx = cupy.transpose(Zxx, zouter + [freq_axis, time_axis]) + + # Get window as array + if isinstance(window, str) or type(window) is tuple: + win = get_window(window, nperseg) + else: + win = cupy.asarray(window) + if len(win.shape) != 1: + raise ValueError("window must be 1-D") + if win.shape[0] != nperseg: + raise ValueError("window must have length of {0}".format(nperseg)) + + ifunc = cupy.fft.irfft if input_onesided else cupy.fft.ifft + xsubs = ifunc(Zxx, axis=-2, n=nfft)[..., :nperseg, :] + + # Initialize output and normalization arrays + outputlength = nperseg + (nseg - 1) * nstep + x = cupy.zeros(list(Zxx.shape[:-2]) + [outputlength], dtype=xsubs.dtype) + norm = cupy.zeros(outputlength, dtype=xsubs.dtype) + + if cupy.result_type(win, xsubs) != xsubs.dtype: + win = win.astype(xsubs.dtype) + + if scaling == 'spectrum': + xsubs *= win.sum() + elif scaling == 'psd': + xsubs *= cupy.sqrt(fs * cupy.sum(win**2)) + else: + raise ValueError(f"Parameter {scaling=} not in ['spectrum', 'psd']!") + + for ii in range(nseg): + # Window the ifft + x[..., ii * nstep:ii * nstep + nperseg] += xsubs[..., ii] * win + norm[..., ii * nstep:ii * nstep + nperseg] += win**2 + + # Remove extension points + if boundary: + x = x[..., nperseg // 2: -(nperseg // 2)] + norm = norm[..., nperseg // 2: -(nperseg // 2)] + + # Divide out normalization where non-tiny + if cupy.sum(norm > 1e-10) != len(norm): + warnings.warn("NOLA condition failed, STFT may not be invertible") + x /= cupy.where(norm > 1e-10, norm, 1.0) + + if input_onesided: + x = x.real + + # Put axes back + if x.ndim > 1: + if time_axis != Zxx.ndim - 1: + if freq_axis < time_axis: + time_axis -= 1 + x = cupy.rollaxis(x, -1, time_axis) + + time = cupy.arange(x.shape[0]) / float(fs) + return time, x + + +def spectrogram( + x, + fs=1.0, + 
window=("tukey", 0.25), + nperseg=None, + noverlap=None, + nfft=None, + detrend="constant", + return_onesided=True, + scaling="density", + axis=-1, + mode="psd", +): + """ + Compute a spectrogram with consecutive Fourier transforms. + + Spectrograms can be used as a way of visualizing the change of a + nonstationary signal's frequency content over time. + + Parameters + ---------- + x : array_like + Time series of measurement values + fs : float, optional + Sampling frequency of the `x` time series. Defaults to 1.0. + window : str or tuple or array_like, optional + Desired window to use. If `window` is a string or tuple, it is + passed to `get_window` to generate the window values, which are + DFT-even by default. See `get_window` for a list of windows and + required parameters. If `window` is array_like it will be used + directly as the window and its length must be nperseg. + Defaults to a Tukey window with shape parameter of 0.25. + nperseg : int, optional + Length of each segment. Defaults to None, but if window is str or + tuple, is set to 256, and if window is array_like, is set to the + length of the window. + noverlap : int, optional + Number of points to overlap between segments. If `None`, + ``noverlap = nperseg // 8``. Defaults to `None`. + nfft : int, optional + Length of the FFT used, if a zero padded FFT is desired. If + `None`, the FFT length is `nperseg`. Defaults to `None`. + detrend : str or function or `False`, optional + Specifies how to detrend each segment. If `detrend` is a + string, it is passed as the `type` argument to the `detrend` + function. If it is a function, it takes a segment and returns a + detrended segment. If `detrend` is `False`, no detrending is + done. Defaults to 'constant'. + return_onesided : bool, optional + If `True`, return a one-sided spectrum for real data. If + `False` return a two-sided spectrum. Defaults to `True`, but for + complex data, a two-sided spectrum is always returned. 
+ scaling : { 'density', 'spectrum' }, optional + Selects between computing the power spectral density ('density') + where `Sxx` has units of V**2/Hz and computing the power + spectrum ('spectrum') where `Sxx` has units of V**2, if `x` + is measured in V and `fs` is measured in Hz. Defaults to + 'density'. + axis : int, optional + Axis along which the spectrogram is computed; the default is over + the last axis (i.e. ``axis=-1``). + mode : str, optional + Defines what kind of return values are expected. Options are + ['psd', 'complex', 'magnitude', 'angle', 'phase']. 'complex' is + equivalent to the output of `stft` with no padding or boundary + extension. 'magnitude' returns the absolute magnitude of the + STFT. 'angle' and 'phase' return the complex angle of the STFT, + with and without unwrapping, respectively. + + Returns + ------- + f : ndarray + Array of sample frequencies. + t : ndarray + Array of segment times. + Sxx : ndarray + Spectrogram of x. By default, the last axis of Sxx corresponds + to the segment times. + + See Also + -------- + periodogram: Simple, optionally modified periodogram + lombscargle: Lomb-Scargle periodogram for unevenly sampled data + welch: Power spectral density by Welch's method. + csd: Cross spectral density by Welch's method. + + Notes + ----- + An appropriate amount of overlap will depend on the choice of window + and on your requirements. In contrast to welch's method, where the + entire data stream is averaged over, one may wish to use a smaller + overlap (or perhaps none at all) when computing a spectrogram, to + maintain some statistical independence between individual segments. + It is for this reason that the default window is a Tukey window with + 1/8th of a window's length overlap at each end. See [1]_ for more + information. + + References + ---------- + .. [1] Oppenheim, Alan V., Ronald W. Schafer, John R. Buck + "Discrete-Time Signal Processing", Prentice Hall, 1999. 
+ + Examples + -------- + >>> import cupy + >>> from cupyx.scipy.signal import spectrogram + >>> import matplotlib.pyplot as plt + + Generate a test signal, a 2 Vrms sine wave whose frequency is slowly + modulated around 3kHz, corrupted by white noise of exponentially + decreasing magnitude sampled at 10 kHz. + + >>> fs = 10e3 + >>> N = 1e5 + >>> amp = 2 * cupy.sqrt(2) + >>> noise_power = 0.01 * fs / 2 + >>> time = cupy.arange(N) / float(fs) + >>> mod = 500*cupy.cos(2*cupy.pi*0.25*time) + >>> carrier = amp * cupy.sin(2*cupy.pi*3e3*time + mod) + >>> noise = cupy.random.normal( + ... scale=cupy.sqrt(noise_power), size=time.shape) + >>> noise *= cupy.exp(-time/5) + >>> x = carrier + noise + + Compute and plot the spectrogram. + + >>> f, t, Sxx = spectrogram(x, fs) + >>> plt.pcolormesh(cupy.asnumpy(t), cupy.asnumpy(f), cupy.asnumpy(Sxx)) + >>> plt.ylabel('Frequency [Hz]') + >>> plt.xlabel('Time [sec]') + >>> plt.show() + + Note, if using output that is not one sided, then use the following: + + >>> f, t, Sxx = spectrogram(x, fs, return_onesided=False) + >>> plt.pcolormesh(cupy.asnumpy(t), cupy.fft.fftshift(f), \ + cupy.fft.fftshift(Sxx, axes=0)) + >>> plt.ylabel('Frequency [Hz]') + >>> plt.xlabel('Time [sec]') + >>> plt.show() + """ + modelist = ["psd", "complex", "magnitude", "angle", "phase"] + if mode not in modelist: + raise ValueError( + "unknown value for mode {}, must be one of {}".format( + mode, modelist) + ) + + # need to set default for nperseg before setting default for noverlap below + window, nperseg = _triage_segments( + window, nperseg, input_length=x.shape[axis]) + + # Less overlap than welch, so samples are more statisically independent + if noverlap is None: + noverlap = nperseg // 8 + + if mode == "psd": + freqs, time, Sxx = _spectral_helper( + x, + x, + fs, + window, + nperseg, + noverlap, + nfft, + detrend, + return_onesided, + scaling, + axis, + mode="psd", + ) + + else: + freqs, time, Sxx = _spectral_helper( + x, + x, + fs, + window, + nperseg, 
+ noverlap, + nfft, + detrend, + return_onesided, + scaling, + axis, + mode="stft", + ) + + if mode == "magnitude": + Sxx = cupy.abs(Sxx) + elif mode in ["angle", "phase"]: + Sxx = cupy.angle(Sxx) + if mode == "phase": + # Sxx has one additional dimension for time strides + if axis < 0: + axis -= 1 + Sxx = cupy.unwrap(Sxx, axis=axis) + + # mode =='complex' is same as `stft`, doesn't need modification + + return freqs, time, Sxx + + +def coherence( + x, + y, + fs=1.0, + window="hann", + nperseg=None, + noverlap=None, + nfft=None, + detrend="constant", + axis=-1, +): + r""" + Estimate the magnitude squared coherence estimate, Cxy, of + discrete-time signals X and Y using Welch's method. + + ``Cxy = abs(Pxy)**2/(Pxx*Pyy)``, where `Pxx` and `Pyy` are power + spectral density estimates of X and Y, and `Pxy` is the cross + spectral density estimate of X and Y. + + Parameters + ---------- + x : array_like + Time series of measurement values + y : array_like + Time series of measurement values + fs : float, optional + Sampling frequency of the `x` and `y` time series. Defaults + to 1.0. + window : str or tuple or array_like, optional + Desired window to use. If `window` is a string or tuple, it is + passed to `get_window` to generate the window values, which are + DFT-even by default. See `get_window` for a list of windows and + required parameters. If `window` is array_like it will be used + directly as the window and its length must be nperseg. Defaults + to a Hann window. + nperseg : int, optional + Length of each segment. Defaults to None, but if window is str or + tuple, is set to 256, and if window is array_like, is set to the + length of the window. + noverlap: int, optional + Number of points to overlap between segments. If `None`, + ``noverlap = nperseg // 2``. Defaults to `None`. + nfft : int, optional + Length of the FFT used, if a zero padded FFT is desired. If + `None`, the FFT length is `nperseg`. Defaults to `None`. 
+ detrend : str or function or `False`, optional + Specifies how to detrend each segment. If `detrend` is a + string, it is passed as the `type` argument to the `detrend` + function. If it is a function, it takes a segment and returns a + detrended segment. If `detrend` is `False`, no detrending is + done. Defaults to 'constant'. + axis : int, optional + Axis along which the coherence is computed for both inputs; the + default is over the last axis (i.e. ``axis=-1``). + + Returns + ------- + f : ndarray + Array of sample frequencies. + Cxy : ndarray + Magnitude squared coherence of x and y. + + See Also + -------- + periodogram: Simple, optionally modified periodogram + lombscargle: Lomb-Scargle periodogram for unevenly sampled data + welch: Power spectral density by Welch's method. + csd: Cross spectral density by Welch's method. + + Notes + ----- + An appropriate amount of overlap will depend on the choice of window + and on your requirements. For the default Hann window an overlap of + 50% is a reasonable trade off between accurately estimating the + signal power, while not over counting any of the data. Narrower + windows may require a larger overlap. See [1]_ and [2]_ for more + information. + + References + ---------- + .. [1] P. Welch, "The use of the fast Fourier transform for the + estimation of power spectra: A method based on time averaging + over short, modified periodograms", IEEE Trans. Audio + Electroacoust. vol. 15, pp. 70-73, 1967. + .. [2] Stoica, Petre, and Randolph Moses, "Spectral Analysis of + Signals" Prentice Hall, 2005 + + Examples + -------- + >>> import cupy as cp + >>> from cupyx.scipy.signal import butter, lfilter, coherence + >>> import matplotlib.pyplot as plt + + Generate two test signals with some common features. + + >>> fs = 10e3 + >>> N = 1e5 + >>> amp = 20 + >>> freq = 1234.0 + >>> noise_power = 0.001 * fs / 2 + >>> time = cupy.arange(N) / fs + >>> b, a = butter(2, 0.25, 'low') + >>> x = cupy.random.normal( + ... 
scale=cupy.sqrt(noise_power), size=time.shape) + >>> y = lfilter(b, a, x) + >>> x += amp * cupy.sin(2*cupy.pi*freq*time) + >>> y += cupy.random.normal( + ... scale=0.1*cupy.sqrt(noise_power), size=time.shape) + + Compute and plot the coherence. + + >>> f, Cxy = coherence(x, y, fs, nperseg=1024) + >>> plt.semilogy(cupy.asnumpy(f), cupy.asnumpy(Cxy)) + >>> plt.xlabel('frequency [Hz]') + >>> plt.ylabel('Coherence') + >>> plt.show() + """ + + freqs, Pxx = welch( + x, + fs=fs, + window=window, + nperseg=nperseg, + noverlap=noverlap, + nfft=nfft, + detrend=detrend, + axis=axis, + ) + _, Pyy = welch( + y, + fs=fs, + window=window, + nperseg=nperseg, + noverlap=noverlap, + nfft=nfft, + detrend=detrend, + axis=axis, + ) + _, Pxy = csd( + x, + y, + fs=fs, + window=window, + nperseg=nperseg, + noverlap=noverlap, + nfft=nfft, + detrend=detrend, + axis=axis, + ) + + Cxy = cupy.abs(Pxy) ** 2 / Pxx / Pyy + + return freqs, Cxy + + +def vectorstrength(events, period): + """ + Determine the vector strength of the events corresponding to the given + period. + + The vector strength is a measure of phase synchrony, how well the + timing of the events is synchronized to a single period of a periodic + signal. + + If multiple periods are used, calculate the vector strength of each. + This is called the "resonating vector strength". + + Parameters + ---------- + events : 1D array_like + An array of time points containing the timing of the events. + period : float or array_like + The period of the signal that the events should synchronize to. + The period is in the same units as `events`. It can also be an array + of periods, in which case the outputs are arrays of the same length. + + Returns + ------- + strength : float or 1D array + The strength of the synchronization. 1.0 is perfect synchronization + and 0.0 is no synchronization. If `period` is an array, this is also + an array with each element containing the vector strength at the + corresponding period. 
+ phase : float or array + The phase that the events are most strongly synchronized to in radians. + If `period` is an array, this is also an array with each element + containing the phase for the corresponding period. + + Notes + ----- + See [1]_, [2]_ and [3]_ for more information. + + References + ---------- + .. [1] van Hemmen, JL, Longtin, A, and Vollmayr, AN. Testing resonating + vector strength: Auditory system, electric fish, and noise. + Chaos 21, 047508 (2011). + .. [2] van Hemmen, JL. Vector strength after Goldberg, Brown, and + von Mises: biological and mathematical perspectives. Biol Cybern. + 2013 Aug;107(4):385-96. + .. [3] van Hemmen, JL and Vollmayr, AN. Resonating vector strength: + what happens when we vary the "probing" frequency while keeping + the spike times fixed. Biol Cybern. 2013 Aug;107(4):491-94. + """ + events = cupy.asarray(events) + period = cupy.asarray(period) + if events.ndim > 1: + raise ValueError("events cannot have dimensions more than 1") + if period.ndim > 1: + raise ValueError("period cannot have dimensions more than 1") + + # we need to know later if period was originally a scalar + scalarperiod = not period.ndim + + events = cupy.atleast_2d(events) + period = cupy.atleast_2d(period) + if (period <= 0).any(): + raise ValueError("periods must be positive") + + # this converts the times to vectors + vectors = cupy.exp(cupy.dot(2j * cupy.pi / period.T, events)) + + # the vector strength is just the magnitude of the mean of the vectors + # the vector phase is the angle of the mean of the vectors + vectormean = cupy.mean(vectors, axis=1) + strength = cupy.abs(vectormean) + phase = cupy.angle(vectormean) + + # if the original period was a scalar, return scalars + if scalarperiod: + strength = strength[0] + phase = phase[0] + return strength, phase diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/_spectral_impl.py b/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/_spectral_impl.py new file mode 100644 index 
0000000000000000000000000000000000000000..165e79a4b44fafd8b6cc4d9400ced9ad6159933c --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/_spectral_impl.py @@ -0,0 +1,666 @@ +""" +Spectral analysis functions and utilities. + +Some of the functions defined here were ported directly from CuSignal under +terms of the MIT license, under the following notice: + +Copyright (c) 2019-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + +Permission is hereby granted, free of charge, to any person obtaining a +copy of this software and associated documentation files (the "Software"), +to deal in the Software without restriction, including without limitation +the rights to use, copy, modify, merge, publish, distribute, sublicense, +and/or sell copies of the Software, and to permit persons to whom the +Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. 
+""" + +import warnings + +import cupy + +import cupyx.scipy.signal._signaltools as filtering +from cupyx.scipy.signal._arraytools import ( + odd_ext, even_ext, zero_ext, const_ext, _as_strided) +from cupyx.scipy.signal.windows._windows import get_window + + +def _get_raw_typename(dtype): + return cupy.dtype(dtype).name + + +def _get_module_func_raw(module, func_name, *template_args): + args_dtypes = [_get_raw_typename(arg.dtype) for arg in template_args] + template = '_'.join(args_dtypes) + kernel_name = f'{func_name}_{template}' if template_args else func_name + kernel = module.get_function(kernel_name) + return kernel + + +LOMBSCARGLE_KERNEL = r""" + +/////////////////////////////////////////////////////////////////////////////// +// LOMBSCARGLE // +/////////////////////////////////////////////////////////////////////////////// + +template +__device__ void _cupy_lombscargle_float( const int x_shape, + const int freqs_shape, + const T *__restrict__ x, + const T *__restrict__ y, + const T *__restrict__ freqs, + T *__restrict__ pgram, + const T *__restrict__ y_dot ) { + + const int tx { static_cast( blockIdx.x * blockDim.x + threadIdx.x ) }; + const int stride { static_cast( blockDim.x * gridDim.x ) }; + + T yD {}; + if ( y_dot[0] == 0 ) { + yD = 1.0f; + } else { + yD = 2.0f / y_dot[0]; + } + + for ( int tid = tx; tid < freqs_shape; tid += stride ) { + + T freq { freqs[tid] }; + + T xc {}; + T xs {}; + T cc {}; + T ss {}; + T cs {}; + T c {}; + T s {}; + + for ( int j = 0; j < x_shape; j++ ) { + sincosf( freq * x[j], &s, &c ); + xc += y[j] * c; + xs += y[j] * s; + cc += c * c; + ss += s * s; + cs += c * s; + } + + T c_tau {}; + T s_tau {}; + T tau { atan2f( 2.0f * cs, cc - ss ) / ( 2.0f * freq ) }; + sincosf( freq * tau, &s_tau, &c_tau ); + T c_tau2 { c_tau * c_tau }; + T s_tau2 { s_tau * s_tau }; + T cs_tau { 2.0f * c_tau * s_tau }; + + pgram[tid] = ( 0.5f * ( ( ( c_tau * xc + s_tau * xs ) * + ( c_tau * xc + s_tau * xs ) / + ( c_tau2 * cc + cs_tau * cs + s_tau2 * 
ss ) ) + + ( ( c_tau * xs - s_tau * xc ) * + ( c_tau * xs - s_tau * xc ) / + ( c_tau2 * ss - cs_tau * cs + s_tau2 * cc ) ) ) ) * + yD; + } +} + +extern "C" __global__ void __launch_bounds__( 512 ) _cupy_lombscargle_float32( + const int x_shape, const int freqs_shape, const float *__restrict__ x, + const float *__restrict__ y, const float *__restrict__ freqs, + float *__restrict__ pgram, const float *__restrict__ y_dot ) { + _cupy_lombscargle_float( x_shape, freqs_shape, x, y, + freqs, pgram, y_dot ); +} + +template +__device__ void _cupy_lombscargle_double( const int x_shape, + const int freqs_shape, + const T *__restrict__ x, + const T *__restrict__ y, + const T *__restrict__ freqs, + T *__restrict__ pgram, + const T *__restrict__ y_dot ) { + + const int tx { static_cast( blockIdx.x * blockDim.x + threadIdx.x ) }; + const int stride { static_cast( blockDim.x * gridDim.x ) }; + + T yD {}; + if ( y_dot[0] == 0 ) { + yD = 1.0; + } else { + yD = 2.0 / y_dot[0]; + } + + for ( int tid = tx; tid < freqs_shape; tid += stride ) { + + T freq { freqs[tid] }; + + T xc {}; + T xs {}; + T cc {}; + T ss {}; + T cs {}; + T c {}; + T s {}; + + for ( int j = 0; j < x_shape; j++ ) { + + sincos( freq * x[j], &s, &c ); + xc += y[j] * c; + xs += y[j] * s; + cc += c * c; + ss += s * s; + cs += c * s; + } + + T c_tau {}; + T s_tau {}; + T tau { atan2( 2.0 * cs, cc - ss ) / ( 2.0 * freq ) }; + sincos( freq * tau, &s_tau, &c_tau ); + T c_tau2 { c_tau * c_tau }; + T s_tau2 { s_tau * s_tau }; + T cs_tau { 2.0 * c_tau * s_tau }; + + pgram[tid] = ( 0.5 * ( ( ( c_tau * xc + s_tau * xs ) * + ( c_tau * xc + s_tau * xs ) / + ( c_tau2 * cc + cs_tau * cs + s_tau2 * ss ) ) + + ( ( c_tau * xs - s_tau * xc ) * + ( c_tau * xs - s_tau * xc ) / + ( c_tau2 * ss - cs_tau * cs + s_tau2 * cc ) ) ) ) * + yD; + } +} + +extern "C" __global__ void __launch_bounds__( 512 ) _cupy_lombscargle_float64( + const int x_shape, const int freqs_shape, const double *__restrict__ x, + const double *__restrict__ y, const 
double *__restrict__ freqs, + double *__restrict__ pgram, const double *__restrict__ y_dot ) { + + _cupy_lombscargle_double( x_shape, freqs_shape, x, y, freqs, + pgram, y_dot ); +} +""" # NOQA + + +LOMBSCARGLE_MODULE = cupy.RawModule( + code=LOMBSCARGLE_KERNEL, options=('-std=c++11',), + name_expressions=['_cupy_lombscargle_float32', + '_cupy_lombscargle_float64']) + + +def _lombscargle(x, y, freqs, pgram, y_dot): + device_id = cupy.cuda.Device() + + num_blocks = device_id.attributes["MultiProcessorCount"] * 20 + block_sz = 512 + lombscargle_kernel = _get_module_func_raw( + LOMBSCARGLE_MODULE, '_cupy_lombscargle', x) + + args = (x.shape[0], freqs.shape[0], x, y, freqs, pgram, y_dot) + lombscargle_kernel((num_blocks,), (block_sz,), args) + + +def _spectral_helper( + x, + y, + fs=1.0, + window="hann", + nperseg=None, + noverlap=None, + nfft=None, + detrend="constant", + return_onesided=True, + scaling="density", + axis=-1, + mode="psd", + boundary=None, + padded=False, +): + """ + Calculate various forms of windowed FFTs for PSD, CSD, etc. + + This is a helper function that implements the commonality between + the stft, psd, csd, and spectrogram functions. It is not designed to + be called externally. The windows are not averaged over; the result + from each window is returned. + + Parameters + --------- + x : array_like + Array or sequence containing the data to be analyzed. + y : array_like + Array or sequence containing the data to be analyzed. If this is + the same object in memory as `x` (i.e. ``_spectral_helper(x, + x, ...)``), the extra computations are spared. + fs : float, optional + Sampling frequency of the time series. Defaults to 1.0. + window : str or tuple or array_like, optional + Desired window to use. If `window` is a string or tuple, it is + passed to `get_window` to generate the window values, which are + DFT-even by default. See `get_window` for a list of windows and + required parameters. 
If `window` is array_like it will be used + directly as the window and its length must be nperseg. Defaults + to a Hann window. + nperseg : int, optional + Length of each segment. Defaults to None, but if window is str or + tuple, is set to 256, and if window is array_like, is set to the + length of the window. + noverlap : int, optional + Number of points to overlap between segments. If `None`, + ``noverlap = nperseg // 2``. Defaults to `None`. + nfft : int, optional + Length of the FFT used, if a zero padded FFT is desired. If + `None`, the FFT length is `nperseg`. Defaults to `None`. + detrend : str or function or `False`, optional + Specifies how to detrend each segment. If `detrend` is a + string, it is passed as the `type` argument to the `detrend` + function. If it is a function, it takes a segment and returns a + detrended segment. If `detrend` is `False`, no detrending is + done. Defaults to 'constant'. + return_onesided : bool, optional + If `True`, return a one-sided spectrum for real data. If + `False` return a two-sided spectrum. Defaults to `True`, but for + complex data, a two-sided spectrum is always returned. + scaling : { 'density', 'spectrum' }, optional + Selects between computing the cross spectral density ('density') + where `Pxy` has units of V**2/Hz and computing the cross + spectrum ('spectrum') where `Pxy` has units of V**2, if `x` + and `y` are measured in V and `fs` is measured in Hz. + Defaults to 'density' + axis : int, optional + Axis along which the FFTs are computed; the default is over the + last axis (i.e. ``axis=-1``). + mode: str {'psd', 'stft'}, optional + Defines what kind of return values are expected. Defaults to + 'psd'. + boundary : str or None, optional + Specifies whether the input signal is extended at both ends, and + how to generate the new values, in order to center the first + windowed segment on the first input point. 
This has the benefit + of enabling reconstruction of the first input point when the + employed window function starts at zero. Valid options are + ``['even', 'odd', 'constant', 'zeros', None]``. Defaults to + `None`. + padded : bool, optional + Specifies whether the input signal is zero-padded at the end to + make the signal fit exactly into an integer number of window + segments, so that all of the signal is included in the output. + Defaults to `False`. Padding occurs after boundary extension, if + `boundary` is not `None`, and `padded` is `True`. + + Returns + ------- + freqs : ndarray + Array of sample frequencies. + t : ndarray + Array of times corresponding to each data segment + result : ndarray + Array of output data, contents dependent on *mode* kwarg. + + Notes + ----- + Adapted from matplotlib.mlab + + """ + if mode not in ["psd", "stft"]: + raise ValueError( + f"Unknown value for mode {mode}, must be one of: " + "{'psd', 'stft'}" + ) + + boundary_funcs = { + "even": even_ext, + "odd": odd_ext, + "constant": const_ext, + "zeros": zero_ext, + None: None, + } + + if boundary not in boundary_funcs: + raise ValueError( + "Unknown boundary option '{0}', must be one of: {1}".format( + boundary, list(boundary_funcs.keys()) + ) + ) + + # If x and y are the same object we can save ourselves some computation. 
+ same_data = y is x + + if not same_data and mode != "psd": + raise ValueError("x and y must be equal if mode is 'stft'") + + axis = int(axis) + + # Ensure we have cp.arrays, get outdtype + x = cupy.asarray(x) + if not same_data: + y = cupy.asarray(y) + outdtype = cupy.result_type(x, y, cupy.complex64) + else: + outdtype = cupy.result_type(x, cupy.complex64) + + if not same_data: + # Check if we can broadcast the outer axes together + xouter = list(x.shape) + youter = list(y.shape) + xouter.pop(axis) + youter.pop(axis) + try: + outershape = cupy.broadcast( + cupy.empty(xouter), cupy.empty(youter)).shape + except ValueError: + raise ValueError("x and y cannot be broadcast together.") + + if same_data: + if x.size == 0: + return ( + cupy.empty(x.shape), cupy.empty(x.shape), cupy.empty(x.shape)) + else: + if x.size == 0 or y.size == 0: + outshape = outershape + (min([x.shape[axis], y.shape[axis]]),) + emptyout = cupy.rollaxis(cupy.empty(outshape), -1, axis) + return emptyout, emptyout, emptyout + + if x.ndim > 1: + if axis != -1: + x = cupy.rollaxis(x, axis, len(x.shape)) + if not same_data and y.ndim > 1: + y = cupy.rollaxis(y, axis, len(y.shape)) + + # Check if x and y are the same length, zero-pad if necessary + if not same_data: + if x.shape[-1] != y.shape[-1]: + if x.shape[-1] < y.shape[-1]: + pad_shape = list(x.shape) + pad_shape[-1] = y.shape[-1] - x.shape[-1] + x = cupy.concatenate((x, cupy.zeros(pad_shape)), -1) + else: + pad_shape = list(y.shape) + pad_shape[-1] = x.shape[-1] - y.shape[-1] + y = cupy.concatenate((y, cupy.zeros(pad_shape)), -1) + + if nperseg is not None: # if specified by user + nperseg = int(nperseg) + if nperseg < 1: + raise ValueError("nperseg must be a positive integer") + + # parse window; if array like, then set nperseg = win.shape + win, nperseg = _triage_segments(window, nperseg, input_length=x.shape[-1]) + + if nfft is None: + nfft = nperseg + elif nfft < nperseg: + raise ValueError("nfft must be greater than or equal to nperseg.") 
+ else: + nfft = int(nfft) + + if noverlap is None: + noverlap = nperseg // 2 + else: + noverlap = int(noverlap) + if noverlap >= nperseg: + raise ValueError("noverlap must be less than nperseg.") + nstep = nperseg - noverlap + + # Padding occurs after boundary extension, so that the extended signal ends + # in zeros, instead of introducing an impulse at the end. + # I.e. if x = [..., 3, 2] + # extend then pad -> [..., 3, 2, 2, 3, 0, 0, 0] + # pad then extend -> [..., 3, 2, 0, 0, 0, 2, 3] + + if boundary is not None: + ext_func = boundary_funcs[boundary] + x = ext_func(x, nperseg // 2, axis=-1) + if not same_data: + y = ext_func(y, nperseg // 2, axis=-1) + + if padded: + # Pad to integer number of windowed segments + # I.e make x.shape[-1] = nperseg + (nseg-1)*nstep, with integer nseg + nadd = (-(x.shape[-1] - nperseg) % nstep) % nperseg + zeros_shape = list(x.shape[:-1]) + [nadd] + x = cupy.concatenate((x, cupy.zeros(zeros_shape)), axis=-1) + if not same_data: + zeros_shape = list(y.shape[:-1]) + [nadd] + y = cupy.concatenate((y, cupy.zeros(zeros_shape)), axis=-1) + + # Handle detrending and window functions + if not detrend: + + def detrend_func(d): + return d + + elif not hasattr(detrend, "__call__"): + + def detrend_func(d): + return filtering.detrend(d, type=detrend, axis=-1) + + elif axis != -1: + # Wrap this function so that it receives a shape that it could + # reasonably expect to receive. 
+ def detrend_func(d): + d = cupy.rollaxis(d, -1, axis) + d = detrend(d) + return cupy.rollaxis(d, axis, len(d.shape)) + + else: + detrend_func = detrend + + if cupy.result_type(win, cupy.complex64) != outdtype: + win = win.astype(outdtype) + + if scaling == "density": + scale = 1.0 / (fs * (win * win).sum()) + elif scaling == "spectrum": + scale = 1.0 / win.sum() ** 2 + else: + raise ValueError("Unknown scaling: %r" % scaling) + + if mode == "stft": + scale = cupy.sqrt(scale) + + if return_onesided: + if cupy.iscomplexobj(x): + sides = "twosided" + warnings.warn( + "Input data is complex, switching to " + "return_onesided=False" + ) + else: + sides = "onesided" + if not same_data: + if cupy.iscomplexobj(y): + sides = "twosided" + warnings.warn( + "Input data is complex, switching to " + "return_onesided=False" + ) + else: + sides = "twosided" + + if sides == "twosided": + freqs = cupy.fft.fftfreq(nfft, 1 / fs) + elif sides == "onesided": + freqs = cupy.fft.rfftfreq(nfft, 1 / fs) + + # Perform the windowed FFTs + result = _fft_helper(x, win, detrend_func, nperseg, noverlap, nfft, sides) + + if not same_data: + # All the same operations on the y data + result_y = _fft_helper(y, win, detrend_func, # NOQA + nperseg, noverlap, nfft, sides) + result = cupy.conj(result) * result_y + elif mode == "psd": + result = cupy.conj(result) * result + + result *= scale + if sides == "onesided" and mode == "psd": + if nfft % 2: + result[..., 1:] *= 2 + else: + # Last point is unpaired Nyquist freq point, don't double + result[..., 1:-1] *= 2 + + time = cupy.arange( + nperseg / 2, x.shape[-1] - nperseg / 2 + 1, nperseg - noverlap + ) / float(fs) + if boundary is not None: + time -= (nperseg / 2) / fs + + result = result.astype(outdtype) + + # All imaginary parts are zero anyways + if same_data and mode != "stft": + result = result.real + + # Output is going to have new last axis for time/window index, so a + # negative axis index shifts down one + if axis < 0: + axis -= 1 + + # Roll 
frequency axis back to axis where the data came from + result = cupy.rollaxis(result, -1, axis) + + return freqs, time, result + + +def _triage_segments(window, nperseg, input_length): + """ + Parses window and nperseg arguments for spectrogram and _spectral_helper. + This is a helper function, not meant to be called externally. + + Parameters + ---------- + window : string, tuple, or ndarray + If window is specified by a string or tuple and nperseg is not + specified, nperseg is set to the default of 256 and returns a window of + that length. + If instead the window is array_like and nperseg is not specified, then + nperseg is set to the length of the window. A ValueError is raised if + the user supplies both an array_like window and a value for nperseg but + nperseg does not equal the length of the window. + + nperseg : int + Length of each segment + + input_length: int + Length of input signal, i.e. x.shape[-1]. Used to test for errors. + + Returns + ------- + win : ndarray + window. If function was called with string or tuple than this will hold + the actual array used as a window. + + nperseg : int + Length of each segment. If window is str or tuple, nperseg is set to + 256. If window is array_like, nperseg is set to the length of the + 6 + window. 
+ """ + + # parse window; if array like, then set nperseg = win.shape + if isinstance(window, str) or isinstance(window, tuple): + # if nperseg not specified + if nperseg is None: + nperseg = 256 # then change to default + if nperseg > input_length: + warnings.warn( + "nperseg = {0:d} is greater than input length " + " = {1:d}, using nperseg = {1:d}".format(nperseg, input_length) + ) + nperseg = input_length + win = get_window(window, nperseg) + else: + win = cupy.asarray(window) + if len(win.shape) != 1: + raise ValueError("window must be 1-D") + if input_length < win.shape[-1]: + raise ValueError("window is longer than input signal") + if nperseg is None: + nperseg = win.shape[0] + elif nperseg is not None: + if nperseg != win.shape[0]: + raise ValueError( + "value specified for nperseg is different" + " from length of window" + ) + return win, nperseg + + +def _fft_helper(x, win, detrend_func, nperseg, noverlap, nfft, sides): + """ + Calculate windowed FFT, for internal use by + cusignal.spectral_analysis.spectral._spectral_helper + + This is a helper function that does the main FFT calculation for + `_spectral helper`. All input validation is performed there, and the + data axis is assumed to be the last axis of x. It is not designed to + be called externally. The windows are not averaged over; the result + from each window is returned. 
+ + Returns + ------- + result : ndarray + Array of FFT data + + Notes + ----- + Adapted from matplotlib.mlab + + """ + # Created strided array of data segments + if nperseg == 1 and noverlap == 0: + result = x[..., cupy.newaxis] + else: + # https://stackoverflow.com/a/5568169 + step = nperseg - noverlap + shape = x.shape[:-1] + ((x.shape[-1] - noverlap) // step, nperseg) + strides = x.strides[:-1] + (step * x.strides[-1], x.strides[-1]) + # Need to optimize this in cuSignal + result = _as_strided(x, shape=shape, strides=strides) + + # Detrend each data segment individually + result = detrend_func(result) + + # Apply window by multiplication + result = win * result + + # Perform the fft. Acts on last axis by default. Zero-pads automatically + if sides == "twosided": + func = cupy.fft.fft + else: + result = result.real + func = cupy.fft.rfft + result = func(result, n=nfft) + + return result + + +def _median_bias(n): + """ + Returns the bias of the median of a set of periodograms relative to + the mean. + + See arXiv:gr-qc/0509116 Appendix B for details. + + Parameters + ---------- + n : int + Numbers of periodograms being averaged. + + Returns + ------- + bias : float + Calculated bias. 
+ """ + ii_2 = 2 * cupy.arange(1.0, (n - 1) // 2 + 1) + return 1 + cupy.sum(1.0 / (ii_2 + 1) - 1.0 / ii_2) diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/_splines.py b/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/_splines.py new file mode 100644 index 0000000000000000000000000000000000000000..ded40b8bb97e4a2bead1047f918317fc68761264 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/_splines.py @@ -0,0 +1,503 @@ + +import cupy +from cupy._core._scalar import get_typename +from cupy._core.internal import _normalize_axis_index + +from cupyx.scipy.signal._signaltools import lfilter +from cupyx.scipy.signal._arraytools import ( + axis_slice, axis_assign, axis_reverse) +from cupyx.scipy.signal._iir_utils import collapse_2d, apply_iir_sos + + +SYMIIR2_KERNEL = r""" +#include +#include + +template +__device__ T _compute_symiirorder2_fwd_hc( + const int k, const T cs, const T r, const T omega) { + T base; + + if(k < 0) { + return 0; + } + + if(omega == 0.0) { + base = cs * pow(r, ((T) k)) * (k + 1); + } else if(omega == M_PI) { + base = cs * pow(r, ((T) k)) * (k + 1) * (1 - 2 * (k % 2)); + } else { + base = (cs * pow(r, ((T) k)) * sin(omega * (k + 1)) / + sin(omega)); + } + return base; +} + +template +__global__ void compute_symiirorder2_fwd_sc( + const int n, const int off, const T* cs_ptr, const T* r_ptr, + const T* omega_ptr, const double precision, bool* valid, T* out) { + + int idx = blockDim.x * blockIdx.x + threadIdx.x; + if(idx + off >= n) { + return; + } + + const T cs = cs_ptr[0]; + const T r = r_ptr[0]; + const T omega = omega_ptr[0]; + + T val = _compute_symiirorder2_fwd_hc(idx + off + 1, cs, r, omega); + T err = val * val; + + out[idx] = val; + valid[idx] = err <= precision; +} + +template +__device__ T _compute_symiirorder2_bwd_hs( + const int ki, const T cs, const T rsq, const T omega) { + T c0; + T gamma; + + T cssq = cs * cs; + int k = abs(ki); + T rsupk = pow(rsq, ((T) k) / ((T) 2.0)); + + + if(omega == 
0.0) { + c0 = (1 + rsq) / ((1 - rsq) * (1 - rsq) * (1 - rsq)) * cssq; + gamma = (1 - rsq) / (1 + rsq); + return c0 * rsupk * (1 + gamma * k); + } + + if(omega == M_PI) { + c0 = (1 + rsq) / ((1 - rsq) * (1 - rsq) * (1 - rsq)) * cssq; + gamma = (1 - rsq) / (1 + rsq) * (1 - 2 * (k % 2)); + return c0 * rsupk * (1 + gamma * k); + } + + c0 = (cssq * (1.0 + rsq) / (1.0 - rsq) / + (1 - 2 * rsq * cos(2 * omega) + rsq * rsq)); + gamma = (1.0 - rsq) / (1.0 + rsq) / tan(omega); + return c0 * rsupk * (cos(omega * k) + gamma * sin(omega * k)); +} + +template +__global__ void compute_symiirorder2_bwd_sc( + const int n, const int off, const int l_off, const int r_off, + const T* cs_ptr, const T* rsq_ptr, const T* omega_ptr, + const double precision, bool* valid, T* out) { + + int idx = blockDim.x * blockIdx.x + threadIdx.x; + if(idx + off >= n) { + return; + } + + const T cs = cs_ptr[0]; + const T rsq = rsq_ptr[0]; + const T omega = omega_ptr[0]; + + T v1 = _compute_symiirorder2_bwd_hs(idx + l_off + off, cs, rsq, omega); + T v2 = _compute_symiirorder2_bwd_hs(idx + r_off + off, cs, rsq, omega); + + T diff = v1 + v2; + T err = diff * diff; + out[idx] = diff; + valid[idx] = err <= precision; +} +""" + +SYMIIR2_MODULE = cupy.RawModule( + code=SYMIIR2_KERNEL, options=('-std=c++11',), + name_expressions=[f'compute_symiirorder2_bwd_sc<{t}>' + for t in ['float', 'double']] + + [f'compute_symiirorder2_fwd_sc<{t}>' + for t in ['float', 'double']]) + + +def _get_module_func(module, func_name, *template_args): + args_dtypes = [get_typename(arg.dtype) for arg in template_args] + template = ', '.join(args_dtypes) + kernel_name = f'{func_name}<{template}>' if template_args else func_name + kernel = module.get_function(kernel_name) + return kernel + + +def _find_initial_cond(all_valid, cum_poly, n, off=0, axis=-1): + indices = cupy.where(all_valid)[0] + 1 + off + zi = cupy.nan + if indices.size > 0: + zi = cupy.where( + indices[0] >= n, cupy.nan, + axis_slice(cum_poly, indices[0] - 1 - off, + 
indices[0] - off, axis=axis)) + return zi + + +def _symiirorder1_nd(input, c0, z1, precision=-1.0, axis=-1): + axis = _normalize_axis_index(axis, input.ndim) + input_shape = input.shape + input_ndim = input.ndim + if input.ndim > 1: + input, input_shape = collapse_2d(input, axis) + + if cupy.abs(z1) >= 1: + raise ValueError('|z1| must be less than 1.0') + + if precision <= 0.0 or precision > 1.0: + if input.dtype is cupy.dtype(cupy.float64): + precision = 1e-6 + elif input.dtype is cupy.dtype(cupy.float32): + precision = 1e-3 + else: + precision = 10 ** -cupy.finfo(input.dtype).iexp + + precision *= precision + pos = cupy.arange(1, input_shape[-1] + 1, dtype=input.dtype) + pow_z1 = z1 ** pos + + diff = pow_z1 * cupy.conjugate(pow_z1) + cum_poly = cupy.cumsum( + pow_z1 * input, axis=-1) + axis_slice(input, 0, 1, axis=-1) + + # cupy.expand_dims(input_2d[:, 0], -1) + all_valid = diff <= precision + + zi = _find_initial_cond(all_valid, cum_poly, input_shape[-1]) + + if cupy.any(cupy.isnan(zi)): + raise ValueError( + 'Sum to find symmetric boundary conditions did not converge.') + + # Apply first the system 1 / (1 - z1 * z^-1) + zi_shape = (1, 4) + if input_ndim > 1: + zi_shape = (1, input.shape[0], 4) + + all_zi = cupy.zeros(zi_shape, dtype=input.dtype) + all_zi = axis_assign(all_zi, zi, 3, 4) + + coef = cupy.r_[1, 0, 0, 1, -z1, 0] + coef = cupy.atleast_2d(coef) + + y1, _ = apply_iir_sos(axis_slice(input, 1), coef, zi=all_zi, + dtype=input.dtype, apply_fir=False) + y1 = cupy.c_[zi, y1] + + # Compute backward symmetric condition and apply the system + # c0 / (1 - z1 * z) + zi = -c0 / (z1 - 1.0) * axis_slice(y1, -1) + all_zi = axis_assign(all_zi, zi, 3, 4) + + coef = cupy.r_[c0, 0, 0, 1, -z1, 0] + coef = cupy.atleast_2d(coef) + + out, _ = apply_iir_sos( + axis_slice(y1, -2, step=-1), coef, zi=all_zi, dtype=input.dtype) + + if input_ndim > 1: + out = cupy.c_[axis_reverse(out), zi] + else: + out = cupy.r_[axis_reverse(out), zi] + + if input_ndim > 1: + out = 
out.reshape(input_shape) + out = cupy.moveaxis(out, -1, axis) + if not out.flags.c_contiguous: + out = out.copy() + return out + + +def symiirorder1(input, c0, z1, precision=-1.0): + """ + Implement a smoothing IIR filter with mirror-symmetric boundary conditions + using a cascade of first-order sections. The second section uses a + reversed sequence. This implements a system with the following + transfer function and mirror-symmetric boundary conditions:: + + c0 + H(z) = --------------------- + (1-z1/z) (1 - z1 z) + + The resulting signal will have mirror symmetric boundary conditions + as well. + + Parameters + ---------- + input : ndarray + The input signal. + c0, z1 : scalar + Parameters in the transfer function. + precision : + Specifies the precision for calculating initial conditions + of the recursive filter based on mirror-symmetric input. + + Returns + ------- + output : ndarray + The filtered signal. + """ + c0 = cupy.asarray([c0], input.dtype) + z1 = cupy.asarray([z1], input.dtype) + + if cupy.abs(z1) >= 1: + raise ValueError('|z1| must be less than 1.0') + + if precision <= 0.0 or precision > 1.0: + precision = cupy.finfo(input.dtype).resolution + + precision *= precision + pos = cupy.arange(1, input.size + 1, dtype=input.dtype) + pow_z1 = z1 ** pos + + diff = pow_z1 * cupy.conjugate(pow_z1) + cum_poly = cupy.cumsum(pow_z1 * input) + input[0] + all_valid = diff <= precision + + zi = _find_initial_cond(all_valid, cum_poly, input.size) + + if cupy.isnan(zi): + raise ValueError( + 'Sum to find symmetric boundary conditions did not converge.') + + a = cupy.r_[1, -z1] + a = a.astype(input.dtype) + + # Apply first the system 1 / (1 - z1 * z^-1) + y1, _ = lfilter( + cupy.ones(1, dtype=input.dtype), a, input[1:], zi=zi) + y1 = cupy.r_[zi, y1] + + # Compute backward symmetric condition and apply the system + # c0 / (1 - z1 * z) + zi = -c0 / (z1 - 1.0) * y1[-1] + a = cupy.r_[1, -z1] + a = a.astype(input.dtype) + + out, _ = lfilter(c0, a, y1[:-1][::-1], zi=zi) + 
return cupy.r_[out[::-1], zi] + + +def _compute_symiirorder2_fwd_hc(k, cs, r, omega): + base = None + if omega == 0.0: + base = cs * cupy.power(r, k) * (k+1) + elif omega == cupy.pi: + base = cs * cupy.power(r, k) * (k + 1) * (1 - 2 * (k % 2)) + else: + base = (cs * cupy.power(r, k) * cupy.sin(omega * (k + 1)) / + cupy.sin(omega)) + return cupy.where(k < 0, 0.0, base) + + +def _compute_symiirorder2_bwd_hs(k, cs, rsq, omega): + cssq = cs * cs + k = cupy.abs(k) + rsupk = cupy.power(rsq, k / 2.0) + + if omega == 0.0: + c0 = (1 + rsq) / ((1 - rsq) * (1 - rsq) * (1 - rsq)) * cssq + gamma = (1 - rsq) / (1 + rsq) + return c0 * rsupk * (1 + gamma * k) + + if omega == cupy.pi: + c0 = (1 + rsq) / ((1 - rsq) * (1 - rsq) * (1 - rsq)) * cssq + gamma = (1 - rsq) / (1 + rsq) * (1 - 2 * (k % 2)) + return c0 * rsupk * (1 + gamma * k) + + c0 = (cssq * (1.0 + rsq) / (1.0 - rsq) / + (1 - 2 * rsq * cupy.cos(2 * omega) + rsq * rsq)) + gamma = (1.0 - rsq) / (1.0 + rsq) / cupy.tan(omega) + return c0 * rsupk * (cupy.cos(omega * k) + gamma * cupy.sin(omega * k)) + + +def _symiirorder2_nd(input, r, omega, precision=-1.0, axis=-1): + if r >= 1.0: + raise ValueError('r must be less than 1.0') + + if precision <= 0.0 or precision > 1.0: + if input.dtype is cupy.dtype(cupy.float64): + precision = 1e-11 + elif input.dtype is cupy.dtype(cupy.float32): + precision = 1e-6 + else: + precision = 10 ** -cupy.finfo(input.dtype).iexp + + axis = _normalize_axis_index(axis, input.ndim) + input_shape = input.shape + input_ndim = input.ndim + if input.ndim > 1: + input, input_shape = collapse_2d(input, axis) + + block_sz = 128 + rsq = r * r + a2 = 2 * r * cupy.cos(omega) + a3 = -rsq + cs = cupy.atleast_1d(1 - 2 * r * cupy.cos(omega) + rsq) + omega = cupy.asarray(omega, cs.dtype) + r = cupy.asarray(r, cs.dtype) + rsq = cupy.asarray(rsq, cs.dtype) + + precision *= precision + + # First compute the symmetric forward starting conditions + compute_symiirorder2_fwd_sc = _get_module_func( + SYMIIR2_MODULE, 
'compute_symiirorder2_fwd_sc', cs) + + diff = cupy.empty((block_sz + 1,), dtype=cs.dtype) + all_valid = cupy.empty((block_sz + 1,), dtype=cupy.bool_) + + starting_diff = cupy.arange(2, dtype=input.dtype) + starting_diff = _compute_symiirorder2_fwd_hc(starting_diff, cs, r, omega) + + y0 = cupy.nan + y1 = cupy.nan + + for i in range(0, input.shape[-1] + 2, block_sz): + compute_symiirorder2_fwd_sc( + (1,), (block_sz + 1,), ( + input.shape[-1] + 2, i, cs, r, omega, precision, all_valid, + diff)) + + input_slice = axis_slice(input, i, i + block_sz) + diff_y0 = diff[:-1][:input_slice.shape[-1]] + diff_y1 = diff[1:][:input_slice.shape[-1]] + + if cupy.isnan(y0): + cum_poly_y0 = cupy.cumsum(diff_y0 * input_slice, axis=-1) + ( + starting_diff[0] * axis_slice(input, 0, 1)) + y0 = _find_initial_cond( + all_valid[:-1][:input_slice.shape[-1]], cum_poly_y0, + input.shape[-1], i) + + if cupy.isnan(y1): + cum_poly_y1 = (cupy.cumsum(diff_y1 * input_slice, axis=-1) + + starting_diff[0] * axis_slice(input, 1, 2) + + starting_diff[1] * axis_slice(input, 0, 1)) + y1 = _find_initial_cond( + all_valid[1:][:input_slice.shape[-1]], cum_poly_y1, + input.shape[-1], i) + + if not cupy.any(cupy.isnan(cupy.r_[y0, y1])): + break + + if cupy.any(cupy.isnan(cupy.r_[y0, y1])): + raise ValueError( + 'Sum to find symmetric boundary conditions did not converge.') + + # Apply the system cs / (1 - a2 * z^-1 - a3 * z^-2) + zi_shape = (1, 4) + if input_ndim > 1: + zi_shape = (1, input.shape[0], 4) + + sos = cupy.atleast_2d(cupy.r_[cs, 0, 0, 1, -a2, -a3]) + sos = sos.astype(input.dtype) + + all_zi = cupy.zeros(zi_shape, dtype=input.dtype) + all_zi = axis_assign(all_zi, y0, 2, 3) + all_zi = axis_assign(all_zi, y1, 3, 4) + + y_fwd, _ = apply_iir_sos( + axis_slice(input, 2), sos, zi=all_zi, dtype=input.dtype) + if input_ndim > 1: + y_fwd = cupy.c_[y0, y1, y_fwd] + else: + y_fwd = cupy.r_[y0, y1, y_fwd] + + # Then compute the symmetric backward starting conditions + compute_symiirorder2_bwd_sc = 
_get_module_func( + SYMIIR2_MODULE, 'compute_symiirorder2_bwd_sc', cs) + + diff = cupy.empty((block_sz,), dtype=cs.dtype) + all_valid = cupy.empty((block_sz,), dtype=cupy.bool_) + rev_input = axis_reverse(input) + y0 = cupy.nan + + for i in range(0, input.shape[-1] + 1, block_sz): + compute_symiirorder2_bwd_sc( + (1,), (block_sz,), ( + input.shape[-1] + 1, i, 0, 1, cs, cupy.asarray(rsq, cs.dtype), + cupy.asarray(omega, cs.dtype), precision, all_valid, diff)) + + input_slice = axis_slice(rev_input, i, i + block_sz) + cum_poly_y0 = cupy.cumsum(diff[:input_slice.shape[-1]] * input_slice, + axis=-1) + y0 = _find_initial_cond( + all_valid[:input_slice.shape[-1]], cum_poly_y0, input.shape[-1], i) + if not cupy.any(cupy.isnan(y0)): + break + + if cupy.any(cupy.isnan(y0)): + raise ValueError( + 'Sum to find symmetric boundary conditions did not converge.') + + y1 = cupy.nan + for i in range(0, input.shape[-1] + 1, block_sz): + compute_symiirorder2_bwd_sc( + (1,), (block_sz,), ( + input.size + 1, i, -1, 2, cs, cupy.asarray(rsq, cs.dtype), + cupy.asarray(omega, cs.dtype), precision, all_valid, diff)) + + input_slice = axis_slice(rev_input, i, i + block_sz) + cum_poly_y1 = cupy.cumsum(diff[:input_slice.shape[-1]] * input_slice, + axis=-1) + y1 = _find_initial_cond( + all_valid[:input_slice.size], cum_poly_y1, input.size, i) + if not cupy.any(cupy.isnan(y1)): + break + + if cupy.any(cupy.isnan(y1)): + raise ValueError( + 'Sum to find symmetric boundary conditions did not converge.') + + all_zi = axis_assign(all_zi, y0, 2, 3) + all_zi = axis_assign(all_zi, y1, 3, 4) + + out, _ = apply_iir_sos(axis_slice(y_fwd, -3, step=-1), sos, zi=all_zi) + + if input_ndim > 1: + out = cupy.c_[axis_reverse(out), y1, y0] + else: + out = cupy.r_[axis_reverse(out), y1, y0] + + if input_ndim > 1: + out = out.reshape(input_shape) + out = cupy.moveaxis(out, -1, axis) + if not out.flags.c_contiguous: + out = out.copy() + return out + + +def symiirorder2(input, r, omega, precision=-1.0): + """ + 
Implement a smoothing IIR filter with mirror-symmetric boundary conditions + using a cascade of second-order sections. The second section uses a + reversed sequence. This implements the following transfer function:: + + cs^2 + H(z) = --------------------------------------- + (1 - a2/z - a3/z^2) (1 - a2 z - a3 z^2 ) + + where:: + + a2 = 2 * r * cos(omega) + a3 = - r ** 2 + cs = 1 - 2 * r * cos(omega) + r ** 2 + + Parameters + ---------- + input : ndarray + The input signal. + r, omega : float + Parameters in the transfer function. + precision : float + Specifies the precision for calculating initial conditions + of the recursive filter based on mirror-symmetric input. + + Returns + ------- + output : ndarray + The filtered signal. + """ + return _symiirorder2_nd(input, r, omega, precision) diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/_upfirdn.py b/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/_upfirdn.py new file mode 100644 index 0000000000000000000000000000000000000000..377109fce0b66ac347dfff78f022166bfcc78758 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/_upfirdn.py @@ -0,0 +1,502 @@ +""" +upfirdn implementation. + +Functions defined here were ported directly from cuSignal under +terms of the MIT license, under the following notice: + +Copyright (c) 2019-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + +Permission is hereby granted, free of charge, to any person obtaining a +copy of this software and associated documentation files (the "Software"), +to deal in the Software without restriction, including without limitation +the rights to use, copy, modify, merge, publish, distribute, sublicense, +and/or sell copies of the Software, and to permit persons to whom the +Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. + +""" + +from math import ceil +import cupy + +_upfirdn_modes = [ + 'constant', 'wrap', 'edge', 'smooth', 'symmetric', 'reflect', + 'antisymmetric', 'antireflect', 'line', +] + + +UPFIRDN_KERNEL = r''' +#include + +/////////////////////////////////////////////////////////////////////////////// +// UPFIRDN1D // +/////////////////////////////////////////////////////////////////////////////// + +template +__device__ void _cupy_upfirdn1D( const T *__restrict__ inp, + const T *__restrict__ h_trans_flip, + const int up, + const int down, + const int axis, + const int x_shape_a, + const int h_per_phase, + const int padded_len, + T *__restrict__ out, + const int outW ) { + + const int t { static_cast( blockIdx.x * blockDim.x + threadIdx.x ) }; + const int stride { static_cast( blockDim.x * gridDim.x ) }; + + for ( size_t tid = t; tid < outW; tid += stride ) { + +#if ( __CUDACC_VER_MAJOR__ >= 11 ) && ( __CUDACC_VER_MINOR__ >= 2 ) + __builtin_assume( padded_len > 0 ); + __builtin_assume( up > 0 ); + __builtin_assume( down > 0 ); + __builtin_assume( tid > 0 ); +#endif + + const int x_idx { static_cast( ( tid * down ) / up ) % padded_len }; + int h_idx { static_cast( ( tid * down ) % up * h_per_phase ) }; + int x_conv_idx { x_idx - h_per_phase + 1 }; + + if ( x_conv_idx < 0 ) { + h_idx -= x_conv_idx; + x_conv_idx = 0; + } + + T temp {}; + + int stop = ( x_shape_a < ( x_idx + 1 ) ) ? 
x_shape_a : ( x_idx + 1 ); + + for ( int x_c = x_conv_idx; x_c < stop; x_c++ ) { + temp += inp[x_c] * h_trans_flip[h_idx]; + h_idx += 1; + } + out[tid] = temp; + } +} + +extern "C" __global__ void __launch_bounds__( 512 ) _cupy_upfirdn1D_float32( const float *__restrict__ inp, + const float *__restrict__ h_trans_flip, + const int up, + const int down, + const int axis, + const int x_shape_a, + const int h_per_phase, + const int padded_len, + float *__restrict__ out, + const int outW ) { + _cupy_upfirdn1D( inp, h_trans_flip, up, down, axis, x_shape_a, h_per_phase, padded_len, out, outW ); +} + +extern "C" __global__ void __launch_bounds__( 512 ) _cupy_upfirdn1D_float64( const double *__restrict__ inp, + const double *__restrict__ h_trans_flip, + const int up, + const int down, + const int axis, + const int x_shape_a, + const int h_per_phase, + const int padded_len, + double *__restrict__ out, + const int outW ) { + _cupy_upfirdn1D( inp, h_trans_flip, up, down, axis, x_shape_a, h_per_phase, padded_len, out, outW ); +} + +extern "C" __global__ void __launch_bounds__( 512 ) + _cupy_upfirdn1D_complex64( const thrust::complex *__restrict__ inp, + const thrust::complex *__restrict__ h_trans_flip, + const int up, + const int down, + const int axis, + const int x_shape_a, + const int h_per_phase, + const int padded_len, + thrust::complex *__restrict__ out, + const int outW ) { + _cupy_upfirdn1D>( + inp, h_trans_flip, up, down, axis, x_shape_a, h_per_phase, padded_len, out, outW ); +} + +extern "C" __global__ void __launch_bounds__( 512 ) + _cupy_upfirdn1D_complex128( const thrust::complex *__restrict__ inp, + const thrust::complex *__restrict__ h_trans_flip, + const int up, + const int down, + const int axis, + const int x_shape_a, + const int h_per_phase, + const int padded_len, + thrust::complex *__restrict__ out, + const int outW ) { + _cupy_upfirdn1D>( + inp, h_trans_flip, up, down, axis, x_shape_a, h_per_phase, padded_len, out, outW ); +} + 
+/////////////////////////////////////////////////////////////////////////////// +// UPFIRDN2D // +/////////////////////////////////////////////////////////////////////////////// + +template +__device__ void _cupy_upfirdn2D( const T *__restrict__ inp, + const int inpH, + const T *__restrict__ h_trans_flip, + const int up, + const int down, + const int axis, + const int x_shape_a, + const int h_per_phase, + const int padded_len, + T *__restrict__ out, + const int outW, + const int outH ) { + + const int ty { static_cast( blockIdx.x * blockDim.x + threadIdx.x ) }; + const int tx { static_cast( blockIdx.y * blockDim.y + threadIdx.y ) }; + + const int stride_y { static_cast( blockDim.x * gridDim.x ) }; + const int stride_x { static_cast( blockDim.y * gridDim.y ) }; + + for ( int x = tx; x < outH; x += stride_x ) { + for ( int y = ty; y < outW; y += stride_y ) { + int x_idx {}; + int h_idx {}; + +#if ( __CUDACC_VER_MAJOR__ >= 11 ) && ( __CUDACC_VER_MINOR__ >= 2 ) + __builtin_assume( padded_len > 0 ); + __builtin_assume( up > 0 ); + __builtin_assume( down > 0 ); +#endif + + if ( axis == 1 ) { +#if ( __CUDACC_VER_MAJOR__ >= 11 ) && ( __CUDACC_VER_MINOR__ >= 2 ) + __builtin_assume( x > 0 ); +#endif + x_idx = ( static_cast( x * down ) / up ) % padded_len; + h_idx = ( x * down ) % up * h_per_phase; + } else { +#if ( __CUDACC_VER_MAJOR__ >= 11 ) && ( __CUDACC_VER_MINOR__ >= 2 ) + __builtin_assume( y > 0 ); +#endif + x_idx = ( static_cast( y * down ) / up ) % padded_len; + h_idx = ( y * down ) % up * h_per_phase; + } + + int x_conv_idx { x_idx - h_per_phase + 1 }; + if ( x_conv_idx < 0 ) { + h_idx -= x_conv_idx; + x_conv_idx = 0; + } + + T temp {}; + + int stop = ( x_shape_a < ( x_idx + 1 ) ) ? 
x_shape_a : ( x_idx + 1 ); + + for ( int x_c = x_conv_idx; x_c < stop; x_c++ ) { + if ( axis == 1 ) { + temp += inp[y * inpH + x_c] * h_trans_flip[h_idx]; + } else { + temp += inp[x_c * inpH + x] * h_trans_flip[h_idx]; + } + h_idx += 1; + } + out[y * outH + x] = temp; + } + } +} + +extern "C" __global__ void __launch_bounds__( 64 ) _cupy_upfirdn2D_float32( const float *__restrict__ inp, + const int inpH, + const float *__restrict__ h_trans_flip, + const int up, + const int down, + const int axis, + const int x_shape_a, + const int h_per_phase, + const int padded_len, + float *__restrict__ out, + const int outW, + const int outH ) { + _cupy_upfirdn2D( + inp, inpH, h_trans_flip, up, down, axis, x_shape_a, h_per_phase, padded_len, out, outW, outH ); +} + +extern "C" __global__ void _cupy_upfirdn2D_float64( const double *__restrict__ inp, + const int inpH, + const double *__restrict__ h_trans_flip, + const int up, + const int down, + const int axis, + const int x_shape_a, + const int h_per_phase, + const int padded_len, + double *__restrict__ out, + const int outW, + const int outH ) { + _cupy_upfirdn2D( + inp, inpH, h_trans_flip, up, down, axis, x_shape_a, h_per_phase, padded_len, out, outW, outH ); +} + +extern "C" __global__ void __launch_bounds__( 64 ) + _cupy_upfirdn2D_complex64( const thrust::complex *__restrict__ inp, + const int inpH, + const thrust::complex *__restrict__ h_trans_flip, + const int up, + const int down, + const int axis, + const int x_shape_a, + const int h_per_phase, + const int padded_len, + thrust::complex *__restrict__ out, + const int outW, + const int outH ) { + _cupy_upfirdn2D>( + inp, inpH, h_trans_flip, up, down, axis, x_shape_a, h_per_phase, padded_len, out, outW, outH ); +} + +extern "C" __global__ void __launch_bounds__( 64 ) + _cupy_upfirdn2D_complex128( const thrust::complex *__restrict__ inp, + const int inpH, + const thrust::complex *__restrict__ h_trans_flip, + const int up, + const int down, + const int axis, + const int 
x_shape_a, + const int h_per_phase, + const int padded_len, + thrust::complex *__restrict__ out, + const int outW, + const int outH ) { + _cupy_upfirdn2D>( + inp, inpH, h_trans_flip, up, down, axis, x_shape_a, h_per_phase, padded_len, out, outW, outH ); +} +''' # NOQA + + +UPFIRDN_MODULE = cupy.RawModule( + code=UPFIRDN_KERNEL, options=('-std=c++11',), + name_expressions=[ + '_cupy_upfirdn1D_float32', + '_cupy_upfirdn1D_float64', + '_cupy_upfirdn1D_complex64', + '_cupy_upfirdn1D_complex128', + '_cupy_upfirdn2D_float32', + '_cupy_upfirdn2D_float64', + '_cupy_upfirdn2D_complex64', + '_cupy_upfirdn2D_complex128', + ]) + + +def _pad_h(h, up): + """Store coefficients in a transposed, flipped arrangement. + For example, suppose upRate is 3, and the + input number of coefficients is 10, represented as h[0], ..., h[9]. + Then the internal buffer will look like this:: + h[9], h[6], h[3], h[0], // flipped phase 0 coefs + 0, h[7], h[4], h[1], // flipped phase 1 coefs (zero-padded) + 0, h[8], h[5], h[2], // flipped phase 2 coefs (zero-padded) + """ + h_padlen = len(h) + (-len(h) % up) + h_full = cupy.zeros(h_padlen, h.dtype) + h_full[: len(h)] = h + h_full = h_full.reshape(-1, up).T[:, ::-1].ravel() + return h_full + + +def _output_len(len_h, in_len, up, down): + return (((in_len - 1) * up + len_h) - 1) // down + 1 + + +# These three _get_* functions are vendored from +# https://github.com/rapidsai/cusignal/blob/branch-23.08/python/cusignal/utils/helper_tools.py#L55 +def _get_max_gdx(): + device_id = cupy.cuda.Device() + return device_id.attributes["MaxGridDimX"] + + +def _get_max_gdy(): + device_id = cupy.cuda.Device() + return device_id.attributes["MaxGridDimY"] + + +def _get_tpb_bpg(): + device_id = cupy.cuda.Device() + numSM = device_id.attributes["MultiProcessorCount"] + threadsperblock = 512 + blockspergrid = numSM * 20 + + return threadsperblock, blockspergrid + + +class _UpFIRDn(object): + def __init__(self, h, x_dtype, up, down): + """Helper for resampling""" + h = 
cupy.asarray(h) + if h.ndim != 1 or h.size == 0: + raise ValueError("h must be 1D with non-zero length") + + self._output_type = cupy.result_type(h.dtype, x_dtype, cupy.float32) + h = cupy.asarray(h, self._output_type) + self._up = int(up) + self._down = int(down) + if self._up < 1 or self._down < 1: + raise ValueError("Both up and down must be >= 1") + # This both transposes, and "flips" each phase for filtering + self._h_trans_flip = _pad_h(h, self._up) + self._h_trans_flip = cupy.asarray(self._h_trans_flip) + self._h_trans_flip = cupy.ascontiguousarray(self._h_trans_flip) + self._h_len_orig = len(h) + + def apply_filter( + self, + x, + axis, + ): + """Apply the prepared filter to the specified axis of a nD signal x""" + + x = cupy.asarray(x, self._output_type) + + output_len = _output_len( + self._h_len_orig, x.shape[axis], self._up, self._down) + output_shape = list(x.shape) + output_shape[axis] = output_len + out = cupy.empty(output_shape, dtype=self._output_type, order="C") + axis = axis % x.ndim + + # Precompute variables on CPU + x_shape_a = x.shape[axis] + h_per_phase = len(self._h_trans_flip) // self._up + padded_len = x.shape[axis] + (len(self._h_trans_flip) // self._up) - 1 + + if out.ndim == 1: + + threadsperblock, blockspergrid = _get_tpb_bpg() + + kernel = UPFIRDN_MODULE.get_function( + f'_cupy_upfirdn1D_{out.dtype.name}') + kernel(((x.shape[0] + 128 - 1) // 128,), (128,), + (x, + self._h_trans_flip, + self._up, + self._down, + axis, + x_shape_a, + h_per_phase, + padded_len, + out, + out.shape[0] + ) + ) + + elif out.ndim == 2: + # set up the kernel launch parameters + threadsperblock = (8, 8) + blocks = ceil(out.shape[0] / threadsperblock[0]) + blockspergrid_x = ( + blocks if blocks < _get_max_gdx() else _get_max_gdx()) + + blocks = ceil(out.shape[1] / threadsperblock[1]) + blockspergrid_y = ( + blocks if blocks < _get_max_gdy() else _get_max_gdy()) + + blockspergrid = (blockspergrid_x, blockspergrid_y) + + # do computations + kernel = 
UPFIRDN_MODULE.get_function( + f'_cupy_upfirdn2D_{out.dtype.name}') + kernel(threadsperblock, blockspergrid, + (x, + x.shape[1], + self._h_trans_flip, + self._up, + self._down, + axis, + x_shape_a, + h_per_phase, + padded_len, + out, + out.shape[0], + out.shape[1] + ) + ) + else: + raise NotImplementedError("upfirdn() requires ndim <= 2") + + return out + + +def upfirdn( + h, + x, + up=1, + down=1, + axis=-1, + mode="constant", + cval=0 +): + """ + Upsample, FIR filter, and downsample. + + Parameters + ---------- + h : array_like + 1-dimensional FIR (finite-impulse response) filter coefficients. + x : array_like + Input signal array. + up : int, optional + Upsampling rate. Default is 1. + down : int, optional + Downsampling rate. Default is 1. + axis : int, optional + The axis of the input data array along which to apply the + linear filter. The filter is applied to each subarray along + this axis. Default is -1. + mode : str, optional + This parameter is not implemented for values other than ``"constant"``. + cval : float, optional + This parameter is not implemented for values other than 0. + + Returns + ------- + y : ndarray + The output signal array. Dimensions will be the same as `x` except + for along `axis`, which will change size according to the `h`, + `up`, and `down` parameters. + + Notes + ----- + The algorithm is an implementation of the block diagram shown on page 129 + of the Vaidyanathan text [1]_ (Figure 4.3-8d). + + The direct approach of upsampling by factor of P with zero insertion, + FIR filtering of length ``N``, and downsampling by factor of Q is + O(N*Q) per output sample. The polyphase implementation used here is + O(N/P). + + See Also + -------- + scipy.signal.upfirdn + + References + ---------- + .. [1] P. P. Vaidyanathan, Multirate Systems and Filter Banks, + Prentice Hall, 1993. 
+ """ + if mode is None: + mode = "constant" # For backwards compatibility + if mode != "constant" or cval != 0: + raise NotImplementedError(f"{mode=} and {cval=} not implemented.") + + ufd = _UpFIRDn(h, x.dtype, int(up), int(down)) + # This is equivalent to (but faster than) using cp.apply_along_axis + return ufd.apply_filter(x, axis) diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/_waveforms.py b/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/_waveforms.py new file mode 100644 index 0000000000000000000000000000000000000000..29f78caf18118cc24dc61b46d3b438290ef9abc4 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/_waveforms.py @@ -0,0 +1,680 @@ + +""" +Waveform-generating functions. + +Some of the functions defined here were ported directly from CuSignal under +terms of the MIT license, under the following notice: + +Copyright (c) 2019-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + +Permission is hereby granted, free of charge, to any person obtaining a +copy of this software and associated documentation files (the "Software"), +to deal in the Software without restriction, including without limitation +and/or sell copies of the Software, and to permit persons to whom the +Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. 
+""" + +import cupy +from cupy._core._scalar import get_typename +from cupy_backends.cuda.api import runtime + +import numpy as np + + +def _get_typename(dtype): + typename = get_typename(dtype) + if cupy.dtype(dtype).kind == 'c': + typename = 'thrust::' + typename + elif typename == 'float16': + if runtime.is_hip: + # 'half' in name_expressions weirdly raises + # HIPRTC_ERROR_NAME_EXPRESSION_NOT_VALID in getLoweredName() on + # ROCm + typename = '__half' + else: + typename = 'half' + return typename + + +FLOAT_TYPES = [cupy.float16, cupy.float32, cupy.float64] +INT_TYPES = [cupy.int8, cupy.int16, cupy.int32, cupy.int64] +UNSIGNED_TYPES = [cupy.uint8, cupy.uint16, cupy.uint32, cupy.uint64] +COMPLEX_TYPES = [cupy.complex64, cupy.complex128] +TYPES = FLOAT_TYPES + INT_TYPES + UNSIGNED_TYPES + COMPLEX_TYPES # type: ignore # NOQA +TYPE_NAMES = [_get_typename(t) for t in TYPES] + + +def _get_module_func(module, func_name, *template_args): + args_dtypes = [_get_typename(arg.dtype) for arg in template_args] + template = ', '.join(args_dtypes) + kernel_name = f'{func_name}<{template}>' if template_args else func_name + kernel = module.get_function(kernel_name) + return kernel + + +_sawtooth_kernel = cupy.ElementwiseKernel( + "T t, T w", + "float64 y", + """ + double out {}; + const bool mask1 { ( ( w > 1 ) || ( w < 0 ) ) }; + if ( mask1 ) { + out = nan("0xfff8000000000000ULL"); + } + + const T tmod { fmod( t, 2.0 * M_PI ) }; + const bool mask2 { ( ( 1 - mask1 ) && ( tmod < ( w * 2.0 * M_PI ) ) ) }; + + if ( mask2 ) { + out = tmod / ( M_PI * w ) - 1; + } + + const bool mask3 { ( ( 1 - mask1 ) && ( 1 - mask2 ) ) }; + if ( mask3 ) { + out = ( M_PI * ( w + 1 ) - tmod ) / ( M_PI * ( 1 - w ) ); + } + y = out; + """, + "_sawtooth_kernel", + options=("-std=c++11",), +) + + +def sawtooth(t, width=1.0): + """ + Return a periodic sawtooth or triangle waveform. 
+ + The sawtooth waveform has a period ``2*pi``, rises from -1 to 1 on the + interval 0 to ``width*2*pi``, then drops from 1 to -1 on the interval + ``width*2*pi`` to ``2*pi``. `width` must be in the interval [0, 1]. + + Note that this is not band-limited. It produces an infinite number + of harmonics, which are aliased back and forth across the frequency + spectrum. + + Parameters + ---------- + t : array_like + Time. + width : array_like, optional + Width of the rising ramp as a proportion of the total cycle. + Default is 1, producing a rising ramp, while 0 produces a falling + ramp. `width` = 0.5 produces a triangle wave. + If an array, causes wave shape to change over time, and must be the + same length as t. + + Returns + ------- + y : ndarray + Output array containing the sawtooth waveform. + + Examples + -------- + A 5 Hz waveform sampled at 500 Hz for 1 second: + + >>> from cupyx.scipy import signal + >>> import matplotlib.pyplot as plt + >>> t = np.linspace(0, 1, 500) + >>> plt.plot(t, signal.sawtooth(2 * np.pi * 5 * t)) + """ + t, w = cupy.asarray(t), cupy.asarray(width) + y = _sawtooth_kernel(t, w) + return y + + +_square_kernel = cupy.ElementwiseKernel( + "T t, T w", + "float64 y", + """ + const bool mask1 { ( ( w > 1 ) || ( w < 0 ) ) }; + if ( mask1 ) { + y = nan("0xfff8000000000000ULL"); + } + + const T tmod { fmod( t, 2.0 * M_PI ) }; + const bool mask2 { ( ( 1 - mask1 ) && ( tmod < ( w * 2.0 * M_PI ) ) ) }; + + if ( mask2 ) { + y = 1; + } + + const bool mask3 { ( ( 1 - mask1 ) && ( 1 - mask2 ) ) }; + if ( mask3 ) { + y = -1; + } + + """, + "_square_kernel", + options=("-std=c++11",), +) + + +def square(t, duty=0.5): + """ + Return a periodic square-wave waveform. + + The square wave has a period ``2*pi``, has value +1 from 0 to + ``2*pi*duty`` and -1 from ``2*pi*duty`` to ``2*pi``. `duty` must be in + the interval [0,1]. + + Note that this is not band-limited. 
It produces an infinite number + of harmonics, which are aliased back and forth across the frequency + spectrum. + + Parameters + ---------- + t : array_like + The input time array. + duty : array_like, optional + Duty cycle. Default is 0.5 (50% duty cycle). + If an array, causes wave shape to change over time, and must be the + same length as t. + + Returns + ------- + y : ndarray + Output array containing the square waveform. + + Examples + -------- + A 5 Hz waveform sampled at 500 Hz for 1 second: + + >>> import cupyx.scipy.signal + >>> import cupy as cp + >>> import matplotlib.pyplot as plt + >>> t = cupy.linspace(0, 1, 500, endpoint=False) + >>> plt.plot(cupy.asnumpy(t), cupy.asnumpy(cupyx.scipy.signal.square(2 * cupy.pi * 5 * t))) + >>> plt.ylim(-2, 2) + + A pulse-width modulated sine wave: + + >>> plt.figure() + >>> sig = cupy.sin(2 * cupy.pi * t) + >>> pwm = cupyx.scipy.signal.square(2 * cupy.pi * 30 * t, duty=(sig + 1)/2) + >>> plt.subplot(2, 1, 1) + >>> plt.plot(cupy.asnumpy(t), cupy.asnumpy(sig)) + >>> plt.subplot(2, 1, 2) + >>> plt.plot(cupy.asnumpy(t), cupy.asnumpy(pwm)) + >>> plt.ylim(-1.5, 1.5) + + """ # NOQA + t, w = cupy.asarray(t), cupy.asarray(duty) + y = _square_kernel(t, w) + return y + + +_gausspulse_kernel_F_F = cupy.ElementwiseKernel( + "T t, T a, T fc", + "T yI", + """ + T yenv = exp(-a * t * t); + yI = yenv * cos( 2 * M_PI * fc * t); + """, + "_gausspulse_kernel", + options=("-std=c++11",), +) + +_gausspulse_kernel_F_T = cupy.ElementwiseKernel( + "T t, T a, T fc", + "T yI, T yenv", + """ + yenv = exp(-a * t * t); + yI = yenv * cos( 2 * M_PI * fc * t); + """, + "_gausspulse_kernel", + options=("-std=c++11",), +) + +_gausspulse_kernel_T_F = cupy.ElementwiseKernel( + "T t, T a, T fc", + "T yI, T yQ", + """ + T yenv { exp(-a * t * t) }; + + T l_yI {}; + T l_yQ {}; + sincos(2 * M_PI * fc * t, &l_yQ, &l_yI); + yI = yenv * l_yI; + yQ = yenv * l_yQ; + """, + "_gausspulse_kernel", + options=("-std=c++11",), +) + +_gausspulse_kernel_T_T = 
cupy.ElementwiseKernel( + "T t, T a, T fc", + "T yI, T yQ, T yenv", + """ + yenv = exp(-a * t * t); + + T l_yI {}; + T l_yQ {}; + sincos(2 * M_PI * fc * t, &l_yQ, &l_yI); + yI = yenv * l_yI; + yQ = yenv * l_yQ; + """, + "_gausspulse_kernel", + options=("-std=c++11",), +) + + +def gausspulse(t, fc=1000, bw=0.5, bwr=-6, tpr=-60, retquad=False, + retenv=False): + """ + Return a Gaussian modulated sinusoid: + + ``exp(-a t^2) exp(1j*2*pi*fc*t).`` + + If `retquad` is True, then return the real and imaginary parts + (in-phase and quadrature). + If `retenv` is True, then return the envelope (unmodulated signal). + Otherwise, return the real part of the modulated sinusoid. + + Parameters + ---------- + t : ndarray or the string 'cutoff' + Input array. + fc : int, optional + Center frequency (e.g. Hz). Default is 1000. + bw : float, optional + Fractional bandwidth in frequency domain of pulse (e.g. Hz). + Default is 0.5. + bwr : float, optional + Reference level at which fractional bandwidth is calculated (dB). + Default is -6. + tpr : float, optional + If `t` is 'cutoff', then the function returns the cutoff + time for when the pulse amplitude falls below `tpr` (in dB). + Default is -60. + retquad : bool, optional + If True, return the quadrature (imaginary) as well as the real part + of the signal. Default is False. + retenv : bool, optional + If True, return the envelope of the signal. Default is False. + + Returns + ------- + yI : ndarray + Real part of signal. Always returned. + yQ : ndarray + Imaginary part of signal. Only returned if `retquad` is True. + yenv : ndarray + Envelope of signal. Only returned if `retenv` is True. 
+ + See Also + -------- + cupyx.scipy.signal.morlet + + Examples + -------- + Plot real component, imaginary component, and envelope for a 5 Hz pulse, + sampled at 100 Hz for 2 seconds: + + >>> import cupyx.scipy.signal + >>> import cupy as cp + >>> import matplotlib.pyplot as plt + >>> t = cupy.linspace(-1, 1, 2 * 100, endpoint=False) + >>> i, q, e = cupyx.scipy.signal.gausspulse(t, fc=5, retquad=True, retenv=True) + >>> plt.plot(cupy.asnumpy(t), cupy.asnumpy(i), cupy.asnumpy(t), cupy.asnumpy(q), + cupy.asnumpy(t), cupy.asnumpy(e), '--') + + """ # NOQA + if fc < 0: + raise ValueError("Center frequency (fc=%.2f) must be >=0." % fc) + if bw <= 0: + raise ValueError("Fractional bandwidth (bw=%.2f) must be > 0." % bw) + if bwr >= 0: + raise ValueError( + "Reference level for bandwidth (bwr=%.2f) must " "be < 0 dB" % bwr + ) + + # exp(-a t^2) <-> sqrt(pi/a) exp(-pi^2/a * f^2) = g(f) + + ref = pow(10.0, bwr / 20.0) + # fdel = fc*bw/2: g(fdel) = ref --- solve this for a + # + # pi^2/a * fc^2 * bw^2 /4=-log(ref) + a = -((np.pi * fc * bw) ** 2) / (4.0 * np.log(ref)) + + if isinstance(t, str): + if t == "cutoff": # compute cut_off point + # Solve exp(-a tc**2) = tref for tc + # tc = sqrt(-log(tref) / a) where tref = 10^(tpr/20) + if tpr >= 0: + raise ValueError( + "Reference level for time cutoff must " "be < 0 dB") + tref = pow(10.0, tpr / 20.0) + return np.sqrt(-np.log(tref) / a) + else: + raise ValueError("If `t` is a string, it must be 'cutoff'") + + t = cupy.asarray(t) + + if not retquad and not retenv: + return _gausspulse_kernel_F_F(t, a, fc) + if not retquad and retenv: + return _gausspulse_kernel_F_T(t, a, fc) + if retquad and not retenv: + return _gausspulse_kernel_T_F(t, a, fc) + if retquad and retenv: + return _gausspulse_kernel_T_T(t, a, fc) + + +_chirp_phase_lin_kernel_real = cupy.ElementwiseKernel( + "T t, T f0, T t1, T f1, T phi", + "T phase", + """ + const T beta { (f1 - f0) / t1 }; + const T temp { 2 * M_PI * (f0 * t + 0.5 * beta * t * t) }; + // Convert 
phi to radians. + phase = cos(temp + phi); + """, + "_chirp_phase_lin_kernel", + options=("-std=c++11",), +) + +_chirp_phase_lin_kernel_cplx = cupy.ElementwiseKernel( + "T t, T f0, T t1, T f1, T phi", + "Y phase", + """ + const T beta { (f1 - f0) / t1 }; + const T temp { 2 * M_PI * (f0 * t + 0.5 * beta * t * t) }; + // Convert phi to radians. + phase = Y(cos(temp + phi), cos(temp + phi + M_PI/2) * -1); + """, + "_chirp_phase_lin_kernel", + options=("-std=c++11",), +) + +_chirp_phase_quad_kernel = cupy.ElementwiseKernel( + "T t, T f0, T t1, T f1, T phi, bool vertex_zero", + "T phase", + """ + T temp {}; + const T beta { (f1 - f0) / (t1 * t1) }; + if ( vertex_zero ) { + temp = 2 * M_PI * (f0 * t + beta * (t * t * t) / 3); + } else { + temp = 2 * M_PI * + ( f1 * t + beta * + ( ( (t1 - t) * (t1 - t) * (t1 - t) ) - (t1 * t1 * t1)) / 3); + } + // Convert phi to radians. + phase = cos(temp + phi); + """, + "_chirp_phase_quad_kernel", + options=("-std=c++11",), +) + +_chirp_phase_log_kernel = cupy.ElementwiseKernel( + "T t, T f0, T t1, T f1, T phi", + "T phase", + """ + T temp {}; + if ( f0 == f1 ) { + temp = 2 * M_PI * f0 * t; + } else { + T beta { t1 / log(f1 / f0) }; + temp = 2 * M_PI * beta * f0 * ( pow(f1 / f0, t / t1) - 1.0 ); + } + // Convert phi to radians. + phase = cos(temp + phi); + """, + "_chirp_phase_log_kernel", + options=("-std=c++11",), +) + +_chirp_phase_hyp_kernel = cupy.ElementwiseKernel( + "T t, T f0, T t1, T f1, T phi", + "T phase", + """ + T temp {}; + if ( f0 == f1 ) { + temp = 2 * M_PI * f0 * t; + } else { + T sing { -f1 * t1 / (f0 - f1) }; + temp = 2 * M_PI * ( -sing * f0 ) * log( abs( 1 - t / sing ) ); + } + // Convert phi to radians. + phase = cos(temp + phi); + """, + "_chirp_phase_hyp_kernel", + options=("-std=c++11",), +) + + +def chirp(t, f0, t1, f1, method="linear", phi=0, vertex_zero=True): + """Frequency-swept cosine generator. 
+ + In the following, 'Hz' should be interpreted as 'cycles per unit'; + there is no requirement here that the unit is one second. The + important distinction is that the units of rotation are cycles, not + radians. Likewise, `t` could be a measurement of space instead of time. + + Parameters + ---------- + t : array_like + Times at which to evaluate the waveform. + f0 : float + Frequency (e.g. Hz) at time t=0. + t1 : float + Time at which `f1` is specified. + f1 : float + Frequency (e.g. Hz) of the waveform at time `t1`. + method : {'linear', 'quadratic', 'logarithmic', 'hyperbolic'}, optional + Kind of frequency sweep. If not given, `linear` is assumed. See + Notes below for more details. + phi : float, optional + Phase offset, in degrees. Default is 0. + vertex_zero : bool, optional + This parameter is only used when `method` is 'quadratic'. + It determines whether the vertex of the parabola that is the graph + of the frequency is at t=0 or t=t1. + + Returns + ------- + y : ndarray + A numpy array containing the signal evaluated at `t` with the + requested time-varying frequency. More precisely, the function + returns ``cos(phase + (pi/180)*phi)`` where `phase` is the integral + (from 0 to `t`) of ``2*pi*f(t)``. ``f(t)`` is defined below. + + Examples + -------- + The following will be used in the examples: + + >>> from cupyx.scipy.signal import chirp, spectrogram + >>> import matplotlib.pyplot as plt + >>> import cupy as cp + + For the first example, we'll plot the waveform for a linear chirp + from 6 Hz to 1 Hz over 10 seconds: + + >>> t = cupy.linspace(0, 10, 5001) + >>> w = chirp(t, f0=6, f1=1, t1=10, method='linear') + >>> plt.plot(cupy.asnumpy(t), cupy.asnumpy(w)) + >>> plt.title("Linear Chirp, f(0)=6, f(10)=1") + >>> plt.xlabel('t (sec)') + >>> plt.show() + + For the remaining examples, we'll use higher frequency ranges, + and demonstrate the result using `cupyx.scipy.signal.spectrogram`. + We'll use a 10 second interval sampled at 8000 Hz. 
+ + >>> fs = 8000 + >>> T = 10 + >>> t = cupy.linspace(0, T, T*fs, endpoint=False) + + Quadratic chirp from 1500 Hz to 250 Hz over 10 seconds + (vertex of the parabolic curve of the frequency is at t=0): + + >>> w = chirp(t, f0=1500, f1=250, t1=10, method='quadratic') + >>> ff, tt, Sxx = spectrogram(w, fs=fs, noverlap=256, nperseg=512, + ... nfft=2048) + >>> plt.pcolormesh(cupy.asnumpy(tt), cupy.asnumpy(ff[:513]), + cupy.asnumpy(Sxx[:513]), cmap='gray_r') + >>> plt.title('Quadratic Chirp, f(0)=1500, f(10)=250') + >>> plt.xlabel('t (sec)') + >>> plt.ylabel('Frequency (Hz)') + >>> plt.grid() + >>> plt.show() + """ + t = cupy.asarray(t) + + if cupy.issubdtype(t.dtype, cupy.integer): + t = t.astype(cupy.float64) + + phi *= np.pi / 180 + type = 'real' + + if method in ["linear", "lin", "li"]: + if type == "real": + return _chirp_phase_lin_kernel_real(t, f0, t1, f1, phi) + elif type == "complex": + # type hard-coded to 'real' above, so this code path is never used + if t.real.dtype.kind == 'f' and t.dtype.itemsize == 8: + phase = cupy.empty(t.shape, dtype=cupy.complex128) + else: + phase = cupy.empty(t.shape, dtype=cupy.complex64) + _chirp_phase_lin_kernel_cplx(t, f0, t1, f1, phi, phase) + return phase + else: + raise NotImplementedError("No kernel for type {}".format(type)) + + elif method in ["quadratic", "quad", "q"]: + return _chirp_phase_quad_kernel(t, f0, t1, f1, phi, vertex_zero) + + elif method in ["logarithmic", "log", "lo"]: + if f0 * f1 <= 0.0: + raise ValueError( + "For a logarithmic chirp, f0 and f1 must be " + "nonzero and have the same sign." + ) + return _chirp_phase_log_kernel(t, f0, t1, f1, phi) + + elif method in ["hyperbolic", "hyp"]: + if f0 == 0 or f1 == 0: + raise ValueError( + "For a hyperbolic chirp, f0 and f1 must be " "nonzero.") + return _chirp_phase_hyp_kernel(t, f0, t1, f1, phi) + + else: + raise ValueError( + "method must be 'linear', 'quadratic', 'logarithmic'," + " or 'hyperbolic', but a value of %r was given." 
% method + ) + + +UNIT_KERNEL = r''' +#include +#include +#include + + +template +__global__ void unit_impulse(const int n, const int iidx, T* out) { + const int idx = blockIdx.x * blockDim.x + threadIdx.x; + + if(idx >= n) { + return; + } + + if(idx == iidx) { + out[idx] = 1; + } else { + out[idx] = 0; + } +} +''' + +UNIT_MODULE = cupy.RawModule( + code=UNIT_KERNEL, options=('-std=c++11',), + name_expressions=[f'unit_impulse<{x}>' for x in TYPE_NAMES]) + + +def unit_impulse(shape, idx=None, dtype=float): + """ + Unit impulse signal (discrete delta function) or unit basis vector. + + Parameters + ---------- + shape : int or tuple of int + Number of samples in the output (1-D), or a tuple that represents the + shape of the output (N-D). + idx : None or int or tuple of int or 'mid', optional + Index at which the value is 1. If None, defaults to the 0th element. + If ``idx='mid'``, the impulse will be centered at ``shape // 2`` in + all dimensions. If an int, the impulse will be at `idx` in all + dimensions. + dtype : data-type, optional + The desired data-type for the array, e.g., ``numpy.int8``. Default is + ``numpy.float64``. + + Returns + ------- + y : ndarray + Output array containing an impulse signal. + + Notes + ----- + The 1D case is also known as the Kronecker delta. 
+ + Examples + -------- + An impulse at the 0th element (:math:`\\delta[n]`): + + >>> import cupyx.scipy.signal + >>> import cupy as cp + >>> cupyx.scipy.signal.unit_impulse(8) + array([ 1., 0., 0., 0., 0., 0., 0., 0.]) + + Impulse offset by 2 samples (:math:`\\delta[n-2]`): + + >>> cupyx.scipy.signal.unit_impulse(7, 2) + array([ 0., 0., 1., 0., 0., 0., 0.]) + + 2-dimensional impulse, centered: + + >>> cupyx.scipy.signal.unit_impulse((3, 3), 'mid') + array([[ 0., 0., 0.], + [ 0., 1., 0.], + [ 0., 0., 0.]]) + + Impulse at (2, 2), using broadcasting: + + >>> cupyx.scipy.signal.unit_impulse((4, 4), 2) + array([[ 0., 0., 0., 0.], + [ 0., 0., 0., 0.], + [ 0., 0., 1., 0.], + [ 0., 0., 0., 0.]]) + """ + out = cupy.empty(shape, dtype) + shape = np.atleast_1d(shape) + + if idx is None: + idx = (0,) * len(shape) + elif idx == 'mid': + idx = tuple(shape // 2) + elif not hasattr(idx, "__iter__"): + idx = (idx,) * len(shape) + + pos = np.ravel_multi_index(idx, out.shape) + + n = out.size + block_sz = 128 + n_blocks = (n + block_sz - 1) // block_sz + + unit_impulse_kernel = _get_module_func(UNIT_MODULE, 'unit_impulse', out) + unit_impulse_kernel((n_blocks,), (block_sz,), (n, pos, out)) + return out diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/_wavelets.py b/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/_wavelets.py new file mode 100644 index 0000000000000000000000000000000000000000..79787c61593005098ab6cb224e553b15c9a5ec80 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/_wavelets.py @@ -0,0 +1,347 @@ + +""" +Wavelet-generating functions. + +Some of the functions defined here were ported directly from CuSignal under +terms of the MIT license, under the following notice: + +Copyright (c) 2019-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ +Permission is hereby granted, free of charge, to any person obtaining a +copy of this software and associated documentation files (the "Software"), +to deal in the Software without restriction, including without limitation +and/or sell copies of the Software, and to permit persons to whom the +Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. +""" + +import cupy +import numpy as np + +from cupyx.scipy.signal._signaltools import convolve + + +_qmf_kernel = cupy.ElementwiseKernel( + "raw T coef", + "T output", + """ + const int sign { ( i & 1 ) ? -1 : 1 }; + output = ( coef[_ind.size() - ( i + 1 )] ) * sign; + """, + "_qmf_kernel", + options=("-std=c++11",), +) + + +def qmf(hk): + """ + Return high-pass qmf filter from low-pass + + Parameters + ---------- + hk : array_like + Coefficients of high-pass filter. 
+ + """ + hk = cupy.asarray(hk) + return _qmf_kernel(hk, size=len(hk)) + + +_morlet_kernel = cupy.ElementwiseKernel( + "float64 w, float64 s, bool complete", + "complex128 output", + """ + const double x { start + delta * i }; + + thrust::complex temp { exp( + thrust::complex( 0, w * x ) ) }; + + if ( complete ) { + temp -= exp( -0.5 * ( w * w ) ); + } + + output = temp * exp( -0.5 * ( x * x ) ) * pow( M_PI, -0.25 ) + """, + "_morlet_kernel", + options=("-std=c++11",), + loop_prep="const double end { s * 2.0 * M_PI }; \ + const double start { -s * 2.0 * M_PI }; \ + const double delta { ( end - start ) / ( _ind.size() - 1 ) };", +) + + +def morlet(M, w=5.0, s=1.0, complete=True): + """ + Complex Morlet wavelet. + + Parameters + ---------- + M : int + Length of the wavelet. + w : float, optional + Omega0. Default is 5 + s : float, optional + Scaling factor, windowed from ``-s*2*pi`` to ``+s*2*pi``. Default is 1. + complete : bool, optional + Whether to use the complete or the standard version. + + Returns + ------- + morlet : (M,) ndarray + + See Also + -------- + cupyx.scipy.signal.gausspulse + + Notes + ----- + The standard version:: + + pi**-0.25 * exp(1j*w*x) * exp(-0.5*(x**2)) + + This commonly used wavelet is often referred to simply as the + Morlet wavelet. Note that this simplified version can cause + admissibility problems at low values of `w`. + + The complete version:: + + pi**-0.25 * (exp(1j*w*x) - exp(-0.5*(w**2))) * exp(-0.5*(x**2)) + + This version has a correction + term to improve admissibility. For `w` greater than 5, the + correction term is negligible. + + Note that the energy of the return wavelet is not normalised + according to `s`. + + The fundamental frequency of this wavelet in Hz is given + by ``f = 2*s*w*r / M`` where `r` is the sampling rate. + + Note: This function was created before `cwt` and is not compatible + with it. 
+ + """ + return _morlet_kernel(w, s, complete, size=M) + + +_ricker_kernel = cupy.ElementwiseKernel( + "float64 a", + "float64 total", + """ + const double vec { i - ( _ind.size() - 1.0 ) * 0.5 }; + const double xsq { vec * vec }; + const double mod { 1 - xsq / wsq }; + const double gauss { exp( -xsq / ( 2.0 * wsq ) ) }; + + total = A * mod * gauss; + """, + "_ricker_kernel", + options=("-std=c++11",), + loop_prep="const double A { 2.0 / ( sqrt( 3 * a ) * pow( M_PI, 0.25 ) ) };" + " const double wsq { a * a };", +) + + +def ricker(points, a): + """ + Return a Ricker wavelet, also known as the "Mexican hat wavelet". + + It models the function: + + ``A (1 - x^2/a^2) exp(-x^2/2 a^2)``, + + where ``A = 2/sqrt(3a)pi^1/4``. + + Parameters + ---------- + points : int + Number of points in `vector`. + Will be centered around 0. + a : scalar + Width parameter of the wavelet. + + Returns + ------- + vector : (N,) ndarray + Array of length `points` in shape of ricker curve. + + Examples + -------- + >>> import cupyx.scipy.signal + >>> import cupy as cp + >>> import matplotlib.pyplot as plt + + >>> points = 100 + >>> a = 4.0 + >>> vec2 = cupyx.scipy.signal.ricker(points, a) + >>> print(len(vec2)) + 100 + >>> plt.plot(cupy.asnumpy(vec2)) + >>> plt.show() + + """ + return _ricker_kernel(a, size=int(points)) + + +_morlet2_kernel = cupy.ElementwiseKernel( + "float64 w, float64 s", + "complex128 output", + """ + const double x { ( i - ( _ind.size() - 1.0 ) * 0.5 ) / s }; + + thrust::complex temp { exp( + thrust::complex( 0, w * x ) ) }; + + output = sqrt( 1 / s ) * temp * exp( -0.5 * ( x * x ) ) * + pow( M_PI, -0.25 ) + """, + "_morlet_kernel", + options=("-std=c++11",), + loop_prep="", +) + + +def morlet2(M, s, w=5): + """ + Complex Morlet wavelet, designed to work with `cwt`. 
+ Returns the complete version of morlet wavelet, normalised + according to `s`:: + + exp(1j*w*x/s) * exp(-0.5*(x/s)**2) * pi**(-0.25) * sqrt(1/s) + + Parameters + ---------- + M : int + Length of the wavelet. + s : float + Width parameter of the wavelet. + w : float, optional + Omega0. Default is 5 + + Returns + ------- + morlet : (M,) ndarray + + See Also + -------- + morlet : Implementation of Morlet wavelet, incompatible with `cwt` + + Notes + ----- + This function was designed to work with `cwt`. Because `morlet2` + returns an array of complex numbers, the `dtype` argument of `cwt` + should be set to `complex128` for best results. + + Note the difference in implementation with `morlet`. + The fundamental frequency of this wavelet in Hz is given by:: + + f = w*fs / (2*s*np.pi) + + where ``fs`` is the sampling rate and `s` is the wavelet width parameter. + Similarly we can get the wavelet width parameter at ``f``:: + + s = w*fs / (2*f*np.pi) + + Examples + -------- + >>> from cupyx.scipy import signal + >>> import matplotlib.pyplot as plt + >>> M = 100 + >>> s = 4.0 + >>> w = 2.0 + >>> wavelet = signal.morlet2(M, s, w) + >>> plt.plot(abs(wavelet)) + >>> plt.show() + + This example shows basic use of `morlet2` with `cwt` in time-frequency + analysis: + + >>> from cupyx.scipy import signal + >>> import matplotlib.pyplot as plt + >>> t, dt = np.linspace(0, 1, 200, retstep=True) + >>> fs = 1/dt + >>> w = 6. + >>> sig = np.cos(2*np.pi*(50 + 10*t)*t) + np.sin(40*np.pi*t) + >>> freq = np.linspace(1, fs/2, 100) + >>> widths = w*fs / (2*freq*np.pi) + >>> cwtm = signal.cwt(sig, signal.morlet2, widths, w=w) + >>> plt.pcolormesh(t, freq, np.abs(cwtm), + cmap='viridis', shading='gouraud') + >>> plt.show() + """ + return _morlet2_kernel(w, s, size=int(M)) + + +def cwt(data, wavelet, widths): + """ + Continuous wavelet transform. + + Performs a continuous wavelet transform on `data`, + using the `wavelet` function. 
A CWT performs a convolution + with `data` using the `wavelet` function, which is characterized + by a width parameter and length parameter. + + Parameters + ---------- + data : (N,) ndarray + data on which to perform the transform. + wavelet : function + Wavelet function, which should take 2 arguments. + The first argument is the number of points that the returned vector + will have (len(wavelet(length,width)) == length). + The second is a width parameter, defining the size of the wavelet + (e.g. standard deviation of a gaussian). See `ricker`, which + satisfies these requirements. + widths : (M,) sequence + Widths to use for transform. + + Returns + ------- + cwt: (M, N) ndarray + Will have shape of (len(widths), len(data)). + + Notes + ----- + :: + + length = min(10 * width[ii], len(data)) + cwt[ii,:] = cupyx.scipy.signal.convolve(data, wavelet(length, + width[ii]), mode='same') + + Examples + -------- + >>> import cupyx.scipy.signal + >>> import cupy as cp + >>> import matplotlib.pyplot as plt + >>> t = cupy.linspace(-1, 1, 200, endpoint=False) + >>> sig = cupy.cos(2 * cupy.pi * 7 * t) + cupyx.scipy.signal.gausspulse(t - 0.4, fc=2) + >>> widths = cupy.arange(1, 31) + >>> cwtmatr = cupyx.scipy.signal.cwt(sig, cupyx.scipy.signal.ricker, widths) + >>> plt.imshow(abs(cupy.asnumpy(cwtmatr)), extent=[-1, 1, 31, 1], + cmap='PRGn', aspect='auto', vmax=abs(cwtmatr).max(), + vmin=-abs(cwtmatr).max()) + >>> plt.show() + + """ # NOQA + if cupy.asarray(wavelet(1, 1)).dtype.char in "FDG": + dtype = cupy.complex128 + else: + dtype = cupy.float64 + + output = cupy.empty([len(widths), len(data)], dtype=dtype) + + for ind, width in enumerate(widths): + N = np.min([10 * int(width), len(data)]) + wavelet_data = cupy.conj(wavelet(N, int(width)))[::-1] + output[ind, :] = convolve(data, wavelet_data, mode="same") + return output diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/windows/__init__.py 
b/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/windows/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..988de8e0acadd2d227d1c4c3337e7f141bd1d296 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/windows/__init__.py @@ -0,0 +1,23 @@ +from cupyx.scipy.signal.windows._windows import general_cosine # NOQA +from cupyx.scipy.signal.windows._windows import boxcar # NOQA +from cupyx.scipy.signal.windows._windows import triang # NOQA +from cupyx.scipy.signal.windows._windows import parzen # NOQA +from cupyx.scipy.signal.windows._windows import bohman # NOQA +from cupyx.scipy.signal.windows._windows import blackman # NOQA +from cupyx.scipy.signal.windows._windows import nuttall # NOQA +from cupyx.scipy.signal.windows._windows import blackmanharris # NOQA +from cupyx.scipy.signal.windows._windows import flattop # NOQA +from cupyx.scipy.signal.windows._windows import bartlett # NOQA +from cupyx.scipy.signal.windows._windows import hann # NOQA +from cupyx.scipy.signal.windows._windows import tukey # NOQA +from cupyx.scipy.signal.windows._windows import barthann # NOQA +from cupyx.scipy.signal.windows._windows import general_hamming # NOQA +from cupyx.scipy.signal.windows._windows import hamming # NOQA +from cupyx.scipy.signal.windows._windows import kaiser # NOQA +from cupyx.scipy.signal.windows._windows import gaussian # NOQA +from cupyx.scipy.signal.windows._windows import general_gaussian # NOQA +from cupyx.scipy.signal.windows._windows import chebwin # NOQA +from cupyx.scipy.signal.windows._windows import cosine # NOQA +from cupyx.scipy.signal.windows._windows import exponential # NOQA +from cupyx.scipy.signal.windows._windows import taylor # NOQA +from cupyx.scipy.signal.windows._windows import get_window # NOQA diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/windows/__pycache__/__init__.cpython-310.pyc 
b/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/windows/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b829ae4ac5ffa28e4e65a750914b14b65bd36658 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/windows/__pycache__/__init__.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/windows/__pycache__/_windows.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/windows/__pycache__/_windows.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e6f121a46911cfbb6595ddf5537b4ffb00ce63ce Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/windows/__pycache__/_windows.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/windows/_windows.py b/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/windows/_windows.py new file mode 100644 index 0000000000000000000000000000000000000000..f803ac6fdc12669441bada8187d61367b79d7580 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/windows/_windows.py @@ -0,0 +1,2196 @@ +""" +Filtering and spectral estimation windows. + +Some of the functions defined on this namespace were ported directly +from CuSignal under terms of the MIT license. +""" + +# Copyright (c) 2019-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the "Software"), +# to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, +# and/or sell copies of the Software, and to permit persons to whom the +# Software is furnished to do so, subject to the following conditions: + +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. + +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +# DEALINGS IN THE SOFTWARE. 
+ +import warnings +from typing import Set + +import cupy +import numpy as np + + +def _len_guards(M): + """Handle small or incorrect window lengths""" + if int(M) != M or M < 0: + raise ValueError("Window length M must be a non-negative integer") + return M <= 1 + + +def _extend(M, sym): + """Extend window by 1 sample if needed for DFT-even symmetry""" + if not sym: + return M + 1, True + else: + return M, False + + +def _truncate(w, needed): + """Truncate window by 1 sample if needed for DFT-even symmetry""" + if needed: + return w[:-1] + else: + return w + + +_general_cosine_kernel = cupy.ElementwiseKernel( + "raw T a, int32 n", + "T w", + """ + const T fac { -M_PI + delta * i }; + T temp {}; + for ( int k = 0; k < n; k++ ) { + temp += a[k] * cos( k * fac ); + } + w = temp; + """, + "_general_cosine_kernel", + options=("-std=c++11",), + loop_prep="const double delta { ( M_PI - -M_PI ) / ( _ind.size() - 1 ) }", +) + + +def general_cosine(M, a, sym=True): + r""" + Generic weighted sum of cosine terms window + + Parameters + ---------- + M : int + Number of points in the output window + a : array_like + Sequence of weighting coefficients. This uses the convention of being + centered on the origin, so these will typically all be positive + numbers, not alternating sign. + sym : bool, optional + When True (default), generates a symmetric window, for use in filter + design. + When False, generates a periodic window, for use in spectral analysis. + + Notes + ----- + For more information, see [1]_ and [2]_ + + References + ---------- + .. [1] A. Nuttall, "Some windows with very good sidelobe behavior," IEEE + Transactions on Acoustics, Speech, and Signal Processing, vol. 29, + no. 1, pp. 84-91, Feb 1981. + `10.1109/TASSP.1981.1163506 `_ + .. [2] Heinzel G. 
et al., "Spectrum and spectral density estimation by the + Discrete Fourier transform (DFT), including a comprehensive list of + window functions and some new flat-top windows", February 15, 2002 + https://holometer.fnal.gov/GH_FFT.pdf + + Examples + -------- + Heinzel describes a flat-top window named "HFT90D" with formula: [2]_ + + .. math:: w_j = 1 - 1.942604 \cos(z) + 1.340318 \cos(2z) + - 0.440811 \cos(3z) + 0.043097 \cos(4z) + + where + + .. math:: z = \frac{2 \pi j}{N}, j = 0...N - 1 + + Since this uses the convention of starting at the origin, to reproduce the + window, we need to convert every other coefficient to a positive number: + + >>> HFT90D = [1, 1.942604, 1.340318, 0.440811, 0.043097] + + The paper states that the highest sidelobe is at -90.2 dB. Reproduce + Figure 42 by plotting the window and its frequency response, and confirm + the sidelobe level in red: + + >>> from cupyx.scipy.signal.windows import general_cosine + >>> from cupy.fft import fft, fftshift + >>> import cupy + >>> import matplotlib.pyplot as plt + + >>> window = general_cosine(1000, HFT90D, sym=False) + >>> plt.plot(cupy.asnumpy(window)) + >>> plt.title("HFT90D window") + >>> plt.ylabel("Amplitude") + >>> plt.xlabel("Sample") + + >>> plt.figure() + >>> A = fft(window, 10000) / (len(window)/2.0) + >>> freq = cupy.linspace(-0.5, 0.5, len(A)) + >>> response = cupy.abs(fftshift(A / cupy.abs(A).max())) + >>> response = 20 * cupy.log10(cupy.maximum(response, 1e-10)) + >>> plt.plot(cupy.asnumpy(freq), cupy.asnumpy(response)) + >>> plt.axis([-50/1000, 50/1000, -140, 0]) + >>> plt.title("Frequency response of the HFT90D window") + >>> plt.ylabel("Normalized magnitude [dB]") + >>> plt.xlabel("Normalized frequency [cycles per sample]") + >>> plt.axhline(-90.2, color='red') + >>> plt.show() + """ # NOQA + if _len_guards(M): + return cupy.ones(M) + M, needs_trunc = _extend(M, sym) + + a = cupy.asarray(a, dtype=cupy.float64) + + w = _general_cosine_kernel(a, len(a), size=M) + + return 
_truncate(w, needs_trunc) + + +def boxcar(M, sym=True): + r"""Return a boxcar or rectangular window. + + Also known as a rectangular window or Dirichlet window, this is equivalent + to no window at all. + + Parameters + ---------- + M : int + Number of points in the output window. If zero or less, an empty + array is returned. + sym : bool, optional + Whether the window is symmetric. (Has no effect for boxcar.) + + Returns + ------- + w : ndarray + The window, with the maximum value normalized to 1. + + Examples + -------- + Plot the window and its frequency response: + + >>> from cupyx.scipy.signal.windows import boxcar + >>> import cupy + >>> from cupy.fft import fft, fftshift + >>> import matplotlib.pyplot as plt + + >>> window = boxcar(51) + >>> plt.plot(cupy.asnumpy(window)) + >>> plt.title("Boxcar window") + >>> plt.ylabel("Amplitude") + >>> plt.xlabel("Sample") + + >>> plt.figure() + >>> A = fft(window, 2048) / (len(window)/2.0) + >>> freq = cupy.linspace(-0.5, 0.5, len(A)) + >>> response = 20 * cupy.log10(cupy.abs(fftshift(A / cupy.abs(A).max()))) + >>> plt.plot(cupy.asnumpy(freq), cupy.asnumpy(response)) + >>> plt.axis([-0.5, 0.5, -120, 0]) + >>> plt.title("Frequency response of the boxcar window") + >>> plt.ylabel("Normalized magnitude [dB]") + >>> plt.xlabel("Normalized frequency [cycles per sample]") + + """ + if _len_guards(M): + return cupy.ones(M) + M, needs_trunc = _extend(M, sym) + + w = cupy.ones(M, dtype=cupy.float64) + + return _truncate(w, needs_trunc) + + +_triang_kernel = cupy.ElementwiseKernel( + "", + "float64 w", + """ + int n {}; + if ( i < m ) { + n = i + 1; + } else { + n = _ind.size() - i; + } + + if ( odd ) { + w = 2.0 * n / ( _ind.size() + 1.0 ); + } else { + w = ( 2.0 * n - 1.0 ) / _ind.size(); + } + """, + "_triang_kernel", + options=("-std=c++11",), + loop_prep="const int m { static_cast( 0.5 * _ind.size() ) }; \ + const bool odd { _ind.size() & 1 };", +) + + +def triang(M, sym=True): + r"""Return a triangular window. 
+ + Parameters + ---------- + M : int + Number of points in the output window. If zero or less, an empty + array is returned. + sym : bool, optional + When True (default), generates a symmetric window, for use in filter + design. + When False, generates a periodic window, for use in spectral analysis. + + Returns + ------- + w : ndarray + The window, with the maximum value normalized to 1 (though the value 1 + does not appear if `M` is even and `sym` is True). + + See Also + -------- + bartlett : A triangular window that touches zero + + Examples + -------- + Plot the window and its frequency response: + + >>> from cupyx.scipy.signal.windows import triang + >>> import cupy as cp + >>> from cupy.fft import fft, fftshift + >>> import matplotlib.pyplot as plt + + >>> window = triang(51) + >>> plt.plot(cupy.asnumpy(window)) + >>> plt.title("Triangular window") + >>> plt.ylabel("Amplitude") + >>> plt.xlabel("Sample") + + >>> plt.figure() + >>> A = fft(window, 2048) / (len(window)/2.0) + >>> freq = cupy.linspace(-0.5, 0.5, len(A)) + >>> response = cupy.abs(fftshift(A / cupy.abs(A).max())) + >>> response = 20 * cupy.log10(cupy.maximum(response, 1e-10)) + >>> plt.plot(cupy.asnumpy(freq), cupy.asnumpy(response)) + >>> plt.axis([-0.5, 0.5, -120, 0]) + >>> plt.title("Frequency response of the triangular window") + >>> plt.ylabel("Normalized magnitude [dB]") + >>> plt.xlabel("Normalized frequency [cycles per sample]") + + """ + if _len_guards(M): + return cupy.ones(M) + M, needs_trunc = _extend(M, sym) + + w = _triang_kernel(size=M) + + return _truncate(w, needs_trunc) + + +_parzen_kernel = cupy.ElementwiseKernel( + "", + "float64 w", + """ + double n {}; + double temp {}; + double sizeS1 {}; + + if ( odd ) { + sizeS1 = s1 - start + 1.0; + } else { + s1 += 0.5; + s2 += 0.5; + sizeS1 = s1 - start; + } + + double sizeS2 { s2 - start + 1.0 - sizeS1 }; + + if ( i < sizeS1 ) { + n = i + start; + temp = 1.0 - abs( n ) * den; + w = 2.0 * ( temp * temp * temp ); + } else if ( i >= 
sizeS1 && i < ( sizeS1 + sizeS2 ) ) { + n = ( i - sizeS1 - s2 ); + temp = abs( n ) * den; + w = 1.0 - 6.0 * temp * temp + 6.0 * temp * temp * temp; + } else { + n = s1 - ( i - ( sizeS2 + sizeS1 - ( 1 - odd ) ) ); + temp = 1.0 - abs( n ) * den; + w = 2.0 * temp * temp * temp; + } + """, + "_parzen_kernel", + options=("-std=c++11",), + loop_prep="const double start { 0.5 * -( _ind.size () - 1 ) }; \ + const double den { 1.0 / ( 0.5 * _ind.size () ) }; \ + const bool odd { _ind.size() & 1 }; \ + double s1 { floor(-0.25 * ( _ind.size () - 1 ) ) }; \ + double s2 { floor(0.25 * ( _ind.size () - 1 ) ) };", +) + + +def parzen(M, sym=True): + """Return a Parzen window. + + Parameters + ---------- + M : int + Number of points in the output window. If zero, an empty array + is returned. An exception is thrown when it is negative. + sym : bool, optional + When True (default), generates a symmetric window, for use in filter + design. + When False, generates a periodic window, for use in spectral analysis. + + Returns + ------- + w : ndarray + The window, with the maximum value normalized to 1 (though the value 1 + does not appear if `M` is even and `sym` is True). + + Notes + ----- + For more information, see [1]_. + + References + ---------- + .. [1] E. Parzen, "Mathematical Considerations in the Estimation of + Spectra", Technometrics, Vol. 3, No. 2 (May, 1961), pp. 
167-190 + + Examples + -------- + Plot the window and its frequency response: + + >>> import cupy as cp + >>> from cupyx.scipy import signal + >>> from cupyx.scipy.fft import fft, fftshift + >>> import matplotlib.pyplot as plt + + >>> window = signal.windows.parzen(51) + >>> plt.plot(window) + >>> plt.title("Parzen window") + >>> plt.ylabel("Amplitude") + >>> plt.xlabel("Sample") + + >>> plt.figure() + >>> A = fft(window, 2048) / (len(window)/2.0) + >>> freq = cp.linspace(-0.5, 0.5, len(A)) + >>> response = 20 * cp.log10(cp.abs(fftshift(A / abs(A).max()))) + >>> plt.plot(freq, response) + >>> plt.axis([-0.5, 0.5, -120, 0]) + >>> plt.title("Frequency response of the Parzen window") + >>> plt.ylabel("Normalized magnitude [dB]") + >>> plt.xlabel("Normalized frequency [cycles per sample]") + """ + if _len_guards(M): + return cupy.ones(M) + M, needs_trunc = _extend(M, sym) + + w = _parzen_kernel(size=M) + + return _truncate(w, needs_trunc) + + +_bohman_kernel = cupy.ElementwiseKernel( + "", + "float64 w", + """ + const double fac { abs( start + delta * ( i - 1 ) ) }; + if ( i != 0 && i != ( _ind.size() - 1 ) ) { + w = ( 1.0 - fac ) * cos( M_PI * fac ) + 1.0 / M_PI * sin( M_PI * fac ); + } else { + w = 0.0; + } + """, + "_bohman_kernel", + options=("-std=c++11",), + loop_prep="const double delta { 2.0 / ( _ind.size() - 1 ) }; \ + const double start { -1.0 + delta };", +) + + +def bohman(M, sym=True): + r"""Return a Bohman window. + + Parameters + ---------- + M : int + Number of points in the output window. If zero or less, an empty + array is returned. + sym : bool, optional + When True (default), generates a symmetric window, for use in filter + design. + When False, generates a periodic window, for use in spectral analysis. + + Returns + ------- + w : ndarray + The window, with the maximum value normalized to 1 (though the value 1 + does not appear if `M` is even and `sym` is True). 
+ + Examples + -------- + Plot the window and its frequency response: + + >>> from cupyx.scipy.signal.windows import bohman + >>> import cupy as cp + >>> from cupy.fft import fft, fftshift + >>> import matplotlib.pyplot as plt + + >>> window = bohman(51) + >>> plt.plot(cupy.asnumpy(window)) + >>> plt.title("Bohman window") + >>> plt.ylabel("Amplitude") + >>> plt.xlabel("Sample") + + >>> plt.figure() + >>> A = fft(window, 2048) / (len(window)/2.0) + >>> freq = cupy.linspace(-0.5, 0.5, len(A)) + >>> response = 20 * cupy.log10(cupy.abs(fftshift(A / cupy.abs(A).max()))) + >>> plt.plot(cupy.asnumpy(freq), cupy.asnumpy(response)) + >>> plt.axis([-0.5, 0.5, -120, 0]) + >>> plt.title("Frequency response of the Bohman window") + >>> plt.ylabel("Normalized magnitude [dB]") + >>> plt.xlabel("Normalized frequency [cycles per sample]") + + """ + if _len_guards(M): + return cupy.ones(M) + M, needs_trunc = _extend(M, sym) + + w = _bohman_kernel(size=M) + + return _truncate(w, needs_trunc) + + +def blackman(M, sym=True): + r""" + Return a Blackman window. + + The Blackman window is a taper formed by using the first three terms of + a summation of cosines. It was designed to have close to the minimal + leakage possible. It is close to optimal, only slightly worse than a + Kaiser window. + + Parameters + ---------- + M : int + Number of points in the output window. If zero or less, an empty + array is returned. + sym : bool, optional + When True (default), generates a symmetric window, for use in filter + design. + When False, generates a periodic window, for use in spectral analysis. + + Returns + ------- + w : ndarray + The window, with the maximum value normalized to 1 (though the value 1 + does not appear if `M` is even and `sym` is True). + + Notes + ----- + The Blackman window is defined as + + .. 
math:: w(n) = 0.42 - 0.5 \cos(2\pi n/M) + 0.08 \cos(4\pi n/M) + + The "exact Blackman" window was designed to null out the third and fourth + sidelobes, but has discontinuities at the boundaries, resulting in a + 6 dB/oct fall-off. This window is an approximation of the "exact" window, + which does not null the sidelobes as well, but is smooth at the edges, + improving the fall-off rate to 18 dB/oct. [3]_ + + Most references to the Blackman window come from the signal processing + literature, where it is used as one of many windowing functions for + smoothing values. It is also known as an apodization (which means + "removing the foot", i.e. smoothing discontinuities at the beginning + and end of the sampled signal) or tapering function. It is known as a + "near optimal" tapering function, almost as good (by some measures) + as the Kaiser window. + + For more information, see [1]_, [2]_, and [3]_ + + References + ---------- + .. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power + spectra, Dover Publications, New York. + .. [2] Oppenheim, A.V., and R.W. Schafer. Discrete-Time Signal Processing. + Upper Saddle River, NJ: Prentice-Hall, 1999, pp. 468-471. + .. [3] Harris, Fredric J. (Jan 1978). "On the use of Windows for Harmonic + Analysis with the Discrete Fourier Transform". Proceedings of the + IEEE 66 (1): 51-83. 
+ `10.1109/PROC.1978.10837 `_ + + Examples + -------- + Plot the window and its frequency response: + + >>> from cupyx.scipy.signal import blackman + >>> import cupy as cp + >>> from cupy.fft import fft, fftshift + >>> import matplotlib.pyplot as plt + + >>> window = blackman(51) + >>> plt.plot(cupy.asnumpy(window)) + >>> plt.title("Blackman window") + >>> plt.ylabel("Amplitude") + >>> plt.xlabel("Sample") + + >>> plt.figure() + >>> A = fft(window, 2048) / (len(window)/2.0) + >>> freq = cupy.linspace(-0.5, 0.5, len(A)) + >>> response = cupy.abs(fftshift(A / cupy.abs(A).max())) + >>> response = 20 * cupy.log10(cupy.maximum(response, 1e-10)) + >>> plt.plot(cupy.asnumpy(freq), cupy.asnumpy(response)) + >>> plt.axis([-0.5, 0.5, -120, 0]) + >>> plt.title("Frequency response of the Blackman window") + >>> plt.ylabel("Normalized magnitude [dB]") + >>> plt.xlabel("Normalized frequency [cycles per sample]") + + """ + # Docstring adapted from NumPy's blackman function + return general_cosine(M, [0.42, 0.50, 0.08], sym) + + +def nuttall(M, sym=True): + r"""Return a minimum 4-term Blackman-Harris window according to Nuttall. + + This variation is called "Nuttall4c" by Heinzel. [2]_ + + Parameters + ---------- + M : int + Number of points in the output window. If zero or less, an empty + array is returned. + sym : bool, optional + When True (default), generates a symmetric window, for use in filter + design. + When False, generates a periodic window, for use in spectral analysis. + + Returns + ------- + w : ndarray + The window, with the maximum value normalized to 1 (though the value 1 + does not appear if `M` is even and `sym` is True). + + Notes + ----- + For more information, see [1]_ and [2]_ + + References + ---------- + .. [1] A. Nuttall, "Some windows with very good sidelobe behavior," IEEE + Transactions on Acoustics, Speech, and Signal Processing, vol. 29, + no. 1, pp. 84-91, Feb 1981. + `10.1109/TASSP.1981.1163506 `_ + .. [2] Heinzel G. 
et al., "Spectrum and spectral density estimation by the + Discrete Fourier transform (DFT), including a comprehensive list of + window functions and some new flat-top windows", February 15, 2002 + https://holometer.fnal.gov/GH_FFT.pdf + + Examples + -------- + Plot the window and its frequency response: + + >>> from cupyx.scipy.signal.windows import nuttall + >>> import cupy as cp + >>> from cupy.fft import fft, fftshift + >>> import matplotlib.pyplot as plt + + >>> window = nuttall(51) + >>> plt.plot(cupy.asnumpy(window)) + >>> plt.title("Nuttall window") + >>> plt.ylabel("Amplitude") + >>> plt.xlabel("Sample") + + >>> plt.figure() + >>> A = fft(window, 2048) / (len(window)/2.0) + >>> freq = cupy.linspace(-0.5, 0.5, len(A)) + >>> response = 20 * cupy.log10(cupy.abs(fftshift(A / cupy.abs(A).max()))) + >>> plt.plot(cupy.asnumpy(freq), cupy.asnumpy(response)) + >>> plt.axis([-0.5, 0.5, -120, 0]) + >>> plt.title("Frequency response of the Nuttall window") + >>> plt.ylabel("Normalized magnitude [dB]") + >>> plt.xlabel("Normalized frequency [cycles per sample]") + + """ # NOQA + return general_cosine(M, [0.3635819, 0.4891775, 0.1365995, 0.0106411], sym) + + +def blackmanharris(M, sym=True): + r"""Return a minimum 4-term Blackman-Harris window. + + Parameters + ---------- + M : int + Number of points in the output window. If zero or less, an empty + array is returned. + sym : bool, optional + When True (default), generates a symmetric window, for use in filter + design. + When False, generates a periodic window, for use in spectral analysis. + + Returns + ------- + w : ndarray + The window, with the maximum value normalized to 1 (though the value 1 + does not appear if `M` is even and `sym` is True). 
+ + Examples + -------- + Plot the window and its frequency response: + + >>> from cupyx.scipy.signal.windows import blackmanharris + >>> import cupy as cp + >>> from cupy.fft import fft, fftshift + >>> import matplotlib.pyplot as plt + + >>> window = blackmanharris(51) + >>> plt.plot(cupy.asnumpy(window)) + >>> plt.title("Blackman-Harris window") + >>> plt.ylabel("Amplitude") + >>> plt.xlabel("Sample") + + >>> plt.figure() + >>> A = fft(window, 2048) / (len(window)/2.0) + >>> freq = cupy.linspace(-0.5, 0.5, len(A)) + >>> response = 20 * cupy.log10(cupy.abs(fftshift(A / cupy.abs(A).max()))) + >>> plt.plot(cupy.asnumpy(freq), cupy.asnumpy(response)) + >>> plt.axis([-0.5, 0.5, -120, 0]) + >>> plt.title("Frequency response of the Blackman-Harris window") + >>> plt.ylabel("Normalized magnitude [dB]") + >>> plt.xlabel("Normalized frequency [cycles per sample]") + + """ + return general_cosine(M, [0.35875, 0.48829, 0.14128, 0.01168], sym) + + +def flattop(M, sym=True): + r"""Return a flat top window. + + Parameters + ---------- + M : int + Number of points in the output window. If zero or less, an empty + array is returned. + sym : bool, optional + When True (default), generates a symmetric window, for use in filter + design. + When False, generates a periodic window, for use in spectral analysis. + + Returns + ------- + w : ndarray + The window, with the maximum value normalized to 1 (though the value 1 + does not appear if `M` is even and `sym` is True). + + Notes + ----- + Flat top windows are used for taking accurate measurements of signal + amplitude in the frequency domain, with minimal scalloping error from the + center of a frequency bin to its edges, compared to others. This is a + 5th-order cosine window, with the 5 terms optimized to make the main lobe + maximally flat. [1]_ + + References + ---------- + .. [1] D'Antona, Gabriele, and A. Ferrero, "Digital Signal Processing for + Measurement Systems", Springer Media, 2006, p. 
70 + `10.1007/0-387-28666-7 `_ + + Examples + -------- + Plot the window and its frequency response: + + >>> from cupyx.scipy.signal.windows import flattop + >>> import cupy as cp + >>> from cupy.fft import fft, fftshift + >>> import matplotlib.pyplot as plt + + >>> window = flattop(51) + >>> plt.plot(cupy.asnumpy(window)) + >>> plt.title("Flat top window") + >>> plt.ylabel("Amplitude") + >>> plt.xlabel("Sample") + + >>> plt.figure() + >>> A = fft(window, 2048) / (len(window)/2.0) + >>> freq = cupy.linspace(-0.5, 0.5, len(A)) + >>> response = 20 * cupy.log10(cupy.abs(fftshift(A / cupy.abs(A).max()))) + >>> plt.plot(cupy.asnumpy(freq), cupy.asnumpy(response)) + >>> plt.axis([-0.5, 0.5, -120, 0]) + >>> plt.title("Frequency response of the flat top window") + >>> plt.ylabel("Normalized magnitude [dB]") + >>> plt.xlabel("Normalized frequency [cycles per sample]") + + """ + a = [0.21557895, 0.41663158, 0.277263158, 0.083578947, 0.006947368] + return general_cosine(M, a, sym) + + +_bartlett_kernel = cupy.ElementwiseKernel( + "", + "float64 w", + """ + if ( i <= temp ) { + w = 2.0 * i * N; + } else { + w = 2.0 - 2.0 * i * N; + } + """, + "_bartlett_kernel", + options=("-std=c++11",), + loop_prep="const double N { 1.0 / ( _ind.size() - 1 ) }; \ + const double temp { 0.5 * ( _ind.size() - 1 ) };", +) + + +def bartlett(M, sym=True): + r""" + Return a Bartlett window. + + The Bartlett window is very similar to a triangular window, except + that the end points are at zero. It is often used in signal + processing for tapering a signal, without generating too much + ripple in the frequency domain. + + Parameters + ---------- + M : int + Number of points in the output window. If zero or less, an empty + array is returned. + sym : bool, optional + When True (default), generates a symmetric window, for use in filter + design. + When False, generates a periodic window, for use in spectral analysis. 

    Returns
    -------
    w : ndarray
        The triangular window, with the first and last samples equal to zero
        and the maximum value normalized to 1 (though the value 1 does not
        appear if `M` is even and `sym` is True).

    See Also
    --------
    triang : A triangular window that does not touch zero at the ends

    Notes
    -----
    The Bartlett window is defined as

    .. math:: w(n) = \frac{2}{M-1} \left(
              \frac{M-1}{2} - \left|n - \frac{M-1}{2}\right|
              \right)

    Most references to the Bartlett window come from the signal
    processing literature, where it is used as one of many windowing
    functions for smoothing values. Note that convolution with this
    window produces linear interpolation. It is also known as an
    apodization (which means "removing the foot", i.e. smoothing
    discontinuities at the beginning and end of the sampled signal) or
    tapering function. The Fourier transform of the Bartlett is the product
    of two sinc functions.
    Note the excellent discussion in Kanasewich. [2]_

    For more information, see [1]_, [2]_, [3]_, [4]_ and [5]_

    References
    ----------
    .. [1] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra",
           Biometrika 37, 1-16, 1950.
    .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
           The University of Alberta Press, 1975, pp. 109-110.
    .. [3] A.V. Oppenheim and R.W. Schafer, "Discrete-Time Signal
           Processing", Prentice-Hall, 1999, pp. 468-471.
    .. [4] Wikipedia, "Window function",
           https://en.wikipedia.org/wiki/Window_function
    .. [5] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
           "Numerical Recipes", Cambridge University Press, 1986, page 429.

    Examples
    --------
    Plot the window and its frequency response:

    >>> import cupyx.scipy.signal.windows
    >>> import cupy
    >>> from cupy.fft import fft, fftshift
    >>> import matplotlib.pyplot as plt

    >>> window = cupyx.scipy.signal.windows.bartlett(51)
    >>> plt.plot(cupy.asnumpy(window))
    >>> plt.title("Bartlett window")
    >>> plt.ylabel("Amplitude")
    >>> plt.xlabel("Sample")

    >>> plt.figure()
    >>> A = fft(window, 2048) / (len(window)/2.0)
    >>> freq = cupy.linspace(-0.5, 0.5, len(A))
    >>> response = 20 * cupy.log10(cupy.abs(fftshift(A / cupy.abs(A).max())))
    >>> plt.plot(cupy.asnumpy(freq), cupy.asnumpy(response))
    >>> plt.axis([-0.5, 0.5, -120, 0])
    >>> plt.title("Frequency response of the Bartlett window")
    >>> plt.ylabel("Normalized magnitude [dB]")
    >>> plt.xlabel("Normalized frequency [cycles per sample]")

    """
    # Docstring adapted from NumPy's bartlett function
    if _len_guards(M):
        return cupy.ones(M)
    M, needs_trunc = _extend(M, sym)

    w = _bartlett_kernel(size=M)

    return _truncate(w, needs_trunc)


def hann(M, sym=True):
    r"""
    Return a Hann window.

    The Hann window is a taper formed by using a raised cosine or sine-squared
    with ends that touch zero.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.
    sym : bool, optional
        When True (default), generates a symmetric window, for use in filter
        design.
        When False, generates a periodic window, for use in spectral analysis.

    Returns
    -------
    w : ndarray
        The window, with the maximum value normalized to 1 (though the value 1
        does not appear if `M` is even and `sym` is True).

    Notes
    -----
    The Hann window is defined as

    .. math::  w(n) = 0.5 - 0.5 \cos\left(\frac{2\pi{n}}{M-1}\right)
               \qquad 0 \leq n \leq M-1

    The window was named for Julius von Hann, an Austrian meteorologist. It is
    also known as the Cosine Bell.
It is sometimes erroneously referred to as + the "Hanning" window, from the use of "hann" as a verb in the original + paper and confusion with the very similar Hamming window. + + Most references to the Hann window come from the signal processing + literature, where it is used as one of many windowing functions for + smoothing values. It is also known as an apodization (which means + "removing the foot", i.e. smoothing discontinuities at the beginning + and end of the sampled signal) or tapering function. + + For more information, see [1]_, [2]_, [3]_, and [4]_ + + References + ---------- + .. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power + spectra, Dover Publications, New York. + .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", + The University of Alberta Press, 1975, pp. 106-108. + .. [3] Wikipedia, "Window function", + https://en.wikipedia.org/wiki/Window_function + .. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling, + "Numerical Recipes", Cambridge University Press, 1986, page 425. 
+ + Examples + -------- + Plot the window and its frequency response: + + >>> import cupyx.scipy.signal.windows + >>> import cupy as cp + >>> from cupy.fft import fft, fftshift + >>> import matplotlib.pyplot as plt + + >>> window = cupyx.scipy.signal.windows.hann(51) + >>> plt.plot(cupy.asnumpy(window)) + >>> plt.title("Hann window") + >>> plt.ylabel("Amplitude") + >>> plt.xlabel("Sample") + + >>> plt.figure() + >>> A = fft(window, 2048) / (len(window)/2.0) + >>> freq = cupy.linspace(-0.5, 0.5, len(A)) + >>> response = cupy.abs(fftshift(A / cupy.abs(A).max())) + >>> response = 20 * cupy.log10(np.maximum(response, 1e-10)) + >>> plt.plot(cupy.asnumpy(freq), cupy.asnumpy(response)) + >>> plt.axis([-0.5, 0.5, -120, 0]) + >>> plt.title("Frequency response of the Hann window") + >>> plt.ylabel("Normalized magnitude [dB]") + >>> plt.xlabel("Normalized frequency [cycles per sample]") + + """ + # Docstring adapted from NumPy's hanning function + return general_hamming(M, 0.5, sym) + + +_tukey_kernel = cupy.ElementwiseKernel( + "float64 alpha", + "float64 w", + """ + if ( i < ( width + 1 ) ) { + w = 0.5 * ( 1 + cos( M_PI * ( -1.0 + 2.0 * i / alpha * N ) ) ); + } else if ( i >= ( width + 1 ) && i < ( _ind.size() - width - 1) ) { + w = 1.0; + } else { + w = 0.5 * + ( 1.0 + cos( M_PI * ( -2.0 / alpha + 1 + 2.0 * i / alpha * N ) ) ); + } + """, + "_tukey_kernel", + options=("-std=c++11",), + loop_prep="const double N { 1.0 / ( _ind.size() - 1 ) }; \ + const int width { static_cast( alpha * \ + ( _ind.size() - 1 ) * 0.5 ) }", +) + + +def tukey(M, alpha=0.5, sym=True): + r"""Return a Tukey window, also known as a tapered cosine window. + + Parameters + ---------- + M : int + Number of points in the output window. If zero or less, an empty + array is returned. + alpha : float, optional + Shape parameter of the Tukey window, representing the fraction of the + window inside the cosine tapered region. + If zero, the Tukey window is equivalent to a rectangular window. 
+ If one, the Tukey window is equivalent to a Hann window. + sym : bool, optional + When True (default), generates a symmetric window, for use in filter + design. + When False, generates a periodic window, for use in spectral analysis. + + Returns + ------- + w : ndarray + The window, with the maximum value normalized to 1 (though the value 1 + does not appear if `M` is even and `sym` is True). + + Notes + ----- + For more information, see [1]_ and [2]_. + + References + ---------- + .. [1] Harris, Fredric J. (Jan 1978). "On the use of Windows for Harmonic + Analysis with the Discrete Fourier Transform". Proceedings of the + IEEE 66 (1): 51-83. + `10.1109/PROC.1978.10837 `_ + .. [2] Wikipedia, "Window function", + https://en.wikipedia.org/wiki/Window_function#Tukey_window + + Examples + -------- + Plot the window and its frequency response: + + >>> import cupyx.scipy.signal.windows + >>> import cupy as cp + >>> from cupy.fft import fft, fftshift + >>> import matplotlib.pyplot as plt + + >>> window = cupyx.scipy.signal.windows.tukey(51) + >>> plt.plot(cupy.asnumpy(window)) + >>> plt.title("Tukey window") + >>> plt.ylabel("Amplitude") + >>> plt.xlabel("Sample") + >>> plt.ylim([0, 1.1]) + + >>> plt.figure() + >>> A = fft(window, 2048) / (len(window)/2.0) + >>> freq = cupy.linspace(-0.5, 0.5, len(A)) + >>> response = 20 * cupy.log10(cupy.abs(fftshift(A / cupy.abs(A).max()))) + >>> plt.plot(cupy.asnumpy(freq), cupy.asnumpy(response)) + >>> plt.axis([-0.5, 0.5, -120, 0]) + >>> plt.title("Frequency response of the Tukey window") + >>> plt.ylabel("Normalized magnitude [dB]") + >>> plt.xlabel("Normalized frequency [cycles per sample]") + + """ + if _len_guards(M): + return cupy.ones(M) + + if alpha <= 0: + return cupy.ones(M, "d") + elif alpha >= 1.0: + return hann(M, sym=sym) + + M, needs_trunc = _extend(M, sym) + + w = _tukey_kernel(alpha, size=M) + + return _truncate(w, needs_trunc) + + +_barthann_kernel = cupy.ElementwiseKernel( + "", + "float64 w", + """ + const double 
fac { abs( i * N - 0.5 ) }; + w = 0.62 - 0.48 * fac + 0.38 * cos(2.0 * M_PI * fac); + """, + "_barthann_kernel", + options=("-std=c++11",), + loop_prep="const double N { 1.0 / ( _ind.size() - 1 ) };", +) + + +def barthann(M, sym=True): + r"""Return a modified Bartlett-Hann window. + + Parameters + ---------- + M : int + Number of points in the output window. If zero or less, an empty + array is returned. + sym : bool, optional + When True (default), generates a symmetric window, for use in filter + design. + When False, generates a periodic window, for use in spectral analysis. + + Returns + ------- + w : ndarray + The window, with the maximum value normalized to 1 (though the value 1 + does not appear if `M` is even and `sym` is True). + + Examples + -------- + Plot the window and its frequency response: + + >>> import cupyx.scipy.signal.windows + >>> import cupy as cp + >>> from cupy.fft import fft, fftshift + >>> import matplotlib.pyplot as plt + + >>> window = cupyx.scipy.signal.windows.barthann(51) + >>> plt.plot(cupy.asnumpy(window)) + >>> plt.title("Bartlett-Hann window") + >>> plt.ylabel("Amplitude") + >>> plt.xlabel("Sample") + + >>> plt.figure() + >>> A = fft(window, 2048) / (len(window)/2.0) + >>> freq = cupy.linspace(-0.5, 0.5, len(A)) + >>> response = 20 * cupy.log10(cupy.abs(fftshift(A / cupy.abs(A).max()))) + >>> plt.plot(cupy.asnumpy(freq), cupy.asnumpy(response)) + >>> plt.axis([-0.5, 0.5, -120, 0]) + >>> plt.title("Frequency response of the Bartlett-Hann window") + >>> plt.ylabel("Normalized magnitude [dB]") + >>> plt.xlabel("Normalized frequency [cycles per sample]") + + """ + if _len_guards(M): + return cupy.ones(M) + M, needs_trunc = _extend(M, sym) + + w = _barthann_kernel(size=M) + + return _truncate(w, needs_trunc) + + +def general_hamming(M, alpha, sym=True): + r"""Return a generalized Hamming window. + + The generalized Hamming window is constructed by multiplying a rectangular + window by one period of a cosine function [1]_. 
+ + Parameters + ---------- + M : int + Number of points in the output window. If zero or less, an empty + array is returned. + alpha : float + The window coefficient, :math:`\alpha` + sym : bool, optional + When True (default), generates a symmetric window, for use in filter + design. + When False, generates a periodic window, for use in spectral analysis. + + Returns + ------- + w : ndarray + The window, with the maximum value normalized to 1 (though the value 1 + does not appear if `M` is even and `sym` is True). + + Notes + ----- + The generalized Hamming window is defined as + + .. math:: w(n) = \alpha - + \left(1 - \alpha\right) \cos\left(\frac{2\pi{n}}{M-1}\right) + \qquad 0 \leq n \leq M-1 + + Both the common Hamming window and Hann window are special cases of the + generalized Hamming window with :math:`\alpha` = 0.54 and :math:`\alpha` = + 0.5, respectively [2]_. + + See Also + -------- + hamming, hann + + Examples + -------- + The Sentinel-1A/B Instrument Processing Facility uses generalized Hamming + windows in the processing of spaceborne Synthetic Aperture Radar (SAR) + data [3]_. The facility uses various values for the :math:`\alpha` + parameter based on operating mode of the SAR instrument. Some common + :math:`\alpha` values include 0.75, 0.7 and 0.52 [4]_. As an example, we + plot these different windows. + + >>> import cupyx.scipy.signal.windows + >>> import cupy as cp + >>> from cupy.fft import fft, fftshift + >>> import matplotlib.pyplot as plt + + >>> fig1, spatial_plot = plt.subplots() + >>> spatial_plot.set_title("Generalized Hamming Windows") + >>> spatial_plot.set_ylabel("Amplitude") + >>> spatial_plot.set_xlabel("Sample") + + >>> fig2, freq_plot = plt.subplots() + >>> freq_plot.set_title("Frequency Responses") + >>> freq_plot.set_ylabel("Normalized magnitude [dB]") + >>> freq_plot.set_xlabel("Normalized frequency [cycles per sample]") + + >>> for alpha in [0.75, 0.7, 0.52]: + ... 
window = cupyx.scipy.signal.windows.general_hamming(41, alpha) + ... spatial_plot.plot(cupy.asnumpy(window), label="{:.2f}".format(alpha)) + ... A = fft(window, 2048) / (len(window)/2.0) + ... freq = cupy.linspace(-0.5, 0.5, len(A)) + ... response = 20 * cupy.log10(cupy.abs(fftshift(A / cupy.abs(A).max()))) + ... freq_plot.plot( + ... cupy.asnumpy(freq), cupy.asnumpy(response), + ... label="{:.2f}".format(alpha) + ... ) + >>> freq_plot.legend(loc="upper right") + >>> spatial_plot.legend(loc="upper right") + + References + ---------- + .. [1] DSPRelated, "Generalized Hamming Window Family", + https://www.dsprelated.com/freebooks/sasp/Generalized_Hamming_Window_Family.html + .. [2] Wikipedia, "Window function", + https://en.wikipedia.org/wiki/Window_function + .. [3] Riccardo Piantanida ESA, "Sentinel-1 Level 1 Detailed Algorithm + Definition", + https://sentinel.esa.int/documents/247904/1877131/Sentinel-1-Level-1-Detailed-Algorithm-Definition + .. [4] Matthieu Bourbigot ESA, "Sentinel-1 Product Definition", + https://sentinel.esa.int/documents/247904/1877131/Sentinel-1-Product-Definition + """ # NOQA + return general_cosine(M, [alpha, 1.0 - alpha], sym) + + +_hamming_kernel = cupy.ElementwiseKernel( + "", + "float64 w", + """ + w = 0.54 - 0.46 * cos(2.0 * M_PI * i * N); + """, + "_hamming_kernel", + options=("-std=c++11",), + loop_prep="const double N { 1.0 / ( _ind.size() - 1 ) };", +) + + +def hamming(M, sym=True): + r""" + Return a Hamming window. + + The Hamming window is a taper formed by using a raised cosine with + non-zero endpoints, optimized to minimize the nearest side lobe. + + Parameters + ---------- + M : int + Number of points in the output window. If zero or less, an empty + array is returned. + sym : bool, optional + When True (default), generates a symmetric window, for use in filter + design. + When False, generates a periodic window, for use in spectral analysis. 
+ + Returns + ------- + w : ndarray + The window, with the maximum value normalized to 1 (though the value 1 + does not appear if `M` is even and `sym` is True). + + Notes + ----- + The Hamming window is defined as + + .. math:: w(n) = 0.54 - 0.46 \cos\left(\frac{2\pi{n}}{M-1}\right) + \qquad 0 \leq n \leq M-1 + + The Hamming was named for R. W. Hamming, an associate of J. W. Tukey and + is described in Blackman and Tukey. It was recommended for smoothing the + truncated autocovariance function in the time domain. + Most references to the Hamming window come from the signal processing + literature, where it is used as one of many windowing functions for + smoothing values. It is also known as an apodization (which means + "removing the foot", i.e. smoothing discontinuities at the beginning + and end of the sampled signal) or tapering function. + + For more information, see [1]_, [2]_, [3]_ and [4]_ + + References + ---------- + .. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power + spectra, Dover Publications, New York. + .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The + University of Alberta Press, 1975, pp. 109-110. + .. [3] Wikipedia, "Window function", + https://en.wikipedia.org/wiki/Window_function + .. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling, + "Numerical Recipes", Cambridge University Press, 1986, page 425. 
+ + Examples + -------- + Plot the window and its frequency response: + + >>> import cupyx.scipy.signal.windows + >>> import cupy as cp + >>> from cupy.fft import fft, fftshift + >>> import matplotlib.pyplot as plt + + >>> window = cupyx.scipy.signal.windows.hamming(51) + >>> plt.plot(cupy.asnumpy(window)) + >>> plt.title("Hamming window") + >>> plt.ylabel("Amplitude") + >>> plt.xlabel("Sample") + + >>> plt.figure() + >>> A = fft(window, 2048) / (len(window)/2.0) + >>> freq = cupy.linspace(-0.5, 0.5, len(A)) + >>> response = 20 * cupy.log10(cupy.abs(fftshift(A / cupy.abs(A).max()))) + >>> plt.plot(cupy.asnumpy(freq), cupy.asnumpy(response)) + >>> plt.axis([-0.5, 0.5, -120, 0]) + >>> plt.title("Frequency response of the Hamming window") + >>> plt.ylabel("Normalized magnitude [dB]") + >>> plt.xlabel("Normalized frequency [cycles per sample]") + + """ + return general_hamming(M, 0.54, sym) + + +_kaiser_kernel = cupy.ElementwiseKernel( + "float64 beta", + "float64 w", + """ + const double temp { ( i - alpha ) / alpha }; + w = cyl_bessel_i0( beta * sqrt( 1.0 - ( temp * temp ) ) ) / + cyl_bessel_i0( beta ); + """, + "_kaiser_kernel", + options=("-std=c++11",), + loop_prep="const double alpha { 0.5 * ( _ind.size() - 1 ) };", +) + + +def kaiser(M, beta, sym=True): + r""" + Return a Kaiser window. + + The Kaiser window is a taper formed by using a Bessel function. + + Parameters + ---------- + M : int + Number of points in the output window. If zero or less, an empty + array is returned. + beta : float + Shape parameter, determines trade-off between main-lobe width and + side lobe level. As beta gets large, the window narrows. + sym : bool, optional + When True (default), generates a symmetric window, for use in filter + design. + When False, generates a periodic window, for use in spectral analysis. + + Returns + ------- + w : ndarray + The window, with the maximum value normalized to 1 (though the value 1 + does not appear if `M` is even and `sym` is True). 
+ + Notes + ----- + The Kaiser window is defined as + + .. math:: w(n) = I_0\left( \beta \sqrt{1-\frac{4n^2}{(M-1)^2}} + \right)/I_0(\beta) + + with + + .. math:: \quad -\frac{M-1}{2} \leq n \leq \frac{M-1}{2}, + + where :math:`I_0` is the modified zeroth-order Bessel function. + + The Kaiser was named for Jim Kaiser, who discovered a simple approximation + to the DPSS window based on Bessel functions. + The Kaiser window is a very good approximation to the Digital Prolate + Spheroidal Sequence, or Slepian window, which is the transform which + maximizes the energy in the main lobe of the window relative to total + energy. + + The Kaiser can approximate other windows by varying the beta parameter. + (Some literature uses alpha = beta/pi.) [4]_ + + ==== ======================= + beta Window shape + ==== ======================= + 0 Rectangular + 5 Similar to a Hamming + 6 Similar to a Hann + 8.6 Similar to a Blackman + ==== ======================= + + A beta value of 14 is probably a good starting point. Note that as beta + gets large, the window narrows, and so the number of samples needs to be + large enough to sample the increasingly narrow spike, otherwise NaNs will + be returned. + + Most references to the Kaiser window come from the signal processing + literature, where it is used as one of many windowing functions for + smoothing values. It is also known as an apodization (which means + "removing the foot", i.e. smoothing discontinuities at the beginning + and end of the sampled signal) or tapering function. + + For more information, see [1]_, [2]_, [3]_, and [4]_ + + References + ---------- + .. [1] J. F. Kaiser, "Digital Filters" - Ch 7 in "Systems analysis by + digital computer", Editors: F.F. Kuo and J.F. Kaiser, p 218-285. + John Wiley and Sons, New York, (1966). + .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The + University of Alberta Press, 1975, pp. 177-178. + .. 
[3] Wikipedia, "Window function", + https://en.wikipedia.org/wiki/Window_function + .. [4] F. J. Harris, "On the use of windows for harmonic analysis with the + discrete Fourier transform," Proceedings of the IEEE, vol. 66, + no. 1, pp. 51-83, Jan. 1978. + `10.1109/PROC.1978.10837 `_ + + + Examples + -------- + Plot the window and its frequency response: + + >>> import cupyx.scipy.signal.windows + >>> import cupy as cp + >>> from cupy.fft import fft, fftshift + >>> import matplotlib.pyplot as plt + + >>> window = cupyx.scipy.signal.windows.kaiser(51, beta=14) + >>> plt.plot(cupy.asnumpy(window)) + >>> plt.title(r"Kaiser window ($\beta$=14)") + >>> plt.ylabel("Amplitude") + >>> plt.xlabel("Sample") + + >>> plt.figure() + >>> A = fft(window, 2048) / (len(window)/2.0) + >>> freq = cupy.linspace(-0.5, 0.5, len(A)) + >>> response = 20 * cupy.log10(cupy.abs(fftshift(A / cupy.abs(A).max()))) + >>> plt.plot(cupy.asnumpy(freq), cupy.asnumpy(response)) + >>> plt.axis([-0.5, 0.5, -120, 0]) + >>> plt.title(r"Frequency response of the Kaiser window ($\beta$=14)") + >>> plt.ylabel("Normalized magnitude [dB]") + >>> plt.xlabel("Normalized frequency [cycles per sample]") + + """ + if _len_guards(M): + return cupy.ones(M) + + M, needs_trunc = _extend(M, sym) + w = _kaiser_kernel(beta, size=M) + + return _truncate(w, needs_trunc) + + +_gaussian_kernel = cupy.ElementwiseKernel( + "float64 std", + "float64 w", + """ + const double n { i - (_ind.size() - 1.0) * 0.5 }; + w = exp( - ( n * n ) / sig2 ); + """, + "_gaussian_kernel", + options=("-std=c++11",), + loop_prep="const double sig2 { 2.0 * std * std };", +) + + +def gaussian(M, std, sym=True): + r"""Return a Gaussian window. + + Parameters + ---------- + M : int + Number of points in the output window. If zero or less, an empty + array is returned. + std : float + The standard deviation, sigma. + sym : bool, optional + When True (default), generates a symmetric window, for use in filter + design. 
+ When False, generates a periodic window, for use in spectral analysis. + + Returns + ------- + w : ndarray + The window, with the maximum value normalized to 1 (though the value 1 + does not appear if `M` is even and `sym` is True). + + Notes + ----- + The Gaussian window is defined as + + .. math:: w(n) = e^{ -\frac{1}{2}\left(\frac{n}{\sigma}\right)^2 } + + Examples + -------- + Plot the window and its frequency response: + + >>> import cupyx.scipy.signal.windows + >>> import cupy as cp + >>> from cupy.fft import fft, fftshift + >>> import matplotlib.pyplot as plt + + >>> window = cupyx.scipy.signal.windows.gaussian(51, std=7) + >>> plt.plot(cupy.asnumpy(window)) + >>> plt.title(r"Gaussian window ($\sigma$=7)") + >>> plt.ylabel("Amplitude") + >>> plt.xlabel("Sample") + + >>> plt.figure() + >>> A = fft(window, 2048) / (len(window)/2.0) + >>> freq = cupy.linspace(-0.5, 0.5, len(A)) + >>> response = 20 * cupy.log10(cupy.abs(fftshift(A / cupy.abs(A).max()))) + >>> plt.plot(cupy.asnumpy(freq), cupy.asnumpy(response)) + >>> plt.axis([-0.5, 0.5, -120, 0]) + >>> plt.title(r"Frequency response of the Gaussian window ($\sigma$=7)") + >>> plt.ylabel("Normalized magnitude [dB]") + >>> plt.xlabel("Normalized frequency [cycles per sample]") + + """ + if _len_guards(M): + return cupy.ones(M) + M, needs_trunc = _extend(M, sym) + + w = _gaussian_kernel(std, size=M) + + return _truncate(w, needs_trunc) + + +_general_gaussian_kernel = cupy.ElementwiseKernel( + "float64 p, float64 sig", + "float64 w", + """ + const double n { i - ( _ind.size() - 1.0 ) * 0.5 }; + w = exp( -0.5 * pow( abs( n / sig ), 2.0 * p ) ); + """, + "_general_gaussian_kernel", + options=("-std=c++11",), +) + + +def general_gaussian(M, p, sig, sym=True): + r"""Return a window with a generalized Gaussian shape. + + Parameters + ---------- + M : int + Number of points in the output window. If zero or less, an empty + array is returned. + p : float + Shape parameter. 
p = 1 is identical to `gaussian`, p = 0.5 is + the same shape as the Laplace distribution. + sig : float + The standard deviation, sigma. + sym : bool, optional + When True (default), generates a symmetric window, for use in filter + design. + When False, generates a periodic window, for use in spectral analysis. + + Returns + ------- + w : ndarray + The window, with the maximum value normalized to 1 (though the value 1 + does not appear if `M` is even and `sym` is True). + + Notes + ----- + The generalized Gaussian window is defined as + + .. math:: w(n) = e^{ -\frac{1}{2}\left|\frac{n}{\sigma}\right|^{2p} } + + the half-power point is at + + .. math:: (2 \log(2))^{1/(2 p)} \sigma + + Examples + -------- + Plot the window and its frequency response: + + >>> import cupyx.scipy.signal.windows + >>> import cupy as cp + >>> from cupy.fft import fft, fftshift + >>> import matplotlib.pyplot as plt + + >>> window = cupyx.scipy.signal.windows.general_gaussian(51, p=1.5, sig=7) + >>> plt.plot(cupy.asnumpy(window)) + >>> plt.title(r"Generalized Gaussian window (p=1.5, $\sigma$=7)") + >>> plt.ylabel("Amplitude") + >>> plt.xlabel("Sample") + + >>> plt.figure() + >>> A = fft(window, 2048) / (len(window)/2.0) + >>> freq = cupy.linspace(-0.5, 0.5, len(A)) + >>> response = 20 * cupy.log10(cupy.abs(fftshift(A / cupy.abs(A).max()))) + >>> plt.plot(cupy.asnumpy(freq), cupy.asnumpy(response)) + >>> plt.axis([-0.5, 0.5, -120, 0]) + >>> plt.title(r"Freq. resp. of the gen. Gaussian " + ... 
r"window (p=1.5, $\sigma$=7)") + >>> plt.ylabel("Normalized magnitude [dB]") + >>> plt.xlabel("Normalized frequency [cycles per sample]") + + """ + if _len_guards(M): + return cupy.ones(M) + M, needs_trunc = _extend(M, sym) + + w = _general_gaussian_kernel(p, sig, size=M) + + return _truncate(w, needs_trunc) + + +_chebwin_kernel = cupy.ElementwiseKernel( + "int64 order, float64 beta", + "complex128 p", + """ + double real {}; + const double x { beta * cos( i * N ) }; + + if ( x > 1 ) { + real = cosh( order * acosh( x ) ); + } else if ( x < -1 ) { + real = ( 2.0 * ( _ind.size() & 1 ) - 1.0 ) * + cosh( order * acosh( -x ) ); + } else { + real = cos( order * acos( x ) ); + } + + if ( odd ) { + p = real; + } else { + p = real * exp( thrust::complex( 0.0, N * i ) ); + } + """, + "_chebwin_kernel", + options=("-std=c++11",), + loop_prep="const double N { M_PI * ( 1.0 / _ind.size() ) }; \ + const bool odd { _ind.size() & 1 };", +) + + +# `chebwin` contributed by Kumar Appaiah. +def chebwin(M, at, sym=True): + r"""Return a Dolph-Chebyshev window. + + Parameters + ---------- + M : int + Number of points in the output window. If zero or less, an empty + array is returned. + at : float + Attenuation (in dB). + sym : bool, optional + When True (default), generates a symmetric window, for use in filter + design. + When False, generates a periodic window, for use in spectral analysis. + + Returns + ------- + w : ndarray + The window, with the maximum value always normalized to 1 + + Notes + ----- + This window optimizes for the narrowest main lobe width for a given order + `M` and sidelobe equiripple attenuation `at`, using Chebyshev + polynomials. It was originally developed by Dolph to optimize the + directionality of radio antenna arrays. + + Unlike most windows, the Dolph-Chebyshev is defined in terms of its + frequency response: + + .. math:: W(k) = \frac + {\cos\{M \cos^{-1}[\beta \cos(\frac{\pi k}{M})]\}} + {\cosh[M \cosh^{-1}(\beta)]} + + where + + .. 
math:: \beta = \cosh \left [\frac{1}{M} + \cosh^{-1}(10^\frac{A}{20}) \right ] + + and 0 <= abs(k) <= M-1. A is the attenuation in decibels (`at`). + + The time domain window is then generated using the IFFT, so + power-of-two `M` are the fastest to generate, and prime number `M` are + the slowest. + + The equiripple condition in the frequency domain creates impulses in the + time domain, which appear at the ends of the window. + + For more information, see [1]_, [2]_ and [3]_ + + References + ---------- + .. [1] C. Dolph, "A current distribution for broadside arrays which + optimizes the relationship between beam width and side-lobe level", + Proceedings of the IEEE, Vol. 34, Issue 6 + .. [2] Peter Lynch, "The Dolph-Chebyshev Window: A Simple Optimal Filter", + American Meteorological Society (April 1997) + http://mathsci.ucd.ie/~plynch/Publications/Dolph.pdf + .. [3] F. J. Harris, "On the use of windows for harmonic analysis with the + discrete Fourier transforms", Proceedings of the IEEE, Vol. 66, + No. 
1, January 1978 + + Examples + -------- + Plot the window and its frequency response: + + >>> import cupyx.scipy.signal.windows + >>> import cupy as cp + >>> from cupy.fft import fft, fftshift + >>> import matplotlib.pyplot as plt + + >>> window = cupyx.scipy.signal.windows.chebwin(51, at=100) + >>> plt.plot(cupy.asnumpy(window)) + >>> plt.title("Dolph-Chebyshev window (100 dB)") + >>> plt.ylabel("Amplitude") + >>> plt.xlabel("Sample") + + >>> plt.figure() + >>> A = fft(window, 2048) / (len(window)/2.0) + >>> freq = cupy.linspace(-0.5, 0.5, len(A)) + >>> response = 20 * cupy.log10(cupy.abs(fftshift(A / cupy.abs(A).max()))) + >>> plt.plot(cupy.asnumpy(freq), cupy.asnumpy(response)) + >>> plt.axis([-0.5, 0.5, -120, 0]) + >>> plt.title("Frequency response of the Dolph-Chebyshev window (100 dB)") + >>> plt.ylabel("Normalized magnitude [dB]") + >>> plt.xlabel("Normalized frequency [cycles per sample]") + + """ + + if abs(at) < 45: + warnings.warn( + "This window is not suitable for spectral analysis " + "for attenuation values lower than about 45dB because " + "the equivalent noise bandwidth of a Chebyshev window " + "does not grow monotonically with increasing sidelobe " + "attenuation when the attenuation is smaller than " + "about 45 dB." 
+ ) + if _len_guards(M): + return cupy.ones(M) + M, needs_trunc = _extend(M, sym) + + # compute the parameter beta + order = M - 1.0 + beta = np.cosh(1.0 / order * np.arccosh(10 ** (abs(at) / 20.0))) + + # Appropriate IDFT and filling up + # depending on even/odd M + p = _chebwin_kernel(order, beta, size=M) + if M % 2: + w = cupy.real(cupy.fft.fft(p)) + n = (M + 1) // 2 + w = w[:n] + w = cupy.concatenate((w[n - 1: 0: -1], w)) + else: + w = cupy.real(cupy.fft.fft(p)) + n = M // 2 + 1 + w = cupy.concatenate((w[n - 1: 0: -1], w[1:n])) + + w = w / cupy.max(w) + + return _truncate(w, needs_trunc) + + +_cosine_kernel = cupy.ElementwiseKernel( + "", + "float64 w", + """ + w = sin( M_PI / _ind.size() * ( i + 0.5 ) ); + """, + "_cosine_kernel", + options=("-std=c++11",), +) + + +def cosine(M, sym=True): + r"""Return a window with a simple cosine shape. + + Parameters + ---------- + M : int + Number of points in the output window. If zero or less, an empty + array is returned. + sym : bool, optional + When True (default), generates a symmetric window, for use in filter + design. + When False, generates a periodic window, for use in spectral analysis. + + Returns + ------- + w : ndarray + The window, with the maximum value normalized to 1 (though the value 1 + does not appear if `M` is even and `sym` is True). + + Notes + ----- + + .. 
versionadded:: 0.13.0 + + Examples + -------- + Plot the window and its frequency response: + + >>> import cupyx.scipy.signal.windows + >>> import cupy as cp + >>> from cupy.fft import fft, fftshift + >>> import matplotlib.pyplot as plt + + >>> window = cupyx.scipy.signal.windows.cosine(51) + >>> plt.plot(cupy.asnumpy(window)) + >>> plt.title("Cosine window") + >>> plt.ylabel("Amplitude") + >>> plt.xlabel("Sample") + + >>> plt.figure() + >>> A = fft(window, 2048) / (len(window)/2.0) + >>> freq = cupy.linspace(-0.5, 0.5, len(A)) + >>> response = 20 * cupy.log10(cupy.abs(fftshift(A / cupy.abs(A).max()))) + >>> plt.plot(cupy.asnumpy(freq), cupy.asnumpy(response)) + >>> plt.axis([-0.5, 0.5, -120, 0]) + >>> plt.title("Frequency response of the cosine window") + >>> plt.ylabel("Normalized magnitude [dB]") + >>> plt.xlabel("Normalized frequency [cycles per sample]") + >>> plt.show() + + """ + if _len_guards(M): + return cupy.ones(M) + M, needs_trunc = _extend(M, sym) + + w = _cosine_kernel(size=M) + + return _truncate(w, needs_trunc) + + +_exponential_kernel = cupy.ElementwiseKernel( + "float64 center, float64 tau", + "float64 w", + """ + w = exp( -abs( i - center ) / tau ); + """, + "_exponential_kernel", + options=("-std=c++11",), +) + + +def exponential(M, center=None, tau=1.0, sym=True): + r"""Return an exponential (or Poisson) window. + + Parameters + ---------- + M : int + Number of points in the output window. If zero or less, an empty + array is returned. + center : float, optional + Parameter defining the center location of the window function. + The default value if not given is ``center = (M-1) / 2``. This + parameter must take its default value for symmetric windows. + tau : float, optional + Parameter defining the decay. For ``center = 0`` use + ``tau = -(M-1) / ln(x)`` if ``x`` is the fraction of the window + remaining at the end. + sym : bool, optional + When True (default), generates a symmetric window, for use in filter + design. 
+ When False, generates a periodic window, for use in spectral analysis. + + Returns + ------- + w : ndarray + The window, with the maximum value normalized to 1 (though the value 1 + does not appear if `M` is even and `sym` is True). + + Notes + ----- + The Exponential window is defined as + + .. math:: w(n) = e^{-|n-center| / \tau} + + References + ---------- + S. Gade and H. Herlufsen, "Windows to FFT analysis (Part I)", + Technical Review 3, Bruel & Kjaer, 1987. + + Examples + -------- + Plot the symmetric window and its frequency response: + + >>> import cupyx.scipy.signal.windows + >>> import cupy as cp + >>> from cupy.fft import fft, fftshift + >>> import matplotlib.pyplot as plt + + >>> M = 51 + >>> tau = 3.0 + >>> window = cupyx.scipy.signal.windows.exponential(M, tau=tau) + >>> plt.plot(cupy.asnumpy(window)) + >>> plt.title("Exponential Window (tau=3.0)") + >>> plt.ylabel("Amplitude") + >>> plt.xlabel("Sample") + + >>> plt.figure() + >>> A = fft(window, 2048) / (len(window)/2.0) + >>> freq = cupy.linspace(-0.5, 0.5, len(A)) + >>> response = 20 * cupy.log10(cupy.abs(fftshift(A / cupy.abs(A).max()))) + >>> plt.plot(cupy.asnumpy(freq), cupy.asnumpy(response)) + >>> plt.axis([-0.5, 0.5, -35, 0]) + >>> plt.title("Frequency response of the Exponential window (tau=3.0)") + >>> plt.ylabel("Normalized magnitude [dB]") + >>> plt.xlabel("Normalized frequency [cycles per sample]") + + This function can also generate non-symmetric windows: + + >>> tau2 = -(M-1) / np.log(0.01) + >>> window2 = cupyx.scipy.signal.windows.exponential(M, 0, tau2, False) + >>> plt.figure() + >>> plt.plot(cupy.asnumpy(window2)) + >>> plt.ylabel("Amplitude") + >>> plt.xlabel("Sample") + """ + if sym and center is not None: + raise ValueError("If sym==True, center must be None.") + if _len_guards(M): + return cupy.ones(M) + M, needs_trunc = _extend(M, sym) + + if center is None: + center = (M - 1) / 2 + + w = _exponential_kernel(center, tau, size=M) + + return _truncate(w, needs_trunc) + + 
+_taylor_kernel = cupy.ElementwiseKernel( + "int64 nbar, raw float64 Fm, bool norm", + "float64 out", + """ + double temp { mod_pi * ( i - _ind.size() / 2.0 + 0.5 ) }; + double dot {}; + + for ( int k = 1; k < nbar; k++ ) { + dot += Fm[k-1] * cos( temp * k ); + } + out = 1.0 + 2.0 * dot; + + double scale { 1.0 }; + if (norm == 1) { + dot = 0; + temp = mod_pi * ( ( ( _ind.size() - 1.0 ) / 2.0 ) + - _ind.size() / 2.0 + 0.5 ); + for ( int k = 1; k < nbar; k++ ) { + dot += Fm[k-1] * cos( temp * k ); + } + scale = 1.0 / ( 1.0 + 2.0 * dot ); + } + + out *= scale; + """, + "_taylor_kernel", + options=("-std=c++11",), + loop_prep="const double mod_pi { 2.0 * M_PI / _ind.size() }", +) + + +def taylor(M, nbar=4, sll=30, norm=True, sym=True): + """ + Return a Taylor window. + The Taylor window taper function approximates the Dolph-Chebyshev window's + constant sidelobe level for a parameterized number of near-in sidelobes, + but then allows a taper beyond [2]_. + The SAR (synthetic aperture radar) community commonly uses Taylor + weighting for image formation processing because it provides strong, + selectable sidelobe suppression with minimum broadening of the + mainlobe [1]_. + + Parameters + ---------- + M : int + Number of points in the output window. If zero or less, an + empty array is returned. + nbar : int, optional + Number of nearly constant level sidelobes adjacent to the mainlobe. + sll : float, optional + Desired suppression of sidelobe level in decibels (dB) relative to the + DC gain of the mainlobe. This should be a positive number. + norm : bool, optional + When True (default), divides the window by the largest (middle) value + for odd-length windows or the value that would occur between the two + repeated middle values for even-length windows such that all values + are less than or equal to 1. When False the DC gain will remain at 1 + (0 dB) and the sidelobes will be `sll` dB down. 
+ sym : bool, optional + When True (default), generates a symmetric window, for use in filter + design. + When False, generates a periodic window, for use in spectral analysis. + + Returns + ------- + out : array + The window. When `norm` is True (default), the maximum value is + normalized to 1 (though the value 1 does not appear if `M` is + even and `sym` is True). + + See Also + -------- + chebwin, kaiser, bartlett, blackman, hamming, hanning + + References + ---------- + .. [1] W. Carrara, R. Goodman, and R. Majewski, "Spotlight Synthetic + Aperture Radar: Signal Processing Algorithms" Pages 512-513, + July 1995. + .. [2] Armin Doerry, "Catalog of Window Taper Functions for + Sidelobe Control", 2017. + https://www.researchgate.net/profile/Armin_Doerry/publication/316281181_Catalog_of_Window_Taper_Functions_for_Sidelobe_Control/links/58f92cb2a6fdccb121c9d54d/Catalog-of-Window-Taper-Functions-for-Sidelobe-Control.pdf + Examples + -------- + Plot the window and its frequency response: + >>> from scipy import signal + >>> from scipy.fft import fft, fftshift + >>> import matplotlib.pyplot as plt + >>> window = signal.windows.taylor(51, nbar=20, sll=100, norm=False) + >>> plt.plot(window) + >>> plt.title("Taylor window (100 dB)") + >>> plt.ylabel("Amplitude") + >>> plt.xlabel("Sample") + >>> plt.figure() + >>> A = fft(window, 2048) / (len(window)/2.0) + >>> freq = np.linspace(-0.5, 0.5, len(A)) + >>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max()))) + >>> plt.plot(freq, response) + >>> plt.axis([-0.5, 0.5, -120, 0]) + >>> plt.title("Frequency response of the Taylor window (100 dB)") + >>> plt.ylabel("Normalized magnitude [dB]") + >>> plt.xlabel("Normalized frequency [cycles per sample]") + """ # noqa: E501 + if _len_guards(M): + return cupy.ones(M) + M, needs_trunc = _extend(M, sym) + + # Original text uses a negative sidelobe level parameter and then negates + # it in the calculation of B. 
To keep consistent with other methods we + # assume the sidelobe level parameter to be positive. + B = 10 ** (sll / 20) + A = np.arccosh(B) / np.pi + s2 = nbar**2 / (A**2 + (nbar - 0.5) ** 2) + ma = np.arange(1, nbar) + + Fm = np.empty(nbar - 1) + signs = np.empty_like(ma) + signs[::2] = 1 + signs[1::2] = -1 + m2 = ma * ma + for mi, _ in enumerate(ma): + numer = signs[mi] * np.prod(1 - m2[mi] / s2 / (A**2 + (ma - 0.5) ** 2)) + denom = 2 * np.prod(1 - m2[mi] / m2[:mi]) * \ + np.prod(1 - m2[mi] / m2[mi + 1:]) + Fm[mi] = numer / denom + + w = _taylor_kernel(nbar, cupy.asarray(Fm), norm, size=M) + + return _truncate(w, needs_trunc) + + +def _fftautocorr(x): + """Compute the autocorrelation of a real array and crop the result.""" + N = x.shape[-1] + use_N = cupy.fft.next_fast_len(2 * N - 1) + x_fft = cupy.fft.rfft(x, use_N, axis=-1) + cxy = cupy.fft.irfft(x_fft * x_fft.conj(), n=use_N)[:, :N] + # Or equivalently (but in most cases slower): + # cxy = np.array([np.convolve(xx, yy[::-1], mode='full') + # for xx, yy in zip(x, x)])[:, N-1:2*N-1] + return cxy + + +_win_equiv_raw = { + ("barthann", "brthan", "bth"): (barthann, False), + ("bartlett", "bart", "brt"): (bartlett, False), + ("blackman", "black", "blk"): (blackman, False), + ("blackmanharris", "blackharr", "bkh"): (blackmanharris, False), + ("bohman", "bman", "bmn"): (bohman, False), + ("boxcar", "box", "ones", "rect", "rectangular"): (boxcar, False), + ("chebwin", "cheb"): (chebwin, True), + ("cosine", "halfcosine"): (cosine, False), + ("exponential", "poisson"): (exponential, True), + ("flattop", "flat", "flt"): (flattop, False), + ('general cosine', 'general_cosine'): (general_cosine, True), + ("gaussian", "gauss", "gss"): (gaussian, True), + ( + "general gaussian", + "general_gaussian", + "general gauss", + "general_gauss", + "ggs", + ): (general_gaussian, True), + ('general hamming', 'general_hamming'): (general_hamming, True), + ("hamming", "hamm", "ham"): (hamming, False), + ("hanning", "hann", "han"): (hann, 
False), + ("kaiser", "ksr"): (kaiser, True), + ("nuttall", "nutl", "nut"): (nuttall, False), + ("parzen", "parz", "par"): (parzen, False), + # ('slepian', 'slep', 'optimal', 'dpss', 'dss'): (slepian, True), + ("triangle", "triang", "tri"): (triang, False), + ("tukey", "tuk"): (tukey, True), +} + +# Fill dict with all valid window name strings +_win_equiv = {} +for k, v in _win_equiv_raw.items(): + for key in k: + _win_equiv[key] = v[0] + +# Keep track of which windows need additional parameters +_needs_param: Set[str] = set() +for k, v in _win_equiv_raw.items(): + if v[1]: + _needs_param.update(k) + + +def get_window(window, Nx, fftbins=True): + r""" + Return a window of a given length and type. + + Parameters + ---------- + window : string, float, or tuple + The type of window to create. See below for more details. + Nx : int + The number of samples in the window. + fftbins : bool, optional + If True (default), create a "periodic" window, ready to use with + `ifftshift` and be multiplied by the result of an FFT (see also + `fftpack.fftfreq`). + If False, create a "symmetric" window, for use in filter design. 
+ + Returns + ------- + get_window : ndarray + Returns a window of length `Nx` and type `window` + + Notes + ----- + Window types: + + - :func:`~cupyx.scipy.signal.windows.boxcar` + - :func:`~cupyx.scipy.signal.windows.triang` + - :func:`~cupyx.scipy.signal.windows.blackman` + - :func:`~cupyx.scipy.signal.windows.hamming` + - :func:`~cupyx.scipy.signal.windows.hann` + - :func:`~cupyx.scipy.signal.windows.bartlett` + - :func:`~cupyx.scipy.signal.windows.flattop` + - :func:`~cupyx.scipy.signal.windows.parzen` + - :func:`~cupyx.scipy.signal.windows.bohman` + - :func:`~cupyx.scipy.signal.windows.blackmanharris` + - :func:`~cupyx.scipy.signal.windows.nuttall` + - :func:`~cupyx.scipy.signal.windows.barthann` + - :func:`~cupyx.scipy.signal.windows.kaiser` (needs beta) + - :func:`~cupyx.scipy.signal.windows.gaussian` (needs standard deviation) + - :func:`~cupyx.scipy.signal.windows.general_gaussian` (needs power, width) + - :func:`~cupyx.scipy.signal.windows.chebwin` (needs attenuation) + - :func:`~cupyx.scipy.signal.windows.exponential` (needs decay scale) + - :func:`~cupyx.scipy.signal.windows.tukey` (needs taper fraction) + + If the window requires no parameters, then `window` can be a string. + + If the window requires parameters, then `window` must be a tuple + with the first argument the string name of the window, and the next + arguments the needed parameters. + + If `window` is a floating point number, it is interpreted as the beta + parameter of the :func:`~cupyx.scipy.signal.windows.kaiser` window. + + Each of the window types listed above is also the name of + a function that can be called directly to create a window of + that type. 
+ + Examples + -------- + >>> import cupyx.scipy.signal.windows + >>> cupyx.scipy.signal.windows.get_window('triang', 7) + array([ 0.125, 0.375, 0.625, 0.875, 0.875, 0.625, 0.375]) + >>> cupyx.scipy.signal.windows.get_window(('kaiser', 4.0), 9) + array([0.08848053, 0.32578323, 0.63343178, 0.89640418, 1., + 0.89640418, 0.63343178, 0.32578323, 0.08848053]) + >>> cupyx.scipy.signal.windows.get_window(4.0, 9) + array([0.08848053, 0.32578323, 0.63343178, 0.89640418, 1., + 0.89640418, 0.63343178, 0.32578323, 0.08848053]) + + """ # NOQA + sym = not fftbins + try: + beta = float(window) + except (TypeError, ValueError): + args = () + if isinstance(window, tuple): + winstr = window[0] + if len(window) > 1: + args = window[1:] + elif isinstance(window, str): + if window in _needs_param: + raise ValueError( + "The '" + window + "' window needs one or " + "more parameters -- pass a tuple." + ) + else: + winstr = window + else: + raise ValueError( + "%s as window type is not supported." % str(type(window))) + + try: + winfunc = _win_equiv[winstr] + except KeyError: + raise ValueError("Unknown window type.") + + params = (Nx,) + args + (sym,) + else: + winfunc = kaiser + params = (Nx, beta, sym) + + return winfunc(*params) diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/stats/__init__.py b/vllm/lib/python3.10/site-packages/cupyx/scipy/stats/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..14b1d1799dc81a7f9239ae7602bb98abb6c2d83a --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupyx/scipy/stats/__init__.py @@ -0,0 +1,11 @@ +# Summary statistics + +from cupyx.scipy.stats._distributions import entropy # NOQA +from cupyx.scipy.stats._stats import trim_mean # NOQA + + +# Other statistical functionality + +from cupyx.scipy.stats._morestats import boxcox_llf # NOQA +from cupyx.scipy.stats._stats_py import zmap # NOQA +from cupyx.scipy.stats._stats_py import zscore # NOQA diff --git 
a/vllm/lib/python3.10/site-packages/cupyx/scipy/stats/__pycache__/__init__.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/scipy/stats/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9c876914fb89bdee27bd5ca8adfe2d2bfa68997f Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/scipy/stats/__pycache__/__init__.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/stats/__pycache__/_distributions.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/scipy/stats/__pycache__/_distributions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..91299612acb9137997d94e1b931c3942a3f96605 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/scipy/stats/__pycache__/_distributions.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/stats/__pycache__/_morestats.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/scipy/stats/__pycache__/_morestats.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..157806fada01127934807330ceb467ebfc6c03e5 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/scipy/stats/__pycache__/_morestats.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/stats/__pycache__/_stats.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/scipy/stats/__pycache__/_stats.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..384bae33bc5d44613aa36e504c03dc666a17545e Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/scipy/stats/__pycache__/_stats.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/stats/__pycache__/_stats_py.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/scipy/stats/__pycache__/_stats_py.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..5e86cb0f42318edd1a47c3782abb7f6983db4f65 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/scipy/stats/__pycache__/_stats_py.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/stats/_distributions.py b/vllm/lib/python3.10/site-packages/cupyx/scipy/stats/_distributions.py new file mode 100644 index 0000000000000000000000000000000000000000..089a86ce3245f6d63a4880e33743f22a1b8a59a9 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupyx/scipy/stats/_distributions.py @@ -0,0 +1,59 @@ +import math + +import cupy +from cupyx.scipy import special + + +def _normalize(x, axis): + """Normalize, preserving floating point precision of x.""" + x_sum = x.sum(axis=axis, keepdims=True) + if x.dtype.kind == 'f': + x /= x_sum + else: + x = x / x_sum + return x + + +def entropy(pk, qk=None, base=None, axis=0): + """Calculate the entropy of a distribution for given probability values. + + If only probabilities ``pk`` are given, the entropy is calculated as + ``S = -sum(pk * log(pk), axis=axis)``. + + If ``qk`` is not None, then compute the Kullback-Leibler divergence + ``S = sum(pk * log(pk / qk), axis=axis)``. + + This routine will normalize ``pk`` and ``qk`` if they don't sum to 1. + + Args: + pk (ndarray): Defines the (discrete) distribution. ``pk[i]`` is the + (possibly unnormalized) probability of event ``i``. + qk (ndarray, optional): Sequence against which the relative entropy is + computed. Should be in the same format as ``pk``. + base (float, optional): The logarithmic base to use, defaults to ``e`` + (natural logarithm). + axis (int, optional): The axis along which the entropy is calculated. + Default is 0. + + Returns: + S (cupy.ndarray): The calculated entropy. 
+ + """ + if pk.dtype.kind == 'c' or qk is not None and qk.dtype.kind == 'c': + raise TypeError("complex dtype not supported") + + float_type = cupy.float32 if pk.dtype.char in 'ef' else cupy.float64 + pk = pk.astype(float_type, copy=False) + pk = _normalize(pk, axis) + if qk is None: + vec = special.entr(pk) + else: + if qk.shape != pk.shape: + raise ValueError("qk and pk must have same shape.") + qk = qk.astype(float_type, copy=False) + qk = _normalize(qk, axis) + vec = special.rel_entr(pk, qk) + s = cupy.sum(vec, axis=axis) + if base is not None: + s /= math.log(base) + return s diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/stats/_morestats.py b/vllm/lib/python3.10/site-packages/cupyx/scipy/stats/_morestats.py new file mode 100644 index 0000000000000000000000000000000000000000..88be8205ac3bbc96873a7f66691b89c42238cdb0 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupyx/scipy/stats/_morestats.py @@ -0,0 +1,47 @@ +import cupy + + +def boxcox_llf(lmb, data): + """The boxcox log-likelihood function. + + Parameters + ---------- + lmb : scalar + Parameter for Box-Cox transformation + data : array-like + Data to calculate Box-Cox log-likelihood for. If + `data` is multi-dimensional, the log-likelihood + is calculated along the first axis + + Returns + ------- + llf : float or cupy.ndarray + Box-Cox log-likelihood of `data` given `lmb`. 
A float + for 1-D `data`, an array otherwise + + See Also + -------- + scipy.stats.boxcox_llf + + """ + + if data.ndim == 1 and data.dtype == cupy.float16: + data = data.astype(cupy.float64) + if data.ndim == 1 and data.dtype == cupy.float32: + data = data.astype(cupy.float64) + if data.ndim == 1 and data.dtype == cupy.complex64: + data = data.astype(cupy.complex128) + + N = data.shape[0] + if N == 0: + return cupy.array(cupy.nan) + + logdata = cupy.log(data) + + # Compute the variance of the transformed data + if lmb == 0: + variance = cupy.var(logdata, axis=0) + else: + variance = cupy.var(data**lmb / lmb, axis=0) + + return (lmb - 1) * cupy.sum(logdata, axis=0) - N/2 * cupy.log(variance) diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/stats/_stats.py b/vllm/lib/python3.10/site-packages/cupyx/scipy/stats/_stats.py new file mode 100644 index 0000000000000000000000000000000000000000..e8b30ec451789dd4f884c9cd6403134693729b15 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupyx/scipy/stats/_stats.py @@ -0,0 +1,77 @@ +""" +A collection of basic statistical functions for Python. + +References +---------- +.. [CRCProbStat2000] Zwillinger, D. and Kokoska, S. (2000). CRC Standard + Probability and Statistics Tables and Formulae. Chapman & Hall: New + York. 2000. +""" +import cupy as cp + + +def trim_mean(a, proportiontocut, axis=0): + """Return mean of array after trimming distribution from both tails. + + If `proportiontocut` = 0.1, slices off 'leftmost' and 'rightmost' 10% of + scores. The input is sorted before slicing. Slices off less if proportion + results in a non-integer slice index (i.e., conservatively slices off + `proportiontocut` ). + + Parameters + ---------- + a : cupy.ndarray + Input array. + proportiontocut : float + Fraction to cut off of both tails of the distribution. + axis : int or None, optional + Axis along which the trimmed means are computed. Default is 0. + If None, compute over the whole array `a`. 
+ + Returns + ------- + trim_mean : ndarray + Mean of trimmed array. + + See Also + -------- + trimboth + tmean : Compute the trimmed mean ignoring values outside given `limits`. + + Examples + -------- + >>> import cupy as cp + >>> from cupyx.scipy import stats + >>> x = cp.arange(20) + >>> stats.trim_mean(x, 0.1) + array(9.5) + >>> x2 = x.reshape(5, 4) + >>> x2 + array([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11], + [12, 13, 14, 15], + [16, 17, 18, 19]]) + >>> stats.trim_mean(x2, 0.25) + array([ 8., 9., 10., 11.]) + >>> stats.trim_mean(x2, 0.25, axis=1) + array([ 1.5, 5.5, 9.5, 13.5, 17.5]) + """ + if a.size == 0: + return cp.nan + + if axis is None: + a = a.ravel() + axis = 0 + + nobs = a.shape[axis] + lowercut = int(proportiontocut * nobs) + uppercut = nobs - lowercut + if (lowercut > uppercut): + raise ValueError("Proportion too big.") + + atmp = cp.partition(a, (lowercut, uppercut - 1), axis) + + sl = [slice(None)] * atmp.ndim + sl[axis] = slice(lowercut, uppercut) + return cp.mean(atmp[tuple(sl)], axis=axis) diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/stats/_stats_py.py b/vllm/lib/python3.10/site-packages/cupyx/scipy/stats/_stats_py.py new file mode 100644 index 0000000000000000000000000000000000000000..2525404d7ad296ce439aa42daca3a906ba3d8770 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupyx/scipy/stats/_stats_py.py @@ -0,0 +1,138 @@ +import cupy + + +def _first(arr, axis): + """Return arr[..., 0:1, ...] where 0:1 is in the `axis` position + + """ + + return cupy.take_along_axis(arr, cupy.array(0, ndmin=arr.ndim), axis) + + +def _isconst(x): + """Check if all values in x are the same. nans are ignored. + x must be a 1d array. The return value is a 1d array + with length 1, so it can be used in cupy.apply_along_axis. + + """ + + y = x[~cupy.isnan(x)] + if y.size == 0: + return cupy.array([True]) + else: + return (y[0] == y).all(keepdims=True) + + +def zscore(a, axis=0, ddof=0, nan_policy='propagate'): + """Compute the z-score. 
+ + Compute the z-score of each value in the sample, relative to + the sample mean and standard deviation. + + Parameters + ---------- + a : array-like + An array like object containing the sample data + axis : int or None, optional + Axis along which to operate. Default is 0. If None, + compute over the whole arrsy `a` + ddof : int, optional + Degrees of freedom correction in the calculation of the + standard deviation. Default is 0 + nan_policy : {'propagate', 'raise', 'omit'}, optional + Defines how to handle when input contains nan. 'propagate' + returns nan, 'raise' throws an error, 'omit' performs + the calculations ignoring nan values. Default is + 'propagate'. Note that when the value is 'omit', + nans in the input also propagate to the output, + but they do not affect the z-scores computed + for the non-nan values + + Returns + ------- + zscore : array-like + The z-scores, standardized by mean and standard deviation of + input array `a` + + """ + + return zmap(a, a, axis=axis, ddof=ddof, nan_policy=nan_policy) + + +def zmap(scores, compare, axis=0, ddof=0, nan_policy='propagate'): + """Calculate the relative z-scores. + + Return an array of z-scores, i.e., scores that are standardized + to zero mean and unit variance, where mean and variance are + calculated from the comparison array. + + Parameters + ---------- + scores : array-like + The input for which z-scores are calculated + compare : array-like + The input from which the mean and standard deviation of + the normalization are taken; assumed to have the same + dimension as `scores` + axis : int or None, optional + Axis over which mean and variance of `compare` are calculated. + Default is 0. If None, compute over the whole array `scores` + ddof : int, optional + Degrees of freedom correction in the calculation of the + standard deviation. Default is 0 + nan_policy : {'propagate', 'raise', 'omit'}, optional + Defines how to handle the occurrence of nans in `compare`. 
+ 'propagate' returns nan, 'raise' raises an exception, 'omit' + performs the calculations ignoring nan values. Default is + 'propagate'. Note that when the value is 'omit', nans in `scores` + also propagate to the output, but they do not affect the z-scores + computed for the non-nan values + + Returns + ------- + zscore : array-like + Z-scores, in the same shape as `scores` + + """ + + policies = ['propagate', 'raise', 'omit'] + + if nan_policy not in policies: + raise ValueError("nan_policy must be one of {%s}" % + ', '.join("'%s'" % s for s in policies)) + + a = compare + + if a.size == 0: + return cupy.empty(a.shape) + + if nan_policy == 'raise': + contains_nan = cupy.isnan(cupy.sum(a)) + + if contains_nan: # synchronize! + raise ValueError("The input contains nan values") + + if nan_policy == 'omit': + if axis is None: + mn = cupy.nanmean(a.ravel()) + std = cupy.nanstd(a.ravel(), ddof=ddof) + isconst = _isconst(a.ravel()) + else: + mn = cupy.nanmean(a, axis=axis, keepdims=True) + std = cupy.nanstd(a, axis=axis, keepdims=True, ddof=ddof) + isconst = (_first(a, axis) == a).all(axis=axis, keepdims=True) + else: + mn = a.mean(axis=axis, keepdims=True) + std = a.std(axis=axis, ddof=ddof, keepdims=True) + if axis is None: + isconst = (a.ravel()[0] == a).all() + else: + isconst = (_first(a, axis) == a).all(axis=axis, keepdims=True) + + # Set std deviations that are 0 to 1 to avoid division by 0. + std[isconst] = 1.0 + z = (scores - mn) / std + + # Set the outputs associated with a constant input to nan. + z[cupy.broadcast_to(isconst, z.shape)] = cupy.nan + return z