diff --git a/.gitattributes b/.gitattributes index 4cc0f25b9a84fb9506fe83aa88bcdcabec09e2b8..c415346754d185f9215e822008c5359053b02770 100644 --- a/.gitattributes +++ b/.gitattributes @@ -1441,3 +1441,5 @@ vllm/lib/python3.10/site-packages/huggingface_hub/inference/__pycache__/_client. vglm/bin/python filter=lfs diff=lfs merge=lfs -text vllm/lib/python3.10/site-packages/huggingface_hub/__pycache__/hf_api.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text parrot/lib/python3.10/site-packages/numpy/lib/__pycache__/_function_base_impl.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text +vllm/lib/python3.10/site-packages/pyzmq.libs/libzmq-a430b4ce.so.5.2.5 filter=lfs diff=lfs merge=lfs -text +vllm/lib/python3.10/site-packages/pyzmq.libs/libsodium-1b1f72d5.so.26.1.0 filter=lfs diff=lfs merge=lfs -text diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/lstm.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/lstm.h new file mode 100644 index 0000000000000000000000000000000000000000..a2bcee47190d39b3bf61666cf4f0b2a9451c6336 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/lstm.h @@ -0,0 +1,35 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::lstm.input(Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor, Tensor) +inline ::std::tuple lstm(const at::Tensor & input, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) { + return at::_ops::lstm_input::call(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first); +} + +// aten::lstm.data(Tensor data, Tensor batch_sizes, Tensor[] hx, 
Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor, Tensor) +inline ::std::tuple lstm(const at::Tensor & data, const at::Tensor & batch_sizes, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) { + return at::_ops::lstm_data::call(data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional); +} + +} diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/reciprocal_meta.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/reciprocal_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..ce866ca76bc599503a880f4d74f1d04ff69d9031 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/reciprocal_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_reciprocal : public TensorIteratorBase { + + + void meta(const at::Tensor & self); +}; + +} // namespace native +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/view_as_real_copy_compositeexplicitautograd_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/view_as_real_copy_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..9054bc9e2eeb9cbabe83cd443455ad272907d533 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/view_as_real_copy_compositeexplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the 
C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor & view_as_real_copy_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & view_as_real_copy_outf(const at::Tensor & self, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/__pycache__/__init__.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/scipy/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3f6c9d79efbbe5ae0a47311bc7a549f6da0afc10 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/scipy/__pycache__/__init__.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/__init__.py b/vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..5122b7fa6d9bd7e6340334e410a8931dc82709e1 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/__init__.py @@ -0,0 +1,73 @@ +from cupyx.scipy.ndimage._filters import correlate # NOQA +from cupyx.scipy.ndimage._filters import convolve # NOQA +from cupyx.scipy.ndimage._filters import correlate1d # NOQA +from cupyx.scipy.ndimage._filters import convolve1d # NOQA +from cupyx.scipy.ndimage._filters import uniform_filter1d # NOQA +from cupyx.scipy.ndimage._filters import uniform_filter # NOQA +from cupyx.scipy.ndimage._filters import gaussian_filter1d # NOQA +from cupyx.scipy.ndimage._filters import gaussian_filter # NOQA +from cupyx.scipy.ndimage._filters import prewitt # NOQA +from cupyx.scipy.ndimage._filters import sobel # NOQA +from 
cupyx.scipy.ndimage._filters import generic_laplace # NOQA +from cupyx.scipy.ndimage._filters import laplace # NOQA +from cupyx.scipy.ndimage._filters import gaussian_laplace # NOQA +from cupyx.scipy.ndimage._filters import generic_gradient_magnitude # NOQA +from cupyx.scipy.ndimage._filters import gaussian_gradient_magnitude # NOQA +from cupyx.scipy.ndimage._filters import minimum_filter # NOQA +from cupyx.scipy.ndimage._filters import maximum_filter # NOQA +from cupyx.scipy.ndimage._filters import minimum_filter1d # NOQA +from cupyx.scipy.ndimage._filters import maximum_filter1d # NOQA +from cupyx.scipy.ndimage._filters import median_filter # NOQA +from cupyx.scipy.ndimage._filters import rank_filter # NOQA +from cupyx.scipy.ndimage._filters import percentile_filter # NOQA +from cupyx.scipy.ndimage._filters import generic_filter # NOQA +from cupyx.scipy.ndimage._filters import generic_filter1d # NOQA + +from cupyx.scipy.ndimage._fourier import fourier_ellipsoid # NOQA +from cupyx.scipy.ndimage._fourier import fourier_gaussian # NOQA +from cupyx.scipy.ndimage._fourier import fourier_shift # NOQA +from cupyx.scipy.ndimage._fourier import fourier_uniform # NOQA + +from cupyx.scipy.ndimage._interpolation import affine_transform # NOQA +from cupyx.scipy.ndimage._interpolation import map_coordinates # NOQA +from cupyx.scipy.ndimage._interpolation import rotate # NOQA +from cupyx.scipy.ndimage._interpolation import shift # NOQA +from cupyx.scipy.ndimage._interpolation import spline_filter # NOQA +from cupyx.scipy.ndimage._interpolation import spline_filter1d # NOQA +from cupyx.scipy.ndimage._interpolation import zoom # NOQA + +from cupyx.scipy.ndimage._measurements import label # NOQA +from cupyx.scipy.ndimage._measurements import sum # NOQA +from cupyx.scipy.ndimage._measurements import sum_labels # NOQA +from cupyx.scipy.ndimage._measurements import mean # NOQA +from cupyx.scipy.ndimage._measurements import variance # NOQA +from cupyx.scipy.ndimage._measurements 
import standard_deviation # NOQA +from cupyx.scipy.ndimage._measurements import minimum # NOQA +from cupyx.scipy.ndimage._measurements import maximum # NOQA +from cupyx.scipy.ndimage._measurements import minimum_position # NOQA +from cupyx.scipy.ndimage._measurements import maximum_position # NOQA +from cupyx.scipy.ndimage._measurements import median # NOQA +from cupyx.scipy.ndimage._measurements import extrema # NOQA +from cupyx.scipy.ndimage._measurements import center_of_mass # NOQA +from cupyx.scipy.ndimage._measurements import histogram # NOQA +from cupyx.scipy.ndimage._measurements import labeled_comprehension # NOQA +from cupyx.scipy.ndimage._measurements import value_indices # NOQA + +from cupyx.scipy.ndimage._morphology import generate_binary_structure # NOQA +from cupyx.scipy.ndimage._morphology import iterate_structure # NOQA +from cupyx.scipy.ndimage._morphology import binary_erosion # NOQA +from cupyx.scipy.ndimage._morphology import binary_dilation # NOQA +from cupyx.scipy.ndimage._morphology import binary_opening # NOQA +from cupyx.scipy.ndimage._morphology import binary_closing # NOQA +from cupyx.scipy.ndimage._morphology import binary_hit_or_miss # NOQA +from cupyx.scipy.ndimage._morphology import binary_fill_holes # NOQA +from cupyx.scipy.ndimage._morphology import binary_propagation # NOQA +from cupyx.scipy.ndimage._morphology import grey_erosion # NOQA +from cupyx.scipy.ndimage._morphology import grey_dilation # NOQA +from cupyx.scipy.ndimage._morphology import grey_closing # NOQA +from cupyx.scipy.ndimage._morphology import grey_opening # NOQA +from cupyx.scipy.ndimage._morphology import morphological_gradient # NOQA +from cupyx.scipy.ndimage._morphology import morphological_laplace # NOQA +from cupyx.scipy.ndimage._morphology import white_tophat # NOQA +from cupyx.scipy.ndimage._morphology import black_tophat # NOQA +from cupyx.scipy.ndimage._distance_transform import distance_transform_edt # NOQA diff --git 
a/vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/__pycache__/__init__.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..863ea1347dd62303839355f07afd8fc01163e10a Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/__pycache__/__init__.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/__pycache__/_distance_transform.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/__pycache__/_distance_transform.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8ce5d91b9b202dd4ad6b293dbe635babe0972ecd Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/__pycache__/_distance_transform.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/__pycache__/_filters.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/__pycache__/_filters.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ba1f83adce8419f658dc260266642157fc16defe Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/__pycache__/_filters.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/__pycache__/_filters_core.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/__pycache__/_filters_core.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..525f2b682a8505563aa92d9733d4feaa748bb047 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/__pycache__/_filters_core.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/__pycache__/_filters_generic.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/__pycache__/_filters_generic.cpython-310.pyc new file mode 
100644 index 0000000000000000000000000000000000000000..1ad78c6bd4f35d8cda199304e6bb644229c3f4c2 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/__pycache__/_filters_generic.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/__pycache__/_fourier.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/__pycache__/_fourier.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..453cfb2f6263bd07b133d723e1079c699392cfb9 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/__pycache__/_fourier.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/__pycache__/_interp_kernels.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/__pycache__/_interp_kernels.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c07699bedd464de26892c381f10b889407631cc3 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/__pycache__/_interp_kernels.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/__pycache__/_interpolation.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/__pycache__/_interpolation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1accda79447cad12f1f9fedfb9b55faae55b979a Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/__pycache__/_interpolation.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/__pycache__/_measurements.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/__pycache__/_measurements.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9ac6342f2a506628e696e85818abfb606cc85a20 Binary files /dev/null and 
b/vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/__pycache__/_measurements.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/__pycache__/_morphology.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/__pycache__/_morphology.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..28055ac5185f3a23b64f51562afee434b6167ed9 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/__pycache__/_morphology.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/__pycache__/_pba_2d.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/__pycache__/_pba_2d.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2926b5e784e53b288205a9a44b5294b4541e8044 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/__pycache__/_pba_2d.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/__pycache__/_pba_3d.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/__pycache__/_pba_3d.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6fc35d310023311ec8b75e8cb31cf0fbbedf6008 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/__pycache__/_pba_3d.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/__pycache__/_spline_kernel_weights.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/__pycache__/_spline_kernel_weights.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2d94845d2f40c76a6d8593cbb653c0cc194d6b5a Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/__pycache__/_spline_kernel_weights.cpython-310.pyc differ diff --git 
a/vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/__pycache__/_spline_prefilter_core.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/__pycache__/_spline_prefilter_core.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..97bd71dd6c41e2cf87bc3a5ae722c52d91e95558 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/__pycache__/_spline_prefilter_core.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/__pycache__/_util.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/__pycache__/_util.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5555353d2217f98a75ff845711191f952e74c5d2 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/__pycache__/_util.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/_distance_transform.py b/vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/_distance_transform.py new file mode 100644 index 0000000000000000000000000000000000000000..1123cc6aff08ac4f5b1b92a7be32e82a77866591 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/_distance_transform.py @@ -0,0 +1,181 @@ +import numbers + +from ._pba_2d import _pba_2d +from ._pba_3d import _pba_3d + + +def distance_transform_edt(image, sampling=None, return_distances=True, + return_indices=False, distances=None, indices=None, + *, block_params=None, float64_distances=True): + r"""Exact Euclidean distance transform. + + This function calculates the distance transform of the `input`, by + replacing each foreground (non-zero) element, with its shortest distance to + the background (any zero-valued element). + + In addition to the distance transform, the feature transform can be + calculated. In this case the index of the closest background element to + each foreground element is returned in a separate array. 
+ + Parameters + ---------- + image : array_like + Input data to transform. Can be any type but will be converted into + binary: 1 wherever image equates to True, 0 elsewhere. + sampling : float, or sequence of float, optional + Spacing of elements along each dimension. If a sequence, must be of + length equal to the image rank; if a single number, this is used for + all axes. If not specified, a grid spacing of unity is implied. + return_distances : bool, optional + Whether to calculate the distance transform. + return_indices : bool, optional + Whether to calculate the feature transform. + distances : cupy.ndarray, optional + An output array to store the calculated distance transform, instead of + returning it. `return_distances` must be ``True``. It must be the same + shape as `image`. Should have dtype ``cp.float32`` if + `float64_distances` is ``False``, otherwise it should be + ``cp.float64``. + indices : cupy.ndarray, optional + An output array to store the calculated feature transform, instead of + returning it. `return_indicies` must be ``True``. Its shape must be + ``(image.ndim,) + image.shape``. Its dtype must be a signed or unsigned + integer type of at least 16-bits in 2D or 32-bits in 3D. + + Other Parameters + ---------------- + block_params : 3-tuple of int + The m1, m2, m3 algorithm parameters as described in [2]_. If None, + suitable defaults will be chosen. Note: This parameter is specific to + cuCIM and does not exist in SciPy. + float64_distances : bool, optional + If True, use double precision in the distance computation (to match + SciPy behavior). Otherwise, single precision will be used for + efficiency. Note: This parameter is specific to cuCIM and does not + exist in SciPy. + + Returns + ------- + distances : cupy.ndarray, optional + The calculated distance transform. Returned only when + `return_distances` is ``True`` and `distances` is not supplied. It will + have the same shape as `image`. 
Will have dtype `cp.float64` if + `float64_distances` is ``True``, otherwise it will have dtype + ``cp.float32``. + indices : ndarray, optional + The calculated feature transform. It has an image-shaped array for each + dimension of the image. See example below. Returned only when + `return_indices` is ``True`` and `indices` is not supplied. + + Notes + ----- + The Euclidean distance transform gives values of the Euclidean distance. + + .. math:: + + y_i = \sqrt{\sum_{i}^{n} (x[i] - b[i])^2} + + where :math:`b[i]` is the background point (value 0) with the smallest + Euclidean distance to input points :math:`x[i]`, and :math:`n` is the + number of dimensions. + + Note that the `indices` output may differ from the one given by + :func:`scipy.ndimage.distance_transform_edt` in the case of input pixels + that are equidistant from multiple background points. + + The parallel banding algorithm implemented here was originally described in + [1]_. The kernels used here correspond to the revised PBA+ implementation + that is described on the author's website [2]_. The source code of the + author's PBA+ implementation is available at [3]_. + + References + ---------- + .. [1] Thanh-Tung Cao, Ke Tang, Anis Mohamed, and Tiow-Seng Tan. 2010. + Parallel Banding Algorithm to compute exact distance transform with the + GPU. In Proceedings of the 2010 ACM SIGGRAPH symposium on Interactive + 3D Graphics and Games (I3D ’10). Association for Computing Machinery, + New York, NY, USA, 83–90. + DOI:https://doi.org/10.1145/1730804.1730818 + .. [2] https://www.comp.nus.edu.sg/~tants/pba.html + .. [3] https://github.com/orzzzjq/Parallel-Banding-Algorithm-plus + + Examples + -------- + >>> import cupy as cp + >>> from cucim.core.operations import morphology + >>> a = cp.array(([0,1,1,1,1], + ... [0,0,1,1,1], + ... [0,1,1,1,1], + ... [0,1,1,1,0], + ... [0,1,1,0,0])) + >>> morphology.distance_transform_edt(a) + array([[ 0. , 1. , 1.4142, 2.2361, 3. ], + [ 0. , 0. , 1. , 2. , 2. ], + [ 0. , 1. 
, 1.4142, 1.4142, 1. ], + [ 0. , 1. , 1.4142, 1. , 0. ], + [ 0. , 1. , 1. , 0. , 0. ]]) + + With a sampling of 2 units along x, 1 along y: + + >>> morphology.distance_transform_edt(a, sampling=[2,1]) + array([[ 0. , 1. , 2. , 2.8284, 3.6056], + [ 0. , 0. , 1. , 2. , 3. ], + [ 0. , 1. , 2. , 2.2361, 2. ], + [ 0. , 1. , 2. , 1. , 0. ], + [ 0. , 1. , 1. , 0. , 0. ]]) + + Asking for indices as well: + + >>> edt, inds = morphology.distance_transform_edt(a, return_indices=True) + >>> inds + array([[[0, 0, 1, 1, 3], + [1, 1, 1, 1, 3], + [2, 2, 1, 3, 3], + [3, 3, 4, 4, 3], + [4, 4, 4, 4, 4]], + [[0, 0, 1, 1, 4], + [0, 1, 1, 1, 4], + [0, 0, 1, 4, 4], + [0, 0, 3, 3, 4], + [0, 0, 3, 3, 4]]]) + + """ + scalar_sampling = None + if sampling is not None: + if isinstance(sampling, numbers.Number): + sampling = (sampling,) + if len(set(sampling)) == 1: + # In the isotropic case, can use the kernels without sample scaling + # and just adjust the final distance accordingly. + scalar_sampling = float(sampling[0]) + sampling = None + + if image.ndim == 3: + pba_func = _pba_3d + elif image.ndim == 2: + pba_func = _pba_2d + else: + raise NotImplementedError( + "Only 2D and 3D distance transforms are supported.") + + vals = pba_func( + image, + sampling=sampling, + return_distances=return_distances, + return_indices=return_indices, + block_params=block_params, + distances=distances, + indices=indices, + float64_distances=float64_distances, + ) + + if return_distances and scalar_sampling is not None: + # inplace multiply in case distance != None + vals = list(vals) + vals[0] *= scalar_sampling + vals = tuple(vals) + + if len(vals) == 1: + vals = vals[0] + + return vals diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/_filters.py b/vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/_filters.py new file mode 100644 index 0000000000000000000000000000000000000000..f8a3affc214d1f7e5212a1a8ad46a347106dc9df --- /dev/null +++ 
b/vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/_filters.py @@ -0,0 +1,1255 @@ +import numpy + +import cupy + +from cupy import _core +from cupy._core import internal +from cupyx.scipy.ndimage import _util +from cupyx.scipy.ndimage import _filters_core +from cupyx.scipy.ndimage import _filters_generic + + +def correlate(input, weights, output=None, mode='reflect', cval=0.0, origin=0): + """Multi-dimensional correlate. + + The array is correlated with the given kernel. + + Args: + input (cupy.ndarray): The input array. + weights (cupy.ndarray): Array of weights, same number of dimensions as + input + output (cupy.ndarray, dtype or None): The array in which to place the + output. + mode (str): The array borders are handled according to the given mode + (``'reflect'``, ``'constant'``, ``'nearest'``, ``'mirror'``, + ``'wrap'``). Default is ``'reflect'``. + cval (scalar): Value to fill past edges of input if mode is + ``constant``. Default is ``0.0``. + origin (scalar or tuple of scalar): The origin parameter controls the + placement of the filter, relative to the center of the current + element of the input. Default of 0 is equivalent to + ``(0,)*input.ndim``. + + Returns: + cupy.ndarray: The result of correlate. + + .. seealso:: :func:`scipy.ndimage.correlate` + + .. note:: + When the output data type is integral (or when no output is provided + and input is integral) the results may not perfectly match the results + from SciPy due to floating-point rounding of intermediate results. + """ + return _correlate_or_convolve(input, weights, output, mode, cval, origin) + + +def convolve(input, weights, output=None, mode='reflect', cval=0.0, origin=0): + """Multi-dimensional convolution. + + The array is convolved with the given kernel. + + Args: + input (cupy.ndarray): The input array. + weights (cupy.ndarray): Array of weights, same number of dimensions as + input + output (cupy.ndarray, dtype or None): The array in which to place the + output. 
+ mode (str): The array borders are handled according to the given mode + (``'reflect'``, ``'constant'``, ``'nearest'``, ``'mirror'``, + ``'wrap'``). Default is ``'reflect'``. + cval (scalar): Value to fill past edges of input if mode is + ``constant``. Default is ``0.0``. + origin (scalar or tuple of scalar): The origin parameter controls the + placement of the filter, relative to the center of the current + element of the input. Default of 0 is equivalent to + ``(0,)*input.ndim``. + + Returns: + cupy.ndarray: The result of convolution. + + .. seealso:: :func:`scipy.ndimage.convolve` + + .. note:: + When the output data type is integral (or when no output is provided + and input is integral) the results may not perfectly match the results + from SciPy due to floating-point rounding of intermediate results. + """ + return _correlate_or_convolve(input, weights, output, mode, cval, origin, + True) + + +def correlate1d(input, weights, axis=-1, output=None, mode="reflect", cval=0.0, + origin=0): + """One-dimensional correlate. + + The array is correlated with the given kernel. + + Args: + input (cupy.ndarray): The input array. + weights (cupy.ndarray): One-dimensional array of weights + axis (int): The axis of input along which to calculate. Default is -1. + output (cupy.ndarray, dtype or None): The array in which to place the + output. Default is is same dtype as the input. + mode (str): The array borders are handled according to the given mode + (``'reflect'``, ``'constant'``, ``'nearest'``, ``'mirror'``, + ``'wrap'``). Default is ``'reflect'``. + cval (scalar): Value to fill past edges of input if mode is + ``'constant'``. Default is ``0.0``. + origin (int): The origin parameter controls the placement of the + filter, relative to the center of the current element of the + input. Default is ``0``. + + Returns: + cupy.ndarray: The result of the 1D correlation. + + .. seealso:: :func:`scipy.ndimage.correlate1d` + + .. 
note:: + When the output data type is integral (or when no output is provided + and input is integral) the results may not perfectly match the results + from SciPy due to floating-point rounding of intermediate results. + """ + weights, origins = _filters_core._convert_1d_args(input.ndim, weights, + origin, axis) + return _correlate_or_convolve(input, weights, output, mode, cval, origins) + + +def convolve1d(input, weights, axis=-1, output=None, mode="reflect", cval=0.0, + origin=0): + """One-dimensional convolution. + + The array is convolved with the given kernel. + + Args: + input (cupy.ndarray): The input array. + weights (cupy.ndarray): One-dimensional array of weights + axis (int): The axis of input along which to calculate. Default is -1. + output (cupy.ndarray, dtype or None): The array in which to place the + output. Default is is same dtype as the input. + mode (str): The array borders are handled according to the given mode + (``'reflect'``, ``'constant'``, ``'nearest'``, ``'mirror'``, + ``'wrap'``). Default is ``'reflect'``. + cval (scalar): Value to fill past edges of input if mode is + ``'constant'``. Default is ``0.0``. + origin (int): The origin parameter controls the placement of the + filter, relative to the center of the current element of the + input. Default is ``0``. + Returns: + cupy.ndarray: The result of the 1D convolution. + + .. seealso:: :func:`scipy.ndimage.convolve1d` + + .. note:: + When the output data type is integral (or when no output is provided + and input is integral) the results may not perfectly match the results + from SciPy due to floating-point rounding of intermediate results. 
+ """ + weights, origins = _filters_core._convert_1d_args(input.ndim, weights, + origin, axis) + return _correlate_or_convolve(input, weights, output, mode, cval, origins, + True) + + +def _correlate_or_convolve(input, weights, output, mode, cval, origin, + convolution=False): + origins, int_type = _filters_core._check_nd_args(input, weights, + mode, origin) + if weights.size == 0: + return cupy.zeros_like(input) + + _util._check_cval(mode, cval, _util._is_integer_output(output, input)) + + if convolution: + weights = weights[tuple([slice(None, None, -1)] * weights.ndim)] + origins = list(origins) + for i, wsize in enumerate(weights.shape): + origins[i] = -origins[i] + if wsize % 2 == 0: + origins[i] -= 1 + origins = tuple(origins) + elif weights.dtype.kind == "c": + # numpy.correlate conjugates weights rather than input. + weights = weights.conj() + weights_dtype = _util._get_weights_dtype(input, weights) + offsets = _filters_core._origins_to_offsets(origins, weights.shape) + kernel = _get_correlate_kernel(mode, weights.shape, int_type, + offsets, cval) + output = _filters_core._call_kernel(kernel, input, weights, output, + weights_dtype=weights_dtype) + return output + + +@cupy._util.memoize(for_each_device=True) +def _get_correlate_kernel(mode, w_shape, int_type, offsets, cval): + return _filters_core._generate_nd_kernel( + 'correlate', + 'W sum = (W)0;', + 'sum += cast({value}) * wval;', + 'y = cast(sum);', + mode, w_shape, int_type, offsets, cval, ctype='W') + + +def _run_1d_correlates(input, params, get_weights, output, mode, cval, + origin=0): + """ + Enhanced version of _run_1d_filters that uses correlate1d as the filter + function. The params are a list of values to pass to the get_weights + callable given. If duplicate param values are found, the weights are + reused from the first invocation of get_weights. The get_weights callable + must return a 1D array of weights to give to correlate1d. 
+ """ + wghts = {} + for param in params: + if param not in wghts: + wghts[param] = get_weights(param) + wghts = [wghts[param] for param in params] + return _filters_core._run_1d_filters( + [None if w is None else correlate1d for w in wghts], + input, wghts, output, mode, cval, origin) + + +def uniform_filter1d(input, size, axis=-1, output=None, mode="reflect", + cval=0.0, origin=0): + """One-dimensional uniform filter along the given axis. + + The lines of the array along the given axis are filtered with a uniform + filter of the given size. + + Args: + input (cupy.ndarray): The input array. + size (int): Length of the uniform filter. + axis (int): The axis of input along which to calculate. Default is -1. + output (cupy.ndarray, dtype or None): The array in which to place the + output. Default is is same dtype as the input. + mode (str): The array borders are handled according to the given mode + (``'reflect'``, ``'constant'``, ``'nearest'``, ``'mirror'``, + ``'wrap'``). Default is ``'reflect'``. + cval (scalar): Value to fill past edges of input if mode is + ``'constant'``. Default is ``0.0``. + origin (int): The origin parameter controls the placement of the + filter, relative to the center of the current element of the + input. Default is ``0``. + + Returns: + cupy.ndarray: The result of the filtering. + + .. seealso:: :func:`scipy.ndimage.uniform_filter1d` + + .. note:: + When the output data type is integral (or when no output is provided + and input is integral) the results may not perfectly match the results + from SciPy due to floating-point rounding of intermediate results. + """ + weights_dtype = _util._init_weights_dtype(input) + weights = cupy.full(size, 1 / size, dtype=weights_dtype) + return correlate1d(input, weights, axis, output, mode, cval, origin) + + +def uniform_filter(input, size=3, output=None, mode="reflect", cval=0.0, + origin=0): + """Multi-dimensional uniform filter. + + Args: + input (cupy.ndarray): The input array. 
+ size (int or sequence of int): Lengths of the uniform filter for each + dimension. A single value applies to all axes. + output (cupy.ndarray, dtype or None): The array in which to place the + output. Default is is same dtype as the input. + mode (str): The array borders are handled according to the given mode + (``'reflect'``, ``'constant'``, ``'nearest'``, ``'mirror'``, + ``'wrap'``). Default is ``'reflect'``. + cval (scalar): Value to fill past edges of input if mode is + ``'constant'``. Default is ``0.0``. + origin (int or sequence of int): The origin parameter controls the + placement of the filter, relative to the center of the current + element of the input. Default of ``0`` is equivalent to + ``(0,)*input.ndim``. + + Returns: + cupy.ndarray: The result of the filtering. + + .. seealso:: :func:`scipy.ndimage.uniform_filter` + + .. note:: + When the output data type is integral (or when no output is provided + and input is integral) the results may not perfectly match the results + from SciPy due to floating-point rounding of intermediate results. + """ + sizes = _util._fix_sequence_arg(size, input.ndim, 'size', int) + weights_dtype = _util._init_weights_dtype(input) + + def get(size, dtype=weights_dtype): + return None if size <= 1 else cupy.full(size, 1 / size, dtype=dtype) + + return _run_1d_correlates(input, sizes, get, output, mode, cval, origin) + + +def gaussian_filter1d(input, sigma, axis=-1, order=0, output=None, + mode="reflect", cval=0.0, truncate=4.0): + """One-dimensional Gaussian filter along the given axis. + + The lines of the array along the given axis are filtered with a Gaussian + filter of the given standard deviation. + + Args: + input (cupy.ndarray): The input array. + sigma (scalar): Standard deviation for Gaussian kernel. + axis (int): The axis of input along which to calculate. Default is -1. + order (int): An order of ``0``, the default, corresponds to convolution + with a Gaussian kernel. 
A positive order corresponds to convolution + with that derivative of a Gaussian. + output (cupy.ndarray, dtype or None): The array in which to place the + output. Default is is same dtype as the input. + mode (str): The array borders are handled according to the given mode + (``'reflect'``, ``'constant'``, ``'nearest'``, ``'mirror'``, + ``'wrap'``). Default is ``'reflect'``. + cval (scalar): Value to fill past edges of input if mode is + ``'constant'``. Default is ``0.0``. + truncate (float): Truncate the filter at this many standard deviations. + Default is ``4.0``. + + Returns: + cupy.ndarray: The result of the filtering. + + .. seealso:: :func:`scipy.ndimage.gaussian_filter1d` + + .. note:: + When the output data type is integral (or when no output is provided + and input is integral) the results may not perfectly match the results + from SciPy due to floating-point rounding of intermediate results. + """ + radius = int(float(truncate) * float(sigma) + 0.5) + weights_dtype = _util._init_weights_dtype(input) + weights = _gaussian_kernel1d( + sigma, int(order), radius, dtype=weights_dtype + ) + return correlate1d(input, weights, axis, output, mode, cval) + + +def gaussian_filter(input, sigma, order=0, output=None, mode="reflect", + cval=0.0, truncate=4.0): + """Multi-dimensional Gaussian filter. + + Args: + input (cupy.ndarray): The input array. + sigma (scalar or sequence of scalar): Standard deviations for each axis + of Gaussian kernel. A single value applies to all axes. + order (int or sequence of scalar): An order of ``0``, the default, + corresponds to convolution with a Gaussian kernel. A positive order + corresponds to convolution with that derivative of a Gaussian. A + single value applies to all axes. + output (cupy.ndarray, dtype or None): The array in which to place the + output. Default is is same dtype as the input. 
+ mode (str): The array borders are handled according to the given mode + (``'reflect'``, ``'constant'``, ``'nearest'``, ``'mirror'``, + ``'wrap'``). Default is ``'reflect'``. + cval (scalar): Value to fill past edges of input if mode is + ``'constant'``. Default is ``0.0``. + truncate (float): Truncate the filter at this many standard deviations. + Default is ``4.0``. + + Returns: + cupy.ndarray: The result of the filtering. + + .. seealso:: :func:`scipy.ndimage.gaussian_filter` + + .. note:: + When the output data type is integral (or when no output is provided + and input is integral) the results may not perfectly match the results + from SciPy due to floating-point rounding of intermediate results. + """ + sigmas = _util._fix_sequence_arg(sigma, input.ndim, 'sigma', float) + orders = _util._fix_sequence_arg(order, input.ndim, 'order', int) + truncate = float(truncate) + weights_dtype = _util._init_weights_dtype(input) + + def get(param): + sigma, order = param + radius = int(truncate * float(sigma) + 0.5) + if radius <= 0: + return None + return _gaussian_kernel1d(sigma, order, radius, dtype=weights_dtype) + + return _run_1d_correlates(input, list(zip(sigmas, orders)), get, output, + mode, cval, 0) + + +def _gaussian_kernel1d(sigma, order, radius, dtype=cupy.float64): + """ + Computes a 1-D Gaussian correlation kernel. 
+ """ + if order < 0: + raise ValueError('order must be non-negative') + sigma2 = sigma * sigma + x = numpy.arange(-radius, radius+1) + phi_x = numpy.exp(-0.5 / sigma2 * x ** 2) + phi_x /= phi_x.sum() + + if order == 0: + return cupy.asarray(phi_x) + + # f(x) = q(x) * phi(x) = q(x) * exp(p(x)) + # f'(x) = (q'(x) + q(x) * p'(x)) * phi(x) + # p'(x) = -1 / sigma ** 2 + # Implement q'(x) + q(x) * p'(x) as a matrix operator and apply to the + # coefficients of q(x) + exponent_range = numpy.arange(order + 1) + q = numpy.zeros(order + 1) + q[0] = 1 + D = numpy.diag(exponent_range[1:], 1) # D @ q(x) = q'(x) + P = numpy.diag(numpy.ones(order)/-sigma2, -1) # P @ q(x) = q(x) * p'(x) + Q_deriv = D + P + for _ in range(order): + q = Q_deriv.dot(q) + q = (x[:, None] ** exponent_range).dot(q) + return cupy.asarray((q * phi_x)[::-1], dtype=dtype) + + +def prewitt(input, axis=-1, output=None, mode="reflect", cval=0.0): + """Compute a Prewitt filter along the given axis. + + Args: + input (cupy.ndarray): The input array. + axis (int): The axis of input along which to calculate. Default is -1. + output (cupy.ndarray, dtype or None): The array in which to place the + output. Default is is same dtype as the input. + mode (str): The array borders are handled according to the given mode + (``'reflect'``, ``'constant'``, ``'nearest'``, ``'mirror'``, + ``'wrap'``). Default is ``'reflect'``. + cval (scalar): Value to fill past edges of input if mode is + ``'constant'``. Default is ``0.0``. + + Returns: + cupy.ndarray: The result of the filtering. + + .. seealso:: :func:`scipy.ndimage.prewitt` + + .. note:: + When the output data type is integral (or when no output is provided + and input is integral) the results may not perfectly match the results + from SciPy due to floating-point rounding of intermediate results. 
+ """ + weights_dtype = _util._init_weights_dtype(input) + weights = cupy.ones(3, dtype=weights_dtype) + return _prewitt_or_sobel(input, axis, output, mode, cval, weights) + + +def sobel(input, axis=-1, output=None, mode="reflect", cval=0.0): + """Compute a Sobel filter along the given axis. + + Args: + input (cupy.ndarray): The input array. + axis (int): The axis of input along which to calculate. Default is -1. + output (cupy.ndarray, dtype or None): The array in which to place the + output. Default is is same dtype as the input. + mode (str): The array borders are handled according to the given mode + (``'reflect'``, ``'constant'``, ``'nearest'``, ``'mirror'``, + ``'wrap'``). Default is ``'reflect'``. + cval (scalar): Value to fill past edges of input if mode is + ``'constant'``. Default is ``0.0``. + + Returns: + cupy.ndarray: The result of the filtering. + + .. seealso:: :func:`scipy.ndimage.sobel` + + .. note:: + When the output data type is integral (or when no output is provided + and input is integral) the results may not perfectly match the results + from SciPy due to floating-point rounding of intermediate results. + """ + weights_dtype = _util._init_weights_dtype(input) + return _prewitt_or_sobel(input, axis, output, mode, cval, + cupy.array([1, 2, 1], dtype=weights_dtype)) + + +def _prewitt_or_sobel(input, axis, output, mode, cval, weights): + axis = internal._normalize_axis_index(axis, input.ndim) + + def get(is_diff): + return cupy.array([-1, 0, 1], dtype=weights.dtype) if is_diff else weights # noqa + + return _run_1d_correlates(input, [a == axis for a in range(input.ndim)], + get, output, mode, cval) + + +def generic_laplace(input, derivative2, output=None, mode="reflect", + cval=0.0, extra_arguments=(), extra_keywords=None): + """Multi-dimensional Laplace filter using a provided second derivative + function. + + Args: + input (cupy.ndarray): The input array. 
+ derivative2 (callable): Function or other callable with the following + signature that is called once per axis:: + + derivative2(input, axis, output, mode, cval, + *extra_arguments, **extra_keywords) + + where ``input`` and ``output`` are ``cupy.ndarray``, ``axis`` is an + ``int`` from ``0`` to the number of dimensions, and ``mode``, + ``cval``, ``extra_arguments``, ``extra_keywords`` are the values + given to this function. + output (cupy.ndarray, dtype or None): The array in which to place the + output. Default is is same dtype as the input. + mode (str): The array borders are handled according to the given mode + (``'reflect'``, ``'constant'``, ``'nearest'``, ``'mirror'``, + ``'wrap'``). Default is ``'reflect'``. + cval (scalar): Value to fill past edges of input if mode is + ``'constant'``. Default is ``0.0``. + extra_arguments (sequence, optional): + Sequence of extra positional arguments to pass to ``derivative2``. + extra_keywords (dict, optional): + dict of extra keyword arguments to pass ``derivative2``. + + Returns: + cupy.ndarray: The result of the filtering. + + .. seealso:: :func:`scipy.ndimage.generic_laplace` + + .. note:: + When the output data type is integral (or when no output is provided + and input is integral) the results may not perfectly match the results + from SciPy due to floating-point rounding of intermediate results. 
+ """ + if extra_keywords is None: + extra_keywords = {} + ndim = input.ndim + modes = _util._fix_sequence_arg(mode, ndim, 'mode', + _util._check_mode) + output = _util._get_output(output, input) + if ndim == 0: + _core.elementwise_copy(input, output) + return output + derivative2(input, 0, output, modes[0], cval, + *extra_arguments, **extra_keywords) + if ndim > 1: + tmp = _util._get_output(output.dtype, input) + for i in range(1, ndim): + derivative2(input, i, tmp, modes[i], cval, + *extra_arguments, **extra_keywords) + output += tmp + return output + + +def laplace(input, output=None, mode="reflect", cval=0.0): + """Multi-dimensional Laplace filter based on approximate second + derivatives. + + Args: + input (cupy.ndarray): The input array. + output (cupy.ndarray, dtype or None): The array in which to place the + output. Default is is same dtype as the input. + mode (str): The array borders are handled according to the given mode + (``'reflect'``, ``'constant'``, ``'nearest'``, ``'mirror'``, + ``'wrap'``). Default is ``'reflect'``. + cval (scalar): Value to fill past edges of input if mode is + ``'constant'``. Default is ``0.0``. + Returns: + cupy.ndarray: The result of the filtering. + + .. seealso:: :func:`scipy.ndimage.laplace` + + .. note:: + When the output data type is integral (or when no output is provided + and input is integral) the results may not perfectly match the results + from SciPy due to floating-point rounding of intermediate results. + """ + weights_dtype = _util._init_weights_dtype(input) + weights = cupy.array([1, -2, 1], dtype=weights_dtype) + + def derivative2(input, axis, output, mode, cval): + return correlate1d(input, weights, axis, output, mode, cval) + + return generic_laplace(input, derivative2, output, mode, cval) + + +def gaussian_laplace(input, sigma, output=None, mode="reflect", + cval=0.0, **kwargs): + """Multi-dimensional Laplace filter using Gaussian second derivatives. + + Args: + input (cupy.ndarray): The input array. 
+ sigma (scalar or sequence of scalar): Standard deviations for each axis + of Gaussian kernel. A single value applies to all axes. + output (cupy.ndarray, dtype or None): The array in which to place the + output. Default is is same dtype as the input. + mode (str): The array borders are handled according to the given mode + (``'reflect'``, ``'constant'``, ``'nearest'``, ``'mirror'``, + ``'wrap'``). Default is ``'reflect'``. + cval (scalar): Value to fill past edges of input if mode is + ``'constant'``. Default is ``0.0``. + kwargs (dict, optional): + dict of extra keyword arguments to pass ``gaussian_filter()``. + + Returns: + cupy.ndarray: The result of the filtering. + + .. seealso:: :func:`scipy.ndimage.gaussian_laplace` + + .. note:: + When the output data type is integral (or when no output is provided + and input is integral) the results may not perfectly match the results + from SciPy due to floating-point rounding of intermediate results. + """ + def derivative2(input, axis, output, mode, cval): + order = [0] * input.ndim + order[axis] = 2 + return gaussian_filter(input, sigma, order, output, mode, cval, + **kwargs) + return generic_laplace(input, derivative2, output, mode, cval) + + +def generic_gradient_magnitude(input, derivative, output=None, + mode="reflect", cval=0.0, + extra_arguments=(), extra_keywords=None): + """Multi-dimensional gradient magnitude filter using a provided derivative + function. + + Args: + input (cupy.ndarray): The input array. + derivative (callable): Function or other callable with the following + signature that is called once per axis:: + + derivative(input, axis, output, mode, cval, + *extra_arguments, **extra_keywords) + + where ``input`` and ``output`` are ``cupy.ndarray``, ``axis`` is an + ``int`` from ``0`` to the number of dimensions, and ``mode``, + ``cval``, ``extra_arguments``, ``extra_keywords`` are the values + given to this function. + output (cupy.ndarray, dtype or None): The array in which to place the + output. 
Default is is same dtype as the input. + mode (str): The array borders are handled according to the given mode + (``'reflect'``, ``'constant'``, ``'nearest'``, ``'mirror'``, + ``'wrap'``). Default is ``'reflect'``. + cval (scalar): Value to fill past edges of input if mode is + ``'constant'``. Default is ``0.0``. + extra_arguments (sequence, optional): + Sequence of extra positional arguments to pass to ``derivative2``. + extra_keywords (dict, optional): + dict of extra keyword arguments to pass ``derivative2``. + + Returns: + cupy.ndarray: The result of the filtering. + + .. seealso:: :func:`scipy.ndimage.generic_gradient_magnitude` + + .. note:: + When the output data type is integral (or when no output is provided + and input is integral) the results may not perfectly match the results + from SciPy due to floating-point rounding of intermediate results. + """ + if extra_keywords is None: + extra_keywords = {} + ndim = input.ndim + modes = _util._fix_sequence_arg(mode, ndim, 'mode', + _util._check_mode) + output = _util._get_output(output, input) + if ndim == 0: + _core.elementwise_copy(input, output) + return output + derivative(input, 0, output, modes[0], cval, + *extra_arguments, **extra_keywords) + output *= output + if ndim > 1: + tmp = _util._get_output(output.dtype, input) + for i in range(1, ndim): + derivative(input, i, tmp, modes[i], cval, + *extra_arguments, **extra_keywords) + tmp *= tmp + output += tmp + return cupy.sqrt(output, output, casting='unsafe') + + +def gaussian_gradient_magnitude(input, sigma, output=None, mode="reflect", + cval=0.0, **kwargs): + """Multi-dimensional gradient magnitude using Gaussian derivatives. + + Args: + input (cupy.ndarray): The input array. + sigma (scalar or sequence of scalar): Standard deviations for each axis + of Gaussian kernel. A single value applies to all axes. + output (cupy.ndarray, dtype or None): The array in which to place the + output. Default is is same dtype as the input. 
+ mode (str): The array borders are handled according to the given mode + (``'reflect'``, ``'constant'``, ``'nearest'``, ``'mirror'``, + ``'wrap'``). Default is ``'reflect'``. + cval (scalar): Value to fill past edges of input if mode is + ``'constant'``. Default is ``0.0``. + kwargs (dict, optional): + dict of extra keyword arguments to pass ``gaussian_filter()``. + + Returns: + cupy.ndarray: The result of the filtering. + + .. seealso:: :func:`scipy.ndimage.gaussian_gradient_magnitude` + + .. note:: + When the output data type is integral (or when no output is provided + and input is integral) the results may not perfectly match the results + from SciPy due to floating-point rounding of intermediate results. + """ + def derivative(input, axis, output, mode, cval): + order = [0] * input.ndim + order[axis] = 1 + return gaussian_filter(input, sigma, order, output, mode, cval, + **kwargs) + return generic_gradient_magnitude(input, derivative, output, mode, cval) + + +def minimum_filter(input, size=None, footprint=None, output=None, + mode="reflect", cval=0.0, origin=0): + """Multi-dimensional minimum filter. + + Args: + input (cupy.ndarray): The input array. + size (int or sequence of int): One of ``size`` or ``footprint`` must be + provided. If ``footprint`` is given, ``size`` is ignored. Otherwise + ``footprint = cupy.ones(size)`` with ``size`` automatically made to + match the number of dimensions in ``input``. + footprint (cupy.ndarray): a boolean array which specifies which of the + elements within this shape will get passed to the filter function. + output (cupy.ndarray, dtype or None): The array in which to place the + output. Default is is same dtype as the input. + mode (str): The array borders are handled according to the given mode + (``'reflect'``, ``'constant'``, ``'nearest'``, ``'mirror'``, + ``'wrap'``). Default is ``'reflect'``. + cval (scalar): Value to fill past edges of input if mode is + ``'constant'``. Default is ``0.0``. 
+ origin (int or sequence of int): The origin parameter controls the + placement of the filter, relative to the center of the current + element of the input. Default of 0 is equivalent to + ``(0,)*input.ndim``. + + Returns: + cupy.ndarray: The result of the filtering. + + .. seealso:: :func:`scipy.ndimage.minimum_filter` + """ + return _min_or_max_filter(input, size, footprint, None, output, mode, + cval, origin, 'min') + + +def maximum_filter(input, size=None, footprint=None, output=None, + mode="reflect", cval=0.0, origin=0): + """Multi-dimensional maximum filter. + + Args: + input (cupy.ndarray): The input array. + size (int or sequence of int): One of ``size`` or ``footprint`` must be + provided. If ``footprint`` is given, ``size`` is ignored. Otherwise + ``footprint = cupy.ones(size)`` with ``size`` automatically made to + match the number of dimensions in ``input``. + footprint (cupy.ndarray): a boolean array which specifies which of the + elements within this shape will get passed to the filter function. + output (cupy.ndarray, dtype or None): The array in which to place the + output. Default is is same dtype as the input. + mode (str): The array borders are handled according to the given mode + (``'reflect'``, ``'constant'``, ``'nearest'``, ``'mirror'``, + ``'wrap'``). Default is ``'reflect'``. + cval (scalar): Value to fill past edges of input if mode is + ``'constant'``. Default is ``0.0``. + origin (int or sequence of int): The origin parameter controls the + placement of the filter, relative to the center of the current + element of the input. Default of 0 is equivalent to + ``(0,)*input.ndim``. + + Returns: + cupy.ndarray: The result of the filtering. + + .. 
seealso:: :func:`scipy.ndimage.maximum_filter` + """ + return _min_or_max_filter(input, size, footprint, None, output, mode, + cval, origin, 'max') + + +def _min_or_max_filter(input, size, ftprnt, structure, output, mode, cval, + origin, func): + # structure is used by morphology.grey_erosion() and grey_dilation() + # and not by the regular min/max filters + + sizes, ftprnt, structure = _filters_core._check_size_footprint_structure( + input.ndim, size, ftprnt, structure) + if cval is cupy.nan: + raise NotImplementedError("NaN cval is unsupported") + + if sizes is not None: + # Separable filter, run as a series of 1D filters + fltr = minimum_filter1d if func == 'min' else maximum_filter1d + return _filters_core._run_1d_filters( + [fltr if size > 1 else None for size in sizes], + input, sizes, output, mode, cval, origin) + + origins, int_type = _filters_core._check_nd_args(input, ftprnt, + mode, origin, 'footprint') + if structure is not None and structure.ndim != input.ndim: + raise RuntimeError('structure array has incorrect shape') + + if ftprnt.size == 0: + return cupy.zeros_like(input) + offsets = _filters_core._origins_to_offsets(origins, ftprnt.shape) + kernel = _get_min_or_max_kernel(mode, ftprnt.shape, func, + offsets, float(cval), int_type, + has_structure=structure is not None, + has_central_value=bool(ftprnt[offsets])) + return _filters_core._call_kernel(kernel, input, ftprnt, output, + structure, weights_dtype=bool) + + +def minimum_filter1d(input, size, axis=-1, output=None, mode="reflect", + cval=0.0, origin=0): + """Compute the minimum filter along a single axis. + + Args: + input (cupy.ndarray): The input array. + size (int): Length of the minimum filter. + axis (int): The axis of input along which to calculate. Default is -1. + output (cupy.ndarray, dtype or None): The array in which to place the + output. Default is is same dtype as the input. 
+ mode (str): The array borders are handled according to the given mode + (``'reflect'``, ``'constant'``, ``'nearest'``, ``'mirror'``, + ``'wrap'``). Default is ``'reflect'``. + cval (scalar): Value to fill past edges of input if mode is + ``'constant'``. Default is ``0.0``. + origin (int): The origin parameter controls the placement of the + filter, relative to the center of the current element of the + input. Default is ``0``. + + Returns: + cupy.ndarray: The result of the filtering. + + .. seealso:: :func:`scipy.ndimage.minimum_filter1d` + """ + return _min_or_max_1d(input, size, axis, output, mode, cval, origin, 'min') + + +def maximum_filter1d(input, size, axis=-1, output=None, mode="reflect", + cval=0.0, origin=0): + """Compute the maximum filter along a single axis. + + Args: + input (cupy.ndarray): The input array. + size (int): Length of the maximum filter. + axis (int): The axis of input along which to calculate. Default is -1. + output (cupy.ndarray, dtype or None): The array in which to place the + output. Default is is same dtype as the input. + mode (str): The array borders are handled according to the given mode + (``'reflect'``, ``'constant'``, ``'nearest'``, ``'mirror'``, + ``'wrap'``). Default is ``'reflect'``. + cval (scalar): Value to fill past edges of input if mode is + ``'constant'``. Default is ``0.0``. + origin (int): The origin parameter controls the placement of the + filter, relative to the center of the current element of the + input. Default is ``0``. + + Returns: + cupy.ndarray: The result of the filtering. + + .. 
seealso:: :func:`scipy.ndimage.maximum_filter1d` + """ + return _min_or_max_1d(input, size, axis, output, mode, cval, origin, 'max') + + +def _min_or_max_1d(input, size, axis=-1, output=None, mode="reflect", cval=0.0, + origin=0, func='min'): + ftprnt = cupy.ones(size, dtype=bool) + ftprnt, origin = _filters_core._convert_1d_args(input.ndim, ftprnt, + origin, axis) + origins, int_type = _filters_core._check_nd_args(input, ftprnt, + mode, origin, 'footprint') + offsets = _filters_core._origins_to_offsets(origins, ftprnt.shape) + kernel = _get_min_or_max_kernel(mode, ftprnt.shape, func, offsets, + float(cval), int_type, has_weights=False) + return _filters_core._call_kernel(kernel, input, None, output, + weights_dtype=bool) + + +@cupy._util.memoize(for_each_device=True) +def _get_min_or_max_kernel(mode, w_shape, func, offsets, cval, int_type, + has_weights=True, has_structure=False, + has_central_value=True): + # When there are no 'weights' (the footprint, for the 1D variants) then + # we need to make sure intermediate results are stored as doubles for + # consistent results with scipy. + ctype = 'X' if has_weights else 'double' + value = '{value}' + if not has_weights: + value = 'cast({})'.format(value) + + # Having a non-flat structure biases the values + if has_structure: + value += ('-' if func == 'min' else '+') + 'cast(sval)' + + if has_central_value: + pre = '{} value = x[i];' + found = 'value = {func}({value}, value);' + else: + # If the central pixel is not included in the footprint we cannot + # assume `x[i]` is not below the min or above the max and thus cannot + # seed with that value. Instead we keep track of having set `value`. + pre = '{} value; bool set = false;' + found = 'value = set ? 
{func}({value}, value) : {value}; set=true;' + + return _filters_core._generate_nd_kernel( + func, pre.format(ctype), + found.format(func=func, value=value), 'y = cast(value);', + mode, w_shape, int_type, offsets, cval, ctype=ctype, + has_weights=has_weights, has_structure=has_structure) + + +def rank_filter(input, rank, size=None, footprint=None, output=None, + mode="reflect", cval=0.0, origin=0): + """Multi-dimensional rank filter. + + Args: + input (cupy.ndarray): The input array. + rank (int): The rank of the element to get. Can be negative to count + from the largest value, e.g. ``-1`` indicates the largest value. + size (int or sequence of int): One of ``size`` or ``footprint`` must be + provided. If ``footprint`` is given, ``size`` is ignored. Otherwise + ``footprint = cupy.ones(size)`` with ``size`` automatically made to + match the number of dimensions in ``input``. + footprint (cupy.ndarray): a boolean array which specifies which of the + elements within this shape will get passed to the filter function. + output (cupy.ndarray, dtype or None): The array in which to place the + output. Default is is same dtype as the input. + mode (str): The array borders are handled according to the given mode + (``'reflect'``, ``'constant'``, ``'nearest'``, ``'mirror'``, + ``'wrap'``). Default is ``'reflect'``. + cval (scalar): Value to fill past edges of input if mode is + ``'constant'``. Default is ``0.0``. + origin (int or sequence of int): The origin parameter controls the + placement of the filter, relative to the center of the current + element of the input. Default of 0 is equivalent to + ``(0,)*input.ndim``. + + Returns: + cupy.ndarray: The result of the filtering. + + .. 
seealso:: :func:`scipy.ndimage.rank_filter` + """ + rank = int(rank) + return _rank_filter(input, lambda fs: rank+fs if rank < 0 else rank, + size, footprint, output, mode, cval, origin) + + +def median_filter(input, size=None, footprint=None, output=None, + mode="reflect", cval=0.0, origin=0): + """Multi-dimensional median filter. + + Args: + input (cupy.ndarray): The input array. + size (int or sequence of int): One of ``size`` or ``footprint`` must be + provided. If ``footprint`` is given, ``size`` is ignored. Otherwise + ``footprint = cupy.ones(size)`` with ``size`` automatically made to + match the number of dimensions in ``input``. + footprint (cupy.ndarray): a boolean array which specifies which of the + elements within this shape will get passed to the filter function. + output (cupy.ndarray, dtype or None): The array in which to place the + output. Default is is same dtype as the input. + mode (str): The array borders are handled according to the given mode + (``'reflect'``, ``'constant'``, ``'nearest'``, ``'mirror'``, + ``'wrap'``). Default is ``'reflect'``. + cval (scalar): Value to fill past edges of input if mode is + ``'constant'``. Default is ``0.0``. + origin (int or sequence of int): The origin parameter controls the + placement of the filter, relative to the center of the current + element of the input. Default of 0 is equivalent to + ``(0,)*input.ndim``. + + Returns: + cupy.ndarray: The result of the filtering. + + .. seealso:: :func:`scipy.ndimage.median_filter` + """ + return _rank_filter(input, lambda fs: fs//2, + size, footprint, output, mode, cval, origin) + + +def percentile_filter(input, percentile, size=None, footprint=None, + output=None, mode="reflect", cval=0.0, origin=0): + """Multi-dimensional percentile filter. + + Args: + input (cupy.ndarray): The input array. + percentile (scalar): The percentile of the element to get (from ``0`` + to ``100``). Can be negative, thus ``-20`` equals ``80``. 
+ size (int or sequence of int): One of ``size`` or ``footprint`` must be + provided. If ``footprint`` is given, ``size`` is ignored. Otherwise + ``footprint = cupy.ones(size)`` with ``size`` automatically made to + match the number of dimensions in ``input``. + footprint (cupy.ndarray): a boolean array which specifies which of the + elements within this shape will get passed to the filter function. + output (cupy.ndarray, dtype or None): The array in which to place the + output. Default is is same dtype as the input. + mode (str): The array borders are handled according to the given mode + (``'reflect'``, ``'constant'``, ``'nearest'``, ``'mirror'``, + ``'wrap'``). Default is ``'reflect'``. + cval (scalar): Value to fill past edges of input if mode is + ``'constant'``. Default is ``0.0``. + origin (int or sequence of int): The origin parameter controls the + placement of the filter, relative to the center of the current + element of the input. Default of 0 is equivalent to + ``(0,)*input.ndim``. + + Returns: + cupy.ndarray: The result of the filtering. + + .. 
seealso:: :func:`scipy.ndimage.percentile_filter` + """ + percentile = float(percentile) + if percentile < 0.0: + percentile += 100.0 + if percentile < 0.0 or percentile > 100.0: + raise RuntimeError('invalid percentile') + if percentile == 100.0: + def get_rank(fs): + return fs - 1 + else: + def get_rank(fs): + return int(float(fs) * percentile / 100.0) + return _rank_filter(input, get_rank, + size, footprint, output, mode, cval, origin) + + +def _rank_filter(input, get_rank, size=None, footprint=None, output=None, + mode="reflect", cval=0.0, origin=0): + _, footprint, _ = _filters_core._check_size_footprint_structure( + input.ndim, size, footprint, None, force_footprint=True) + if cval is cupy.nan: + raise NotImplementedError("NaN cval is unsupported") + origins, int_type = _filters_core._check_nd_args(input, footprint, + mode, origin, 'footprint') + if footprint.size == 0: + return cupy.zeros_like(input) + filter_size = int(footprint.sum()) + rank = get_rank(filter_size) + if rank < 0 or rank >= filter_size: + raise RuntimeError('rank not within filter footprint size') + if rank == 0: + return _min_or_max_filter(input, None, footprint, None, output, mode, + cval, origins, 'min') + if rank == filter_size - 1: + return _min_or_max_filter(input, None, footprint, None, output, mode, + cval, origins, 'max') + offsets = _filters_core._origins_to_offsets(origins, footprint.shape) + kernel = _get_rank_kernel(filter_size, rank, mode, footprint.shape, + offsets, float(cval), int_type) + return _filters_core._call_kernel(kernel, input, footprint, output, + weights_dtype=bool) + + +__SHELL_SORT = ''' +__device__ void sort(X *array, int size) {{ + int gap = {gap}; + while (gap > 1) {{ + gap /= 3; + for (int i = gap; i < size; ++i) {{ + X value = array[i]; + int j = i - gap; + while (j >= 0 && value < array[j]) {{ + array[j + gap] = array[j]; + j -= gap; + }} + array[j + gap] = value; + }} + }} +}}''' + + +@cupy._util.memoize() +def _get_shell_gap(filter_size): + gap = 1 + 
while gap < filter_size: + gap = 3*gap+1 + return gap + + +@cupy._util.memoize(for_each_device=True) +def _get_rank_kernel(filter_size, rank, mode, w_shape, offsets, cval, + int_type): + s_rank = min(rank, filter_size - rank - 1) + # The threshold was set based on the measurements on a V100 + # TODO(leofang, anaruse): Use Optuna to automatically tune the threshold, + # as it may vary depending on the GPU in use, compiler version, dtype, + # filter size, etc. + if s_rank <= 80: + # When s_rank is small and register usage is low, this partial + # selection sort approach is faster than general sorting approach + # using shell sort. + if s_rank == rank: + comp_op = '<' + else: + comp_op = '>' + array_size = s_rank + 2 + found_post = ''' + if (iv > {rank} + 1) {{{{ + int target_iv = 0; + X target_val = values[0]; + for (int jv = 1; jv <= {rank} + 1; jv++) {{{{ + if (target_val {comp_op} values[jv]) {{{{ + target_val = values[jv]; + target_iv = jv; + }}}} + }}}} + if (target_iv <= {rank}) {{{{ + values[target_iv] = values[{rank} + 1]; + }}}} + iv = {rank} + 1; + }}}}'''.format(rank=s_rank, comp_op=comp_op) + post = ''' + X target_val = values[0]; + for (int jv = 1; jv <= {rank}; jv++) {{ + if (target_val {comp_op} values[jv]) {{ + target_val = values[jv]; + }} + }} + y=cast(target_val);'''.format(rank=s_rank, comp_op=comp_op) + sorter = '' + else: + array_size = filter_size + found_post = '' + post = 'sort(values,{});\ny=cast(values[{}]);'.format( + filter_size, rank) + sorter = __SHELL_SORT.format(gap=_get_shell_gap(filter_size)) + + return _filters_core._generate_nd_kernel( + 'rank_{}_{}'.format(filter_size, rank), + 'int iv = 0;\nX values[{}];'.format(array_size), + 'values[iv++] = {value};' + found_post, post, + mode, w_shape, int_type, offsets, cval, preamble=sorter) + + +def generic_filter(input, function, size=None, footprint=None, + output=None, mode="reflect", cval=0.0, origin=0): + """Compute a multi-dimensional filter using the provided raw kernel or + 
reduction kernel. + + Unlike the scipy.ndimage function, this does not support the + ``extra_arguments`` or ``extra_keywordsdict`` arguments and has significant + restrictions on the ``function`` provided. + + Args: + input (cupy.ndarray): The input array. + function (cupy.ReductionKernel or cupy.RawKernel): + The kernel or function to apply to each region. + size (int or sequence of int): One of ``size`` or ``footprint`` must be + provided. If ``footprint`` is given, ``size`` is ignored. Otherwise + ``footprint = cupy.ones(size)`` with ``size`` automatically made to + match the number of dimensions in ``input``. + footprint (cupy.ndarray): a boolean array which specifies which of the + elements within this shape will get passed to the filter function. + output (cupy.ndarray, dtype or None): The array in which to place the + output. Default is is same dtype as the input. + mode (str): The array borders are handled according to the given mode + (``'reflect'``, ``'constant'``, ``'nearest'``, ``'mirror'``, + ``'wrap'``). Default is ``'reflect'``. + cval (scalar): Value to fill past edges of input if mode is + ``'constant'``. Default is ``0.0``. + origin (scalar or tuple of scalar): The origin parameter controls the + placement of the filter, relative to the center of the current + element of the input. Default of 0 is equivalent to + ``(0,)*input.ndim``. + + Returns: + cupy.ndarray: The result of the filtering. + + .. note:: + If the `function` is a :class:`cupy.RawKernel` then it must be for a + function that has the following signature. Unlike most functions, this + should not utilize `blockDim`/`blockIdx`/`threadIdx`:: + + __global__ void func(double *buffer, int filter_size, + double *return_value) + + If the `function` is a :class:`cupy.ReductionKernel` then it must be + for a kernel that takes 1 array input and produces 1 'scalar' output. + + .. 
seealso:: :func:`scipy.ndimage.generic_filter` + """ + _, footprint, _ = _filters_core._check_size_footprint_structure( + input.ndim, size, footprint, None, 2, True) + filter_size = int(footprint.sum()) + origins, int_type = _filters_core._check_nd_args(input, footprint, + mode, origin, 'footprint') + in_dtype = input.dtype + sub = _filters_generic._get_sub_kernel(function) + if footprint.size == 0: + return cupy.zeros_like(input) + output = _util._get_output(output, input) + offsets = _filters_core._origins_to_offsets(origins, footprint.shape) + args = (filter_size, mode, footprint.shape, offsets, float(cval), int_type) + if isinstance(sub, cupy.RawKernel): + kernel = _filters_generic._get_generic_filter_raw(sub, *args) + elif isinstance(sub, cupy.ReductionKernel): + kernel = _filters_generic._get_generic_filter_red( + sub, in_dtype, output.dtype, *args) + return _filters_core._call_kernel(kernel, input, footprint, output, + weights_dtype=bool) + + +def generic_filter1d(input, function, filter_size, axis=-1, output=None, + mode="reflect", cval=0.0, origin=0): + """Compute a 1D filter along the given axis using the provided raw kernel. + + Unlike the scipy.ndimage function, this does not support the + ``extra_arguments`` or ``extra_keywordsdict`` arguments and has significant + restrictions on the ``function`` provided. + + Args: + input (cupy.ndarray): The input array. + function (cupy.RawKernel): The kernel to apply along each axis. + filter_size (int): Length of the filter. + axis (int): The axis of input along which to calculate. Default is -1. + output (cupy.ndarray, dtype or None): The array in which to place the + output. Default is is same dtype as the input. + mode (str): The array borders are handled according to the given mode + (``'reflect'``, ``'constant'``, ``'nearest'``, ``'mirror'``, + ``'wrap'``). Default is ``'reflect'``. + cval (scalar): Value to fill past edges of input if mode is + ``'constant'``. Default is ``0.0``. 
+ origin (int): The origin parameter controls the placement of the + filter, relative to the center of the current element of the + input. Default is ``0``. + + Returns: + cupy.ndarray: The result of the filtering. + + .. note:: + The provided function (as a RawKernel) must have the following + signature. Unlike most functions, this should not utilize + `blockDim`/`blockIdx`/`threadIdx`:: + + __global__ void func(double *input_line, ptrdiff_t input_length, + double *output_line, ptrdiff_t output_length) + + .. seealso:: :func:`scipy.ndimage.generic_filter1d` + """ + # This filter is very different than all other filters (including + # generic_filter and all 1d filters) and it has a customized solution. + # It is also likely fairly terrible, but only so much can be done when + # matching the scipy interface of having the sub-kernel work on entire + # lines of data. + if input.dtype.kind == 'c': + raise TypeError('Complex type not supported') + if not isinstance(function, cupy.RawKernel): + raise TypeError('bad function type') + if filter_size < 1: + raise RuntimeError('invalid filter size') + axis = internal._normalize_axis_index(axis, input.ndim) + origin = _util._check_origin(origin, filter_size) + _util._check_mode(mode) + output = _util._get_output(output, input) + in_ctype = cupy._core._scalar.get_typename(input.dtype) + out_ctype = cupy._core._scalar.get_typename(output.dtype) + int_type = _util._get_inttype(input) + n_lines = input.size // input.shape[axis] + kernel = _filters_generic._get_generic_filter1d( + function, input.shape[axis], n_lines, filter_size, + origin, mode, float(cval), in_ctype, out_ctype, int_type) + data = cupy.array( + (axis, input.ndim) + input.shape + input.strides + output.strides, + dtype=cupy.int32 if int_type == 'int' else cupy.int64) + kernel(((n_lines+128-1) // 128,), (128,), (input, output, data)) + return output diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/_filters_core.py 
b/vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/_filters_core.py new file mode 100644 index 0000000000000000000000000000000000000000..c22ab3ff69925f8b9d4884a3e1f8f8c903b8cf9f --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/_filters_core.py @@ -0,0 +1,308 @@ +import warnings + +import numpy +import cupy + +from cupy_backends.cuda.api import runtime +from cupy import _core +from cupy._core import internal +from cupyx.scipy.ndimage import _util + + +def _origins_to_offsets(origins, w_shape): + return tuple(x//2+o for x, o in zip(w_shape, origins)) + + +def _check_size_footprint_structure(ndim, size, footprint, structure, + stacklevel=3, force_footprint=False): + if structure is None and footprint is None: + if size is None: + raise RuntimeError("no footprint or filter size provided") + sizes = _util._fix_sequence_arg(size, ndim, 'size', int) + if force_footprint: + return None, cupy.ones(sizes, bool), None + return sizes, None, None + if size is not None: + warnings.warn("ignoring size because {} is set".format( + 'structure' if footprint is None else 'footprint'), + UserWarning, stacklevel=stacklevel+1) + + if footprint is not None: + footprint = cupy.array(footprint, bool, True, 'C') + if not footprint.any(): + raise ValueError("all-zero footprint is not supported") + + if structure is None: + if not force_footprint and footprint.all(): + if footprint.ndim != ndim: + raise RuntimeError("size must have length equal to input rank") + return footprint.shape, None, None + return None, footprint, None + + structure = cupy.ascontiguousarray(structure) + if footprint is None: + footprint = cupy.ones(structure.shape, bool) + return None, footprint, structure + + +def _convert_1d_args(ndim, weights, origin, axis): + if weights.ndim != 1 or weights.size < 1: + raise RuntimeError('incorrect filter size') + axis = internal._normalize_axis_index(axis, ndim) + w_shape = [1]*ndim + w_shape[axis] = weights.size + weights = weights.reshape(w_shape) 
+ origins = [0]*ndim + origins[axis] = _util._check_origin(origin, weights.size) + return weights, tuple(origins) + + +def _check_nd_args(input, weights, mode, origin, wghts_name='filter weights'): + _util._check_mode(mode) + # Weights must always be less than 2 GiB + if weights.nbytes >= (1 << 31): + raise RuntimeError('weights must be 2 GiB or less, use FFTs instead') + weight_dims = [x for x in weights.shape if x != 0] + if len(weight_dims) != input.ndim: + raise RuntimeError('{} array has incorrect shape'.format(wghts_name)) + origins = _util._fix_sequence_arg(origin, len(weight_dims), 'origin', int) + for origin, width in zip(origins, weight_dims): + _util._check_origin(origin, width) + return tuple(origins), _util._get_inttype(input) + + +def _run_1d_filters(filters, input, args, output, mode, cval, origin=0): + """ + Runs a series of 1D filters forming an nd filter. The filters must be a + list of callables that take input, arg, axis, output, mode, cval, origin. + The args is a list of values that are passed for the arg value to the + filter. Individual filters can be None causing that axis to be skipped. + """ + output = _util._get_output(output, input) + modes = _util._fix_sequence_arg(mode, input.ndim, 'mode', + _util._check_mode) + # for filters, "wrap" is a synonym for "grid-wrap". 
+ modes = ['grid-wrap' if m == 'wrap' else m for m in modes] + origins = _util._fix_sequence_arg(origin, input.ndim, 'origin', int) + n_filters = sum(filter is not None for filter in filters) + if n_filters == 0: + _core.elementwise_copy(input, output) + return output + # We can't operate in-place efficiently, so use a 2-buffer system + temp = _util._get_output(output.dtype, input) if n_filters > 1 else None + iterator = zip(filters, args, modes, origins) + # skip any axes where the filter is None + for axis, (fltr, arg, mode, origin) in enumerate(iterator): + if fltr is not None: + break + # To avoid need for any additional copies, we have to start with a + # different output array depending on whether the total number of filters + # is odd or even. + if n_filters % 2 == 0: + fltr(input, arg, axis, temp, mode, cval, origin) + input = temp + else: + fltr(input, arg, axis, output, mode, cval, origin) + input, output = output, temp + for axis, (fltr, arg, mode, origin) in enumerate(iterator, start=axis + 1): + if fltr is None: + continue + fltr(input, arg, axis, output, mode, cval, origin) + input, output = output, input + return input + + +def _call_kernel(kernel, input, weights, output, structure=None, + weights_dtype=numpy.float64, structure_dtype=numpy.float64): + """ + Calls a constructed ElementwiseKernel. The kernel must take an input image, + an optional array of weights, an optional array for the structure, and an + output array. + + weights and structure can be given as None (structure defaults to None) in + which case they are not passed to the kernel at all. If the output is given + as None then it will be allocated in this function. + + This function deals with making sure that the weights and structure are + contiguous and float64 (or bool for weights that are footprints)*, that the + output is allocated and appriopately shaped. This also deals with the + situation that the input and output arrays overlap in memory. 
+ + * weights is always cast to float64 or bool in order to get an output + compatible with SciPy, though float32 might be sufficient when input dtype + is low precision. If weights_dtype is passed as weights.dtype then no + dtype conversion will occur. The input and output are never converted. + """ + args = [input] + complex_output = input.dtype.kind == 'c' + if weights is not None: + weights = cupy.ascontiguousarray(weights, weights_dtype) + complex_output = complex_output or weights.dtype.kind == 'c' + args.append(weights) + if structure is not None: + structure = cupy.ascontiguousarray(structure, structure_dtype) + args.append(structure) + output = _util._get_output(output, input, None, complex_output) + needs_temp = cupy.shares_memory(output, input, 'MAY_SHARE_BOUNDS') + if needs_temp: + output, temp = _util._get_output(output.dtype, input), output + args.append(output) + kernel(*args) + if needs_temp: + _core.elementwise_copy(temp, output) + output = temp + return output + + +if runtime.is_hip: + includes = r''' +// workaround for HIP: line begins with #include +#include \n +''' +else: + includes = r''' +#include // provide C++ std:: coverage +#include + +template<> struct std::is_floating_point : std::true_type {}; +template<> struct std::is_signed : std::true_type {}; +''' + + +_CAST_FUNCTION = """ +// Implements a casting function to make it compatible with scipy +// Use like cast(value) +template +__device__ __forceinline__ +typename std::enable_if<(!std::is_floating_point::value + || std::is_signed::value), B>::type +cast(A a) { return (B)a; } + +template +__device__ __forceinline__ +typename std::enable_if<(std::is_floating_point::value + && (!std::is_signed::value)), B>::type +cast(A a) { return (a >= 0) ? 
(B)a : -(B)(-a); } + +template +__device__ __forceinline__ bool nonzero(T x) { return x != static_cast(0); } +""" + + +def _generate_nd_kernel(name, pre, found, post, mode, w_shape, int_type, + offsets, cval, ctype='X', preamble='', options=(), + has_weights=True, has_structure=False, has_mask=False, + binary_morphology=False, all_weights_nonzero=False): + # Currently this code uses CArray for weights but avoids using CArray for + # the input data and instead does the indexing itself since it is faster. + # If CArray becomes faster than follow the comments that start with + # CArray: to switch over to using CArray for the input data as well. + + ndim = len(w_shape) + in_params = 'raw X x' + if has_weights: + in_params += ', raw W w' + if has_structure: + in_params += ', raw S s' + if has_mask: + in_params += ', raw M mask' + out_params = 'Y y' + + # for filters, "wrap" is a synonym for "grid-wrap" + mode = 'grid-wrap' if mode == 'wrap' else mode + + # CArray: remove xstride_{j}=... from string + size = ('%s xsize_{j}=x.shape()[{j}], ysize_{j} = _raw_y.shape()[{j}]' + ', xstride_{j}=x.strides()[{j}];' % int_type) + sizes = [size.format(j=j) for j in range(ndim)] + inds = _util._generate_indices_ops(ndim, int_type, offsets) + # CArray: remove expr entirely + expr = ' + '.join(['ix_{}'.format(j) for j in range(ndim)]) + + ws_init = ws_pre = ws_post = '' + if has_weights or has_structure: + ws_init = 'int iws = 0;' + if has_structure: + ws_pre = 'S sval = s[iws];\n' + if has_weights: + ws_pre += 'W wval = w[iws];\n' + if not all_weights_nonzero: + ws_pre += 'if (nonzero(wval))' + ws_post = 'iws++;' + + loops = [] + for j in range(ndim): + if w_shape[j] == 1: + # CArray: string becomes 'inds[{j}] = ind_{j};', remove (int_)type + loops.append('{{ {type} ix_{j} = ind_{j} * xstride_{j};'. 
+ format(j=j, type=int_type)) + else: + boundary = _util._generate_boundary_condition_ops( + mode, 'ix_{}'.format(j), 'xsize_{}'.format(j), int_type) + # CArray: last line of string becomes inds[{j}] = ix_{j}; + loops.append(''' + for (int iw_{j} = 0; iw_{j} < {wsize}; iw_{j}++) + {{ + {type} ix_{j} = ind_{j} + iw_{j}; + {boundary} + ix_{j} *= xstride_{j}; + '''.format(j=j, wsize=w_shape[j], boundary=boundary, type=int_type)) + + # CArray: string becomes 'x[inds]', no format call needed + value = '(*(X*)&data[{expr}])'.format(expr=expr) + if mode == 'constant': + cond = ' || '.join(['(ix_{} < 0)'.format(j) for j in range(ndim)]) + + if cval is numpy.nan: + cval = 'CUDART_NAN' + elif cval == numpy.inf: + cval = 'CUDART_INF' + elif cval == -numpy.inf: + cval = '-CUDART_INF' + + if binary_morphology: + found = found.format(cond=cond, value=value) + else: + if mode == 'constant': + value = '(({cond}) ? cast<{ctype}>({cval}) : {value})'.format( + cond=cond, ctype=ctype, cval=cval, value=value) + found = found.format(value=value) + + # CArray: replace comment and next line in string with + # {type} inds[{ndim}] = {{0}}; + # and add ndim=ndim, type=int_type to format call + operation = ''' + {sizes} + {inds} + // don't use a CArray for indexing (faster to deal with indexing ourselves) + const unsigned char* data = (const unsigned char*)&x[0]; + {ws_init} + {pre} + {loops} + // inner-most loop + {ws_pre} {{ + {found} + }} + {ws_post} + {end_loops} + {post} + '''.format(sizes='\n'.join(sizes), inds=inds, pre=pre, post=post, + ws_init=ws_init, ws_pre=ws_pre, ws_post=ws_post, + loops='\n'.join(loops), found=found, end_loops='}'*ndim) + + mode_str = mode.replace('-', '_') # avoid potential hyphen in kernel name + name = 'cupyx_scipy_ndimage_{}_{}d_{}_w{}'.format( + name, ndim, mode_str, '_'.join(['{}'.format(x) for x in w_shape])) + if all_weights_nonzero: + name += '_all_nonzero' + if int_type == 'ptrdiff_t': + name += '_i64' + if has_structure: + name += '_with_structure' + 
if has_mask: + name += '_with_mask' + preamble = includes + _CAST_FUNCTION + preamble + options += ('--std=c++11', ) + return cupy.ElementwiseKernel(in_params, out_params, operation, name, + reduce_dims=False, preamble=preamble, + options=options) diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/_filters_generic.py b/vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/_filters_generic.py new file mode 100644 index 0000000000000000000000000000000000000000..fbbf7b6afba20a18c423d07c16a2b3b8bba1291a --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/_filters_generic.py @@ -0,0 +1,272 @@ +import cupy +from cupy_backends.cuda.api import runtime +from cupy import _util +from cupyx.scipy.ndimage import _filters_core + + +def _get_sub_kernel(f): + """ + Takes the "function" given to generic_filter and returns the "sub-kernel" + that will be called, one of RawKernel or ReductionKernel. + + This supports: + * cupy.RawKernel + no checks are possible + * cupy.ReductionKernel + checks that there is a single input and output + """ + if isinstance(f, cupy.RawKernel): + # We will assume that it has the correct API + return f + elif isinstance(f, cupy.ReductionKernel): + if f.nin != 1 or f.nout != 1: + raise TypeError('ReductionKernel must have 1 input and output') + return f + elif isinstance(f, cupy.ElementwiseKernel): + # special error message for ElementwiseKernels + raise TypeError('only ReductionKernel allowed (not ElementwiseKernel)') + else: + raise TypeError('bad function type') + + +@_util.memoize(for_each_device=True) +def _get_generic_filter_red(rk, in_dtype, out_dtype, filter_size, mode, + wshape, offsets, cval, int_type): + """Generic filter implementation based on a reduction kernel.""" + # Get the temporary output c type + in_param, out_param = rk.in_params[0], rk.out_params[0] + out_ctype = out_param.ctype + if out_param.dtype is None: # resolve template + out_ctype = cupy._core._scalar.get_typename( + in_dtype if 
out_param.ctype == in_param.ctype else out_dtype) + + # Get code chunks + setup = ''' + int iv = 0; + X values[{size}]; + CArray sub_in(values, {{{size}}}); + {out_ctype} val_out; + CArray<{out_ctype}, 1, true, true> sub_out(&val_out, {{1}}); + '''.format(size=filter_size, out_ctype=out_ctype) + + sub_call = '''reduction_kernel::{}(sub_in, sub_out); + y = cast(val_out);'''.format(rk.name) + + sub_kernel = _reduction_kernel_code(rk, filter_size, out_dtype, in_dtype) + + # Get the final kernel + return _filters_core._generate_nd_kernel( + 'generic_{}_{}'.format(filter_size, rk.name), + setup, 'values[iv++] = {value};', sub_call, + mode, wshape, int_type, offsets, cval, preamble=sub_kernel, + options=getattr(rk, 'options', ())) + + +def _reduction_kernel_code(rk, filter_size, out_dtype, in_dtype): + # NOTE: differences from the code generated for real reduction kernels: + # * input is always 1D and always less than 2^31 elements + # * output is always 1D with a single element + # * never across threads (no _block_stride, _sdata, _sdata_raw, _REDUCE, + # _tid, _J, _i, _i_base, _j_offset, _J_offset, _j_stride, _J_stride) + # Also, the code is moved into a namespace so that clashes are minimized + # between the typedefs for the "template" variables. 
+ + # figure out the types + types = {} + in_param, out_param = rk.in_params[0], rk.out_params[0] + in_ctype = _get_type_info(in_param, in_dtype, types) + out_ctype = _get_type_info(out_param, out_dtype, types) + types = '\n'.join('typedef {} {};'.format(typ, name) + for name, typ in types.items()) + + return '''namespace reduction_kernel {{ +{type_preamble} +{preamble} +__device__ +void {name}({in_const} CArray<{in_ctype}, 1, true, true>& _raw_{in_name}, + CArray<{out_ctype}, 1, true, true>& _raw_{out_name}) {{ + // these are just provided so if they are available for the RK + CIndexer<1> _in_ind({{{size}}}); + CIndexer<0> _out_ind; + + #define REDUCE(a, b) ({reduce_expr}) + #define POST_MAP(a) ({post_map_expr}) + typedef {reduce_type} _type_reduce; + _type_reduce _s = _type_reduce({identity}); + for (int _j = 0; _j < {size}; ++_j) {{ + _in_ind.set(_j); + {in_const} {in_ctype}& {in_name} = _raw_{in_name}[_j]; + _type_reduce _a = static_cast<_type_reduce>({pre_map_expr}); + _s = REDUCE(_s, _a); + }} + _out_ind.set(0); + {out_ctype} &{out_name} = _raw_{out_name}[0]; + POST_MAP(_s); + #undef REDUCE + #undef POST_MAP +}} +}}'''.format( + name=rk.name, type_preamble=types, preamble=rk.preamble, + in_const='const' if in_param.is_const else '', + in_ctype=in_ctype, in_name=in_param.name, + out_ctype=out_ctype, out_name=out_param.name, + + pre_map_expr=rk.map_expr, + identity='' if rk.identity is None else rk.identity, + size=filter_size, + reduce_type=rk.reduce_type, reduce_expr=rk.reduce_expr, + post_map_expr=rk.post_map_expr, + ) + + +def _get_type_info(param, dtype, types): + if param.dtype is not None: + return param.ctype + # Template type -> map to actual output type + ctype = cupy._core._scalar.get_typename(dtype) + types.setdefault(param.ctype, ctype) + return ctype + + +@_util.memoize(for_each_device=True) +def _get_generic_filter_raw(rk, filter_size, mode, wshape, offsets, cval, + int_type): + """Generic filter implementation based on a raw kernel.""" + setup = 
''' + int iv = 0; + double values[{}]; + double val_out;'''.format(filter_size) + + sub_call = '''raw_kernel::{}(values, {}, &val_out); + y = cast(val_out);'''.format(rk.name, filter_size) + + return _filters_core._generate_nd_kernel( + 'generic_{}_{}'.format(filter_size, rk.name), + setup, 'values[iv++] = cast({value});', sub_call, + mode, wshape, int_type, offsets, cval, + preamble='namespace raw_kernel {{\n{}\n}}'.format( + # Users can test RawKernel independently, but when passed to here + # it must be used as a device function here. In fact, RawKernel + # wouldn't compile if code only contains device functions, so this + # is necessary. + rk.code.replace('__global__', '__device__')), + options=rk.options) + + +@_util.memoize(for_each_device=True) +def _get_generic_filter1d(rk, length, n_lines, filter_size, origin, mode, cval, + in_ctype, out_ctype, int_type): + """ + The generic 1d filter is different than all other filters and thus is the + only filter that doesn't use _generate_nd_kernel() and has a completely + custom raw kernel. 
+ """ + in_length = length + filter_size - 1 + start = filter_size // 2 + origin + end = start + length + + if mode == 'constant': + boundary, boundary_early = '', ''' + for (idx_t j = 0; j < {start}; ++j) {{ input_line[j] = {cval}; }} + for (idx_t j = {end}; j<{in_length}; ++j) {{ input_line[j] = {cval}; }} + '''.format(start=start, end=end, in_length=in_length, cval=cval) + else: + if length == 1: + a = b = 'j_ = 0;' + elif mode == 'reflect': + j = ('j_ = ({j}) % ({length} * 2);\n' + 'j_ = min(j_, 2 * {length} - 1 - j_);') + a = j.format(j='-1 - j_', length=length) + b = j.format(j='j_', length=length) + elif mode == 'mirror': + j = ('j_ = 1 + (({j}) - 1) % (({length} - 1) * 2);\n' + 'j_ = min(j_, 2 * {length} - 2 - j_);') + a = j.format(j='-j_', length=length) + b = j.format(j='j_', length=length) + elif mode == 'nearest': + a, b = 'j_ = 0;', 'j_ = {length}-1;'.format(length=length) + elif mode == 'wrap': + a = 'j_ = j_ % {length} + {length};'.format(length=length) + b = 'j_ = j_ % {length};'.format(length=length) + loop = '''for (idx_t j = {{}}; j < {{}}; ++j) {{{{ + idx_t j_ = j - {start}; + {{}} + input_line[j] = input_line[j_ + {start}]; + }}}}'''.format(start=start) + boundary_early = '' + boundary = (loop.format(0, start, a) + '\n' + + loop.format(end, in_length, b)) + + name = 'generic1d_{}_{}_{}'.format(length, filter_size, rk.name) + if runtime.is_hip: + include_type_traits = '' + else: + include_type_traits = ''' +#include // provide C++ std:: coverage +''' + code = '''#include "cupy/carray.cuh" +#include "cupy/complex.cuh" +{include_type_traits} + +namespace raw_kernel {{\n{rk_code}\n}} + +{CAST} + +typedef unsigned char byte; +typedef {in_ctype} X; +typedef {out_ctype} Y; +typedef {int_type} idx_t; + +__device__ idx_t offset(idx_t i, idx_t axis, idx_t ndim, + const idx_t* shape, const idx_t* strides) {{ + idx_t index = 0; + for (idx_t a = ndim; --a > 0; ) {{ + if (a == axis) {{ continue; }} + index += (i % shape[a]) * strides[a]; + i /= shape[a]; + 
}} + return index + strides[0] * i; +}} + +extern "C" __global__ +void {name}(const byte* input, byte* output, const idx_t* x) {{ + const idx_t axis = x[0], ndim = x[1], + *shape = x+2, *in_strides = x+2+ndim, *out_strides = x+2+2*ndim; + + const idx_t in_elem_stride = in_strides[axis]; + const idx_t out_elem_stride = out_strides[axis]; + + double input_line[{in_length}]; + double output_line[{length}]; + {boundary_early} + + for (idx_t i = ((idx_t)blockIdx.x) * blockDim.x + threadIdx.x; + i < {n_lines}; + i += ((idx_t)blockDim.x) * gridDim.x) {{ + // Copy line from input (with boundary filling) + const byte* input_ = input + offset(i, axis, ndim, shape, in_strides); + for (idx_t j = 0; j < {length}; ++j) {{ + input_line[j+{start}] = (double)*(X*)(input_+j*in_elem_stride); + }} + {boundary} + + raw_kernel::{rk_name}(input_line, {in_length}, output_line, {length}); + + // Copy line to output + byte* output_ = output + offset(i, axis, ndim, shape, out_strides); + for (idx_t j = 0; j < {length}; ++j) {{ + *(Y*)(output_+j*out_elem_stride) = cast(output_line[j]); + }} + }} +}}'''.format(n_lines=n_lines, length=length, in_length=in_length, start=start, + in_ctype=in_ctype, out_ctype=out_ctype, int_type=int_type, + boundary_early=boundary_early, boundary=boundary, + name=name, rk_name=rk.name, + # Users can test RawKernel independently, but when passed to here + # it must be used as a device function here. In fact, RawKernel + # wouldn't compile if code only contains device functions, so this + # is necessary. 
+ rk_code=rk.code.replace('__global__', '__device__'), + include_type_traits=include_type_traits, + CAST=_filters_core._CAST_FUNCTION) + return cupy.RawKernel(code, name, ('--std=c++11',) + rk.options) diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/_fourier.py b/vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/_fourier.py new file mode 100644 index 0000000000000000000000000000000000000000..a0ee3a5a9d90431e9b184b25f2599e4359488e4d --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/_fourier.py @@ -0,0 +1,253 @@ +import numpy + +import cupy +from cupy import _core +from cupy._core import internal +from cupyx.scipy.ndimage import _util +from cupyx.scipy import special + + +def _get_output_fourier(output, input, complex_only=False): + types = [cupy.complex64, cupy.complex128] + if not complex_only: + types += [cupy.float32, cupy.float64] + + if output is None: + if input.dtype in types: + output = cupy.empty(input.shape, dtype=input.dtype) + else: + output = cupy.empty(input.shape, dtype=types[-1]) + elif type(output) is type: + if output not in types: + raise RuntimeError('output type not supported') + output = cupy.empty(input.shape, dtype=output) + elif output.shape != input.shape: + raise RuntimeError('output shape not correct') + return output + + +def _reshape_nd(arr, ndim, axis): + """Promote a 1d array to ndim with non-singleton size along axis.""" + nd_shape = (1,) * axis + (arr.size,) + (1,) * (ndim - axis - 1) + return arr.reshape(nd_shape) + + +def fourier_gaussian(input, sigma, n=-1, axis=-1, output=None): + """Multidimensional Gaussian shift filter. + + The array is multiplied with the Fourier transform of a (separable) + Gaussian kernel. + + Args: + input (cupy.ndarray): The input array. + sigma (float or sequence of float): The sigma of the Gaussian kernel. + If a float, `sigma` is the same for all axes. If a sequence, + `sigma` has to contain one value for each axis. 
+ n (int, optional): If `n` is negative (default), then the input is + assumed to be the result of a complex fft. If `n` is larger than or + equal to zero, the input is assumed to be the result of a real fft, + and `n` gives the length of the array before transformation along + the real transform direction. + axis (int, optional): The axis of the real transform (only used when + ``n > -1``). + output (cupy.ndarray, optional): + If given, the result of shifting the input is placed in this array. + + Returns: + output (cupy.ndarray): The filtered output. + """ + ndim = input.ndim + output = _get_output_fourier(output, input) + axis = internal._normalize_axis_index(axis, ndim) + sigmas = _util._fix_sequence_arg(sigma, ndim, 'sigma') + + _core.elementwise_copy(input, output) + for ax, (sigmak, ax_size) in enumerate(zip(sigmas, output.shape)): + + # compute the frequency grid in Hz + if ax == axis and n > 0: + arr = cupy.arange(ax_size, dtype=output.real.dtype) + arr /= n + else: + arr = cupy.fft.fftfreq(ax_size) + arr = arr.astype(output.real.dtype, copy=False) + + # compute the Gaussian weights + arr *= arr + scale = sigmak * sigmak / -2 + arr *= (4 * numpy.pi * numpy.pi) * scale + cupy.exp(arr, out=arr) + + # reshape for broadcasting + arr = _reshape_nd(arr, ndim=ndim, axis=ax) + output *= arr + + return output + + +def fourier_uniform(input, size, n=-1, axis=-1, output=None): + """Multidimensional uniform shift filter. + + The array is multiplied with the Fourier transform of a box of given size. + + Args: + input (cupy.ndarray): The input array. + size (float or sequence of float): The sigma of the box used for + filtering. If a float, `size` is the same for all axes. If a + sequence, `size` has to contain one value for each axis. + n (int, optional): If `n` is negative (default), then the input is + assumed to be the result of a complex fft. 
If `n` is larger than or + equal to zero, the input is assumed to be the result of a real fft, + and `n` gives the length of the array before transformation along + the real transform direction. + axis (int, optional): The axis of the real transform (only used when + ``n > -1``). + output (cupy.ndarray, optional): + If given, the result of shifting the input is placed in this array. + + Returns: + output (cupy.ndarray): The filtered output. + """ + ndim = input.ndim + output = _get_output_fourier(output, input) + axis = internal._normalize_axis_index(axis, ndim) + sizes = _util._fix_sequence_arg(size, ndim, 'size') + + _core.elementwise_copy(input, output) + for ax, (size, ax_size) in enumerate(zip(sizes, output.shape)): + + # compute the frequency grid in Hz + if ax == axis and n > 0: + arr = cupy.arange(ax_size, dtype=output.real.dtype) + arr /= n + else: + arr = cupy.fft.fftfreq(ax_size) + arr = arr.astype(output.real.dtype, copy=False) + + # compute the uniform filter weights + arr *= size + cupy.sinc(arr, out=arr) + + # reshape for broadcasting + arr = _reshape_nd(arr, ndim=ndim, axis=ax) + output *= arr + + return output + + +def fourier_shift(input, shift, n=-1, axis=-1, output=None): + """Multidimensional Fourier shift filter. + + The array is multiplied with the Fourier transform of a shift operation. + + Args: + input (cupy.ndarray): The input array. This should be in the Fourier + domain. + shift (float or sequence of float): The size of shift. If a float, + `shift` is the same for all axes. If a sequence, `shift` has to + contain one value for each axis. + n (int, optional): If `n` is negative (default), then the input is + assumed to be the result of a complex fft. If `n` is larger than or + equal to zero, the input is assumed to be the result of a real fft, + and `n` gives the length of the array before transformation along + the real transform direction. + axis (int, optional): The axis of the real transform (only used when + ``n > -1``). 
+ output (cupy.ndarray, optional): + If given, the result of shifting the input is placed in this array. + + Returns: + output (cupy.ndarray): The shifted output (in the Fourier domain). + """ + ndim = input.ndim + output = _get_output_fourier(output, input, complex_only=True) + axis = internal._normalize_axis_index(axis, ndim) + shifts = _util._fix_sequence_arg(shift, ndim, 'shift') + + _core.elementwise_copy(input, output) + for ax, (shiftk, ax_size) in enumerate(zip(shifts, output.shape)): + if shiftk == 0: + continue + if ax == axis and n > 0: + # cp.fft.rfftfreq(ax_size) * (-2j * numpy.pi * shiftk * ax_size/n) + arr = cupy.arange(ax_size, dtype=output.dtype) + arr *= -2j * numpy.pi * shiftk / n + else: + arr = cupy.fft.fftfreq(ax_size) + arr = arr * (-2j * numpy.pi * shiftk) + cupy.exp(arr, out=arr) + + # reshape for broadcasting + arr = _reshape_nd(arr, ndim=ndim, axis=ax) + output *= arr + + return output + + +def fourier_ellipsoid(input, size, n=-1, axis=-1, output=None): + """Multidimensional ellipsoid Fourier filter. + + The array is multiplied with the fourier transform of a ellipsoid of + given sizes. + + Args: + input (cupy.ndarray): The input array. + size (float or sequence of float): The size of the box used for + filtering. If a float, `size` is the same for all axes. If a + sequence, `size` has to contain one value for each axis. + n (int, optional): If `n` is negative (default), then the input is + assumed to be the result of a complex fft. If `n` is larger than or + equal to zero, the input is assumed to be the result of a real fft, + and `n` gives the length of the array before transformation along + the real transform direction. + axis (int, optional): The axis of the real transform (only used when + ``n > -1``). + output (cupy.ndarray, optional): + If given, the result of shifting the input is placed in this array. + + Returns: + output (cupy.ndarray): The filtered output. 
+ """ + ndim = input.ndim + if ndim == 1: + return fourier_uniform(input, size, n, axis, output) + + if ndim > 3: + # Note: SciPy currently does not do any filtering on >=4d inputs, but + # does not warn about this! + raise NotImplementedError('Only 1d, 2d and 3d inputs are supported') + output = _get_output_fourier(output, input) + axis = internal._normalize_axis_index(axis, ndim) + sizes = _util._fix_sequence_arg(size, ndim, 'size') + + _core.elementwise_copy(input, output) + + # compute the distance from the origin for all samples in Fourier space + distance = 0 + for ax, (size, ax_size) in enumerate(zip(sizes, output.shape)): + # compute the frequency grid in Hz + if ax == axis and n > 0: + arr = cupy.arange(ax_size, dtype=output.real.dtype) + arr *= numpy.pi * size / n + else: + arr = cupy.fft.fftfreq(ax_size) + arr *= numpy.pi * size + arr = arr.astype(output.real.dtype, copy=False) + arr *= arr + arr = _reshape_nd(arr, ndim=ndim, axis=ax) + distance = distance + arr + cupy.sqrt(distance, out=distance) + + if ndim == 2: + special.j1(distance, out=output) + output *= 2 + output /= distance + elif ndim == 3: + cupy.sin(distance, out=output) + output -= distance * cupy.cos(distance) + output *= 3 + output /= distance ** 3 + output[(0,) * ndim] = 1.0 # avoid NaN in corner at frequency=0 location + output *= input + + return output diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/_interp_kernels.py b/vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/_interp_kernels.py new file mode 100644 index 0000000000000000000000000000000000000000..1020ea532213f1b6394f24979d734a5860f8218b --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/_interp_kernels.py @@ -0,0 +1,598 @@ +import numpy + +import cupy +import cupy._core.internal + +from cupyx.scipy.ndimage import _spline_prefilter_core +from cupyx.scipy.ndimage import _spline_kernel_weights +from cupyx.scipy.ndimage import _util + +math_constants_preamble = r''' +// workaround for 
HIP: line begins with #include +#include +''' + +spline_weights_inline = _spline_kernel_weights.spline_weights_inline + + +def _get_coord_map(ndim, nprepad=0): + """Extract target coordinate from coords array (for map_coordinates). + + Notes + ----- + Assumes the following variables have been initialized on the device:: + + coords (ndarray): array of shape (ncoords, ndim) containing the target + coordinates. + c_j: variables to hold the target coordinates + + computes:: + + c_j = coords[i + j * ncoords]; + + ncoords is determined by the size of the output array, y. + y will be indexed by the CIndexer, _ind. + Thus ncoords = _ind.size(); + + """ + ops = [] + ops.append('ptrdiff_t ncoords = _ind.size();') + pre = f" + (W){nprepad}" if nprepad > 0 else '' + for j in range(ndim): + ops.append(f''' + W c_{j} = coords[i + {j} * ncoords]{pre};''') + return ops + + +def _get_coord_zoom_and_shift(ndim, nprepad=0): + """Compute target coordinate based on a shift followed by a zoom. + + This version zooms from the center of the edge pixels. + + Notes + ----- + Assumes the following variables have been initialized on the device:: + + in_coord[ndim]: array containing the source coordinate + zoom[ndim]: array containing the zoom for each axis + shift[ndim]: array containing the zoom for each axis + + computes:: + + c_j = zoom[j] * (in_coord[j] - shift[j]) + + """ + ops = [] + pre = f" + (W){nprepad}" if nprepad > 0 else '' + for j in range(ndim): + ops.append(f''' + W c_{j} = zoom[{j}] * ((W)in_coord[{j}] - shift[{j}]){pre};''') + return ops + + +def _get_coord_zoom_and_shift_grid(ndim, nprepad=0): + """Compute target coordinate based on a shift followed by a zoom. + + This version zooms from the outer edges of the grid pixels. 
+ + Notes + ----- + Assumes the following variables have been initialized on the device:: + + in_coord[ndim]: array containing the source coordinate + zoom[ndim]: array containing the zoom for each axis + shift[ndim]: array containing the zoom for each axis + + computes:: + + c_j = zoom[j] * (in_coord[j] - shift[j] + 0.5) - 0.5 + + """ + ops = [] + pre = f" + (W){nprepad}" if nprepad > 0 else '' + for j in range(ndim): + ops.append(f''' + W c_{j} = zoom[{j}] * ((W)in_coord[{j}] - shift[j] + 0.5) - 0.5{pre};''') + return ops + + +def _get_coord_zoom(ndim, nprepad=0): + """Compute target coordinate based on a zoom. + + This version zooms from the center of the edge pixels. + + Notes + ----- + Assumes the following variables have been initialized on the device:: + + in_coord[ndim]: array containing the source coordinate + zoom[ndim]: array containing the zoom for each axis + + computes:: + + c_j = zoom[j] * in_coord[j] + + """ + ops = [] + pre = f" + (W){nprepad}" if nprepad > 0 else '' + for j in range(ndim): + ops.append(f''' + W c_{j} = zoom[{j}] * (W)in_coord[{j}]{pre};''') + return ops + + +def _get_coord_zoom_grid(ndim, nprepad=0): + """Compute target coordinate based on a zoom (grid_mode=True version). + + This version zooms from the outer edges of the grid pixels. + + Notes + ----- + Assumes the following variables have been initialized on the device:: + + in_coord[ndim]: array containing the source coordinate + zoom[ndim]: array containing the zoom for each axis + + computes:: + + c_j = zoom[j] * (in_coord[j] + 0.5) - 0.5 + + """ + ops = [] + pre = f" + (W){nprepad}" if nprepad > 0 else '' + for j in range(ndim): + ops.append(f''' + W c_{j} = zoom[{j}] * ((W)in_coord[{j}] + 0.5) - 0.5{pre};''') + return ops + + +def _get_coord_shift(ndim, nprepad=0): + """Compute target coordinate based on a shift. 
+ + Notes + ----- + Assumes the following variables have been initialized on the device:: + + in_coord[ndim]: array containing the source coordinate + shift[ndim]: array containing the zoom for each axis + + computes:: + + c_j = in_coord[j] - shift[j] + + """ + ops = [] + pre = f" + (W){nprepad}" if nprepad > 0 else '' + for j in range(ndim): + ops.append(f''' + W c_{j} = (W)in_coord[{j}] - shift[{j}]{pre};''') + return ops + + +def _get_coord_affine(ndim, nprepad=0): + """Compute target coordinate based on a homogeneous transformation matrix. + + The homogeneous matrix has shape (ndim, ndim + 1). It corresponds to + affine matrix where the last row of the affine is assumed to be: + ``[0] * ndim + [1]``. + + Notes + ----- + Assumes the following variables have been initialized on the device:: + + mat(array): array containing the (ndim, ndim + 1) transform matrix. + in_coords(array): coordinates of the input + + For example, in 2D: + + c_0 = mat[0] * in_coords[0] + mat[1] * in_coords[1] + aff[2]; + c_1 = mat[3] * in_coords[0] + mat[4] * in_coords[1] + aff[5]; + + """ + ops = [] + pre = f" + (W){nprepad}" if nprepad > 0 else '' + ncol = ndim + 1 + for j in range(ndim): + ops.append(f''' + W c_{j} = (W)0.0;''') + for k in range(ndim): + ops.append(f''' + c_{j} += mat[{ncol * j + k}] * (W)in_coord[{k}];''') + ops.append(f''' + c_{j} += mat[{ncol * j + ndim}]{pre};''') + return ops + + +def _unravel_loop_index(shape, uint_t='unsigned int'): + """ + declare a multi-index array in_coord and unravel the 1D index, i into it. + This code assumes that the array is a C-ordered array. 
+ """ + ndim = len(shape) + code = [f''' + {uint_t} in_coord[{ndim}]; + {uint_t} s, t, idx = i;'''] + for j in range(ndim - 1, 0, -1): + code.append(f''' + s = {shape[j]}; + t = idx / s; + in_coord[{j}] = idx - t * s; + idx = t;''') + code.append(''' + in_coord[0] = idx;''') + return '\n'.join(code) + + +def _generate_interp_custom(coord_func, ndim, large_int, yshape, mode, cval, + order, name='', integer_output=False, nprepad=0, + omit_in_coord=False): + """ + Args: + coord_func (function): generates code to do the coordinate + transformation. See for example, `_get_coord_shift`. + ndim (int): The number of dimensions. + large_int (bool): If true use Py_ssize_t instead of int for indexing. + yshape (tuple): Shape of the output array. + mode (str): Signal extension mode to use at the array boundaries + cval (float): constant value used when `mode == 'constant'`. + name (str): base name for the interpolation kernel + integer_output (bool): boolean indicating whether the output has an + integer type. + nprepad (int): integer indicating the amount of prepadding at the + boundaries. 
+ + Returns: + operation (str): code body for the ElementwiseKernel + name (str): name for the ElementwiseKernel + """ + + ops = [] + internal_dtype = 'double' if integer_output else 'Y' + ops.append(f'{internal_dtype} out = 0.0;') + + if large_int: + uint_t = 'size_t' + int_t = 'ptrdiff_t' + else: + uint_t = 'unsigned int' + int_t = 'int' + + # determine strides for x along each axis + for j in range(ndim): + ops.append(f'const {int_t} xsize_{j} = x.shape()[{j}];') + ops.append(f'const {uint_t} sx_{ndim - 1} = 1;') + for j in range(ndim - 1, 0, -1): + ops.append(f'const {uint_t} sx_{j - 1} = sx_{j} * xsize_{j};') + + if not omit_in_coord: + # create in_coords array to store the unraveled indices + ops.append(_unravel_loop_index(yshape, uint_t)) + + # compute the transformed (target) coordinates, c_j + ops = ops + coord_func(ndim, nprepad) + + if cval is numpy.nan: + cval = '(Y)CUDART_NAN' + elif cval == numpy.inf: + cval = '(Y)CUDART_INF' + elif cval == -numpy.inf: + cval = '(Y)(-CUDART_INF)' + else: + cval = f'({internal_dtype}){cval}' + + if mode == 'constant': + # use cval if coordinate is outside the bounds of x + _cond = ' || '.join( + [f'(c_{j} < 0) || (c_{j} > xsize_{j} - 1)' for j in range(ndim)]) + ops.append(f''' + if ({_cond}) + {{ + out = {cval}; + }} + else + {{''') + + if order == 0: + if mode == 'wrap': + ops.append('double dcoord;') # mode 'wrap' requires this to work + for j in range(ndim): + # determine nearest neighbor + if mode == 'wrap': + ops.append(f''' + dcoord = c_{j};''') + else: + ops.append(f''' + {int_t} cf_{j} = ({int_t})floor((double)c_{j} + 0.5);''') + + # handle boundary + if mode != 'constant': + if mode == 'wrap': + ixvar = 'dcoord' + float_ix = True + else: + ixvar = f'cf_{j}' + float_ix = False + ops.append( + _util._generate_boundary_condition_ops( + mode, ixvar, f'xsize_{j}', int_t, float_ix)) + if mode == 'wrap': + ops.append(f''' + {int_t} cf_{j} = ({int_t})floor(dcoord + 0.5);''') + + # sum over ic_j will give the raveled 
coordinate in the input + ops.append(f''' + {int_t} ic_{j} = cf_{j} * sx_{j};''') + _coord_idx = ' + '.join([f'ic_{j}' for j in range(ndim)]) + if mode == 'grid-constant': + _cond = ' || '.join([f'(ic_{j} < 0)' for j in range(ndim)]) + ops.append(f''' + if ({_cond}) {{ + out = {cval}; + }} else {{ + out = ({internal_dtype})x[{_coord_idx}]; + }}''') + else: + ops.append(f''' + out = ({internal_dtype})x[{_coord_idx}];''') + + elif order == 1: + for j in range(ndim): + # get coordinates for linear interpolation along axis j + ops.append(f''' + {int_t} cf_{j} = ({int_t})floor((double)c_{j}); + {int_t} cc_{j} = cf_{j} + 1; + {int_t} n_{j} = (c_{j} == cf_{j}) ? 1 : 2; // points needed + ''') + + if mode == 'wrap': + ops.append(f''' + double dcoordf = c_{j}; + double dcoordc = c_{j} + 1;''') + else: + # handle boundaries for extension modes. + ops.append(f''' + {int_t} cf_bounded_{j} = cf_{j}; + {int_t} cc_bounded_{j} = cc_{j};''') + + if mode != 'constant': + if mode == 'wrap': + ixvar = 'dcoordf' + float_ix = True + else: + ixvar = f'cf_bounded_{j}' + float_ix = False + ops.append( + _util._generate_boundary_condition_ops( + mode, ixvar, f'xsize_{j}', int_t, float_ix)) + + ixvar = 'dcoordc' if mode == 'wrap' else f'cc_bounded_{j}' + ops.append( + _util._generate_boundary_condition_ops( + mode, ixvar, f'xsize_{j}', int_t, float_ix)) + if mode == 'wrap': + ops.append( + f''' + {int_t} cf_bounded_{j} = ({int_t})floor(dcoordf);; + {int_t} cc_bounded_{j} = ({int_t})floor(dcoordf + 1);; + ''' + ) + + ops.append(f''' + for (int s_{j} = 0; s_{j} < n_{j}; s_{j}++) + {{ + W w_{j}; + {int_t} ic_{j}; + if (s_{j} == 0) + {{ + w_{j} = (W)cc_{j} - c_{j}; + ic_{j} = cf_bounded_{j} * sx_{j}; + }} else + {{ + w_{j} = c_{j} - (W)cf_{j}; + ic_{j} = cc_bounded_{j} * sx_{j}; + }}''') + elif order > 1: + if mode == 'grid-constant': + spline_mode = 'constant' + elif mode == 'nearest': + spline_mode = 'nearest' + else: + spline_mode = _spline_prefilter_core._get_spline_mode(mode) + + # wx, wy 
are temporary variables used during spline weight computation + ops.append(f''' + W wx, wy; + {int_t} start;''') + for j in range(ndim): + # determine weights along the current axis + ops.append(f''' + W weights_{j}[{order + 1}];''') + ops.append(spline_weights_inline[order].format(j=j, order=order)) + + # get starting coordinate for spline interpolation along axis j + if mode in ['wrap']: + ops.append(f'double dcoord = c_{j};') + coord_var = 'dcoord' + ops.append( + _util._generate_boundary_condition_ops( + mode, coord_var, f'xsize_{j}', int_t, True)) + else: + coord_var = f'(double)c_{j}' + + if order & 1: + op_str = ''' + start = ({int_t})floor({coord_var}) - {order_2};''' + else: + op_str = ''' + start = ({int_t})floor({coord_var} + 0.5) - {order_2};''' + ops.append( + op_str.format( + int_t=int_t, coord_var=coord_var, order_2=order // 2 + )) + + # set of coordinate values within spline footprint along axis j + ops.append(f'''{int_t} ci_{j}[{order + 1}];''') + for k in range(order + 1): + ixvar = f'ci_{j}[{k}]' + ops.append(f''' + {ixvar} = start + {k};''') + ops.append( + _util._generate_boundary_condition_ops( + spline_mode, ixvar, f'xsize_{j}', int_t)) + + # loop over the order + 1 values in the spline filter + ops.append(f''' + W w_{j}; + {int_t} ic_{j}; + for (int k_{j} = 0; k_{j} <= {order}; k_{j}++) + {{ + w_{j} = weights_{j}[k_{j}]; + ic_{j} = ci_{j}[k_{j}] * sx_{j}; + ''') + + if order > 0: + + _weight = ' * '.join([f'w_{j}' for j in range(ndim)]) + _coord_idx = ' + '.join([f'ic_{j}' for j in range(ndim)]) + if mode == 'grid-constant' or (order > 1 and mode == 'constant'): + _cond = ' || '.join([f'(ic_{j} < 0)' for j in range(ndim)]) + ops.append(f''' + if ({_cond}) {{ + out += {cval} * ({internal_dtype})({_weight}); + }} else {{ + {internal_dtype} val = ({internal_dtype})x[{_coord_idx}]; + out += val * ({internal_dtype})({_weight}); + }}''') + else: + ops.append(f''' + {internal_dtype} val = ({internal_dtype})x[{_coord_idx}]; + out += val * 
({internal_dtype})({_weight});''') + + ops.append('}' * ndim) + + if mode == 'constant': + ops.append('}') + + if integer_output: + ops.append('y = (Y)rint((double)out);') + else: + ops.append('y = (Y)out;') + operation = '\n'.join(ops) + + mode_str = mode.replace('-', '_') # avoid hyphen in kernel name + name = 'cupyx_scipy_ndimage_interpolate_{}_order{}_{}_{}d_y{}'.format( + name, order, mode_str, ndim, '_'.join([f'{j}' for j in yshape]), + ) + if uint_t == 'size_t': + name += '_i64' + return operation, name + + +@cupy._util.memoize(for_each_device=True) +def _get_map_kernel(ndim, large_int, yshape, mode, cval=0.0, order=1, + integer_output=False, nprepad=0): + in_params = 'raw X x, raw W coords' + out_params = 'Y y' + operation, name = _generate_interp_custom( + coord_func=_get_coord_map, + ndim=ndim, + large_int=large_int, + yshape=yshape, + mode=mode, + cval=cval, + order=order, + name='map', + integer_output=integer_output, + nprepad=nprepad, + omit_in_coord=True, # input image coordinates are not needed + ) + return cupy.ElementwiseKernel(in_params, out_params, operation, name, + preamble=math_constants_preamble) + + +@cupy._util.memoize(for_each_device=True) +def _get_shift_kernel(ndim, large_int, yshape, mode, cval=0.0, order=1, + integer_output=False, nprepad=0): + in_params = 'raw X x, raw W shift' + out_params = 'Y y' + operation, name = _generate_interp_custom( + coord_func=_get_coord_shift, + ndim=ndim, + large_int=large_int, + yshape=yshape, + mode=mode, + cval=cval, + order=order, + name='shift', + integer_output=integer_output, + nprepad=nprepad, + ) + return cupy.ElementwiseKernel(in_params, out_params, operation, name, + preamble=math_constants_preamble) + + +@cupy._util.memoize(for_each_device=True) +def _get_zoom_shift_kernel(ndim, large_int, yshape, mode, cval=0.0, order=1, + integer_output=False, grid_mode=False, nprepad=0): + in_params = 'raw X x, raw W shift, raw W zoom' + out_params = 'Y y' + if grid_mode: + zoom_shift_func = 
_get_coord_zoom_and_shift_grid + else: + zoom_shift_func = _get_coord_zoom_and_shift + operation, name = _generate_interp_custom( + coord_func=zoom_shift_func, + ndim=ndim, + large_int=large_int, + yshape=yshape, + mode=mode, + cval=cval, + order=order, + name="zoom_shift_grid" if grid_mode else "zoom_shift", + integer_output=integer_output, + nprepad=nprepad, + ) + return cupy.ElementwiseKernel(in_params, out_params, operation, name, + preamble=math_constants_preamble) + + +@cupy._util.memoize(for_each_device=True) +def _get_zoom_kernel(ndim, large_int, yshape, mode, cval=0.0, order=1, + integer_output=False, grid_mode=False, nprepad=0): + in_params = 'raw X x, raw W zoom' + out_params = 'Y y' + operation, name = _generate_interp_custom( + coord_func=_get_coord_zoom_grid if grid_mode else _get_coord_zoom, + ndim=ndim, + large_int=large_int, + yshape=yshape, + mode=mode, + cval=cval, + order=order, + name="zoom_grid" if grid_mode else "zoom", + integer_output=integer_output, + nprepad=nprepad, + ) + return cupy.ElementwiseKernel(in_params, out_params, operation, name, + preamble=math_constants_preamble) + + +@cupy._util.memoize(for_each_device=True) +def _get_affine_kernel(ndim, large_int, yshape, mode, cval=0.0, order=1, + integer_output=False, nprepad=0): + in_params = 'raw X x, raw W mat' + out_params = 'Y y' + operation, name = _generate_interp_custom( + coord_func=_get_coord_affine, + ndim=ndim, + large_int=large_int, + yshape=yshape, + mode=mode, + cval=cval, + order=order, + name='affine', + integer_output=integer_output, + nprepad=nprepad, + ) + return cupy.ElementwiseKernel(in_params, out_params, operation, name, + preamble=math_constants_preamble) diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/_interpolation.py b/vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/_interpolation.py new file mode 100644 index 0000000000000000000000000000000000000000..99bdeac1108230e8815bc4cf57081f8edfb05b4c --- /dev/null +++ 
b/vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/_interpolation.py @@ -0,0 +1,780 @@ +import math +import warnings + +import cupy +import numpy + +from cupy import _core +from cupy._core import internal +from cupy.cuda import runtime +from cupyx import _texture +from cupyx.scipy.ndimage import _util +from cupyx.scipy.ndimage import _interp_kernels +from cupyx.scipy.ndimage import _spline_prefilter_core + +_prod = cupy._core.internal.prod + + +def _check_parameter(func_name, order, mode): + if order is None: + warnings.warn(f'Currently the default order of {func_name} is 1. In a ' + 'future release this may change to 3 to match ' + 'scipy.ndimage ') + elif order < 0 or 5 < order: + raise ValueError('spline order is not supported') + + if mode not in ('constant', 'grid-constant', 'nearest', 'mirror', + 'reflect', 'grid-mirror', 'wrap', 'grid-wrap', 'opencv', + '_opencv_edge'): + raise ValueError('boundary mode ({}) is not supported'.format(mode)) + + +def _get_spline_output(input, output): + """Create workspace array, temp, and the final dtype for the output. + + Differs from SciPy by not always forcing the internal floating point dtype + to be double precision. 
+ """ + complex_data = input.dtype.kind == 'c' + if complex_data: + min_float_dtype = cupy.complex64 + else: + min_float_dtype = cupy.float32 + if isinstance(output, cupy.ndarray): + if complex_data and output.dtype.kind != 'c': + raise ValueError( + 'output must have complex dtype for complex inputs' + ) + float_dtype = cupy.promote_types(output.dtype, min_float_dtype) + output_dtype = output.dtype + else: + if output is None: + output = output_dtype = input.dtype + else: + output_dtype = cupy.dtype(output) + float_dtype = cupy.promote_types(output, min_float_dtype) + + if (isinstance(output, cupy.ndarray) + and output.dtype == float_dtype == output_dtype + and output.flags.c_contiguous): + if output is not input: + _core.elementwise_copy(input, output) + temp = output + else: + temp = input.astype(float_dtype, copy=False) + temp = cupy.ascontiguousarray(temp) + if cupy.shares_memory(temp, input, 'MAY_SHARE_BOUNDS'): + temp = temp.copy() + return temp, float_dtype, output_dtype + + +def spline_filter1d(input, order=3, axis=-1, output=cupy.float64, + mode='mirror'): + """ + Calculate a 1-D spline filter along the given axis. + + The lines of the array along the given axis are filtered by a + spline filter. The order of the spline must be >= 2 and <= 5. + + Args: + input (cupy.ndarray): The input array. + order (int): The order of the spline interpolation, default is 3. Must + be in the range 0-5. + axis (int): The axis along which the spline filter is applied. Default + is the last axis. + output (cupy.ndarray or dtype, optional): The array in which to place + the output, or the dtype of the returned array. Default is + ``numpy.float64``. + mode (str): Points outside the boundaries of the input are filled + according to the given mode (``'constant'``, ``'nearest'``, + ``'mirror'``, ``'reflect'``, ``'wrap'``, ``'grid-mirror'``, + ``'grid-wrap'``, ``'grid-constant'`` or ``'opencv'``). + + Returns: + cupy.ndarray: The result of prefiltering the input. + + .. 
seealso:: :func:`scipy.spline_filter1d` + """ + if order < 0 or order > 5: + raise RuntimeError('spline order not supported') + x = input + ndim = x.ndim + axis = internal._normalize_axis_index(axis, ndim) + + # order 0, 1 don't require reshaping as no CUDA kernel will be called + # scalar or size 1 arrays also don't need to be filtered + run_kernel = not (order < 2 or x.ndim == 0 or x.shape[axis] == 1) + if not run_kernel: + output = _util._get_output(output, input) + _core.elementwise_copy(x, output) + return output + + temp, data_dtype, output_dtype = _get_spline_output(x, output) + data_type = cupy._core._scalar.get_typename(temp.dtype) + pole_type = cupy._core._scalar.get_typename(temp.real.dtype) + + index_type = _util._get_inttype(input) + index_dtype = cupy.int32 if index_type == 'int' else cupy.int64 + + n_samples = x.shape[axis] + n_signals = x.size // n_samples + info = cupy.array((n_signals, n_samples) + x.shape, dtype=index_dtype) + + # empirical choice of block size that seemed to work well + block_size = max(2 ** math.ceil(numpy.log2(n_samples / 32)), 8) + kern = _spline_prefilter_core.get_raw_spline1d_kernel( + axis, + ndim, + mode, + order=order, + index_type=index_type, + data_type=data_type, + pole_type=pole_type, + block_size=block_size, + ) + + # Due to recursive nature, a given line of data must be processed by a + # single thread. n_signals lines will be processed in total. 
+ block = (block_size,) + grid = ((n_signals + block[0] - 1) // block[0],) + + # apply prefilter gain + poles = _spline_prefilter_core.get_poles(order=order) + temp *= _spline_prefilter_core.get_gain(poles) + + # apply caual + anti-causal IIR spline filters + kern(grid, block, (temp, info)) + + if isinstance(output, cupy.ndarray) and temp is not output: + # copy kernel output into the user-provided output array + _core.elementwise_copy(temp, output) + return output + return temp.astype(output_dtype, copy=False) + + +def spline_filter(input, order=3, output=cupy.float64, mode='mirror'): + """Multidimensional spline filter. + + Args: + input (cupy.ndarray): The input array. + order (int): The order of the spline interpolation, default is 3. Must + be in the range 0-5. + output (cupy.ndarray or dtype, optional): The array in which to place + the output, or the dtype of the returned array. Default is + ``numpy.float64``. + mode (str): Points outside the boundaries of the input are filled + according to the given mode (``'constant'``, ``'nearest'``, + ``'mirror'``, ``'reflect'``, ``'wrap'``, ``'grid-mirror'``, + ``'grid-wrap'``, ``'grid-constant'`` or ``'opencv'``). + + Returns: + cupy.ndarray: The result of prefiltering the input. + + .. 
seealso:: :func:`scipy.spline_filter1d` + """ + if order < 2 or order > 5: + raise RuntimeError('spline order not supported') + + x = input + temp, data_dtype, output_dtype = _get_spline_output(x, output) + if order not in [0, 1] and input.ndim > 0: + for axis in range(x.ndim): + spline_filter1d(x, order, axis, output=temp, mode=mode) + x = temp + if isinstance(output, cupy.ndarray): + _core.elementwise_copy(temp, output) + else: + output = temp + if output.dtype != output_dtype: + output = output.astype(output_dtype) + return output + + +def _check_coordinates(coordinates, order, allow_float32=True): + if coordinates.dtype.kind == 'f': + if allow_float32: + coord_dtype = cupy.promote_types(coordinates.dtype, cupy.float32) + else: + coord_dtype = cupy.promote_types(coordinates.dtype, cupy.float64) + coordinates = coordinates.astype(coord_dtype, copy=False) + elif coordinates.dtype.kind in 'iu': + if order > 1: + # order > 1 (spline) kernels require floating-point coordinates + if allow_float32: + coord_dtype = cupy.promote_types( + coordinates.dtype, cupy.float32 + ) + else: + coord_dtype = cupy.promote_types( + coordinates.dtype, cupy.float64 + ) + coordinates = coordinates.astype(coord_dtype) + else: + raise ValueError('coordinates should have floating point dtype') + if not coordinates.flags.c_contiguous: + coordinates = cupy.ascontiguousarray(coordinates) + return coordinates + + +def _prepad_for_spline_filter(input, mode, cval): + if mode in ['nearest', 'grid-constant']: + # these modes need padding to get accurate boundary values + npad = 12 # empirical factor chosen by SciPy + if mode == 'grid-constant': + kwargs = dict(mode='constant', constant_values=cval) + else: + kwargs = dict(mode='edge') + padded = cupy.pad(input, npad, **kwargs) + else: + npad = 0 + padded = input + return padded, npad + + +def _filter_input(image, prefilter, mode, cval, order): + """Perform spline prefiltering when needed. 
+ + Spline orders > 1 need a prefiltering stage to preserve resolution. + + For boundary modes without analytical spline boundary conditions, some + prepadding of the input with cupy.pad is used to maintain accuracy. + ``npad`` is an integer corresponding to the amount of padding at each edge + of the array. + """ + if not prefilter or order < 2: + return (cupy.ascontiguousarray(image), 0) + padded, npad = _prepad_for_spline_filter(image, mode, cval) + float_dtype = cupy.promote_types(image.dtype, cupy.float32) + filtered = spline_filter(padded, order, output=float_dtype, mode=mode) + return cupy.ascontiguousarray(filtered), npad + + +def map_coordinates(input, coordinates, output=None, order=3, + mode='constant', cval=0.0, prefilter=True): + """Map the input array to new coordinates by interpolation. + + The array of coordinates is used to find, for each point in the output, the + corresponding coordinates in the input. The value of the input at those + coordinates is determined by spline interpolation of the requested order. + + The shape of the output is derived from that of the coordinate array by + dropping the first axis. The values of the array along the first axis are + the coordinates in the input array at which the output value is found. + + Args: + input (cupy.ndarray): The input array. + coordinates (array_like): The coordinates at which ``input`` is + evaluated. + output (cupy.ndarray or ~cupy.dtype): The array in which to place the + output, or the dtype of the returned array. + order (int): The order of the spline interpolation, default is 3. Must + be in the range 0-5. + mode (str): Points outside the boundaries of the input are filled + according to the given mode (``'constant'``, ``'nearest'``, + ``'mirror'``, ``'reflect'``, ``'wrap'``, ``'grid-mirror'``, + ``'grid-wrap'``, ``'grid-constant'`` or ``'opencv'``). + cval (scalar): Value used for points outside the boundaries of + the input if ``mode='constant'`` or ``mode='opencv'``. 
Default is + 0.0 + prefilter (bool): Determines if the input array is prefiltered with + ``spline_filter`` before interpolation. The default is True, which + will create a temporary ``float64`` array of filtered values if + ``order > 1``. If setting this to False, the output will be + slightly blurred if ``order > 1``, unless the input is prefiltered, + i.e. it is the result of calling ``spline_filter`` on the original + input. + + Returns: + cupy.ndarray: + The result of transforming the input. The shape of the output is + derived from that of ``coordinates`` by dropping the first axis. + + .. seealso:: :func:`scipy.ndimage.map_coordinates` + """ + + _check_parameter('map_coordinates', order, mode) + + if mode == 'opencv' or mode == '_opencv_edge': + input = cupy.pad(input, [(1, 1)] * input.ndim, 'constant', + constant_values=cval) + coordinates = cupy.add(coordinates, 1) + mode = 'constant' + + ret = _util._get_output(output, input, coordinates.shape[1:]) + integer_output = ret.dtype.kind in 'iu' + _util._check_cval(mode, cval, integer_output) + + if input.dtype.kind in 'iu': + input = input.astype(cupy.float32) + coordinates = _check_coordinates(coordinates, order) + filtered, nprepad = _filter_input(input, prefilter, mode, cval, order) + large_int = max(_prod(input.shape), coordinates.shape[0]) > 1 << 31 + kern = _interp_kernels._get_map_kernel( + input.ndim, large_int, yshape=coordinates.shape, mode=mode, cval=cval, + order=order, integer_output=integer_output, nprepad=nprepad) + kern(filtered, coordinates, ret) + return ret + + +def affine_transform(input, matrix, offset=0.0, output_shape=None, output=None, + order=3, mode='constant', cval=0.0, prefilter=True, *, + texture_memory=False): + """Apply an affine transformation. + + Given an output image pixel index vector ``o``, the pixel value is + determined from the input image at position + ``cupy.dot(matrix, o) + offset``. + + Args: + input (cupy.ndarray): The input array. 
+ matrix (cupy.ndarray): The inverse coordinate transformation matrix, + mapping output coordinates to input coordinates. If ``ndim`` is the + number of dimensions of ``input``, the given matrix must have one + of the following shapes: + + - ``(ndim, ndim)``: the linear transformation matrix for each + output coordinate. + - ``(ndim,)``: assume that the 2D transformation matrix is + diagonal, with the diagonal specified by the given value. + - ``(ndim + 1, ndim + 1)``: assume that the transformation is + specified using homogeneous coordinates. In this case, any + value passed to ``offset`` is ignored. + - ``(ndim, ndim + 1)``: as above, but the bottom row of a + homogeneous transformation matrix is always + ``[0, 0, ..., 1]``, and may be omitted. + + offset (float or sequence): The offset into the array where the + transform is applied. If a float, ``offset`` is the same for each + axis. If a sequence, ``offset`` should contain one value for each + axis. + output_shape (tuple of ints): Shape tuple. + output (cupy.ndarray or ~cupy.dtype): The array in which to place the + output, or the dtype of the returned array. + order (int): The order of the spline interpolation, default is 3. Must + be in the range 0-5. + mode (str): Points outside the boundaries of the input are filled + according to the given mode (``'constant'``, ``'nearest'``, + ``'mirror'``, ``'reflect'``, ``'wrap'``, ``'grid-mirror'``, + ``'grid-wrap'``, ``'grid-constant'`` or ``'opencv'``). + cval (scalar): Value used for points outside the boundaries of + the input if ``mode='constant'`` or ``mode='opencv'``. Default is + 0.0 + prefilter (bool): Determines if the input array is prefiltered with + ``spline_filter`` before interpolation. The default is True, which + will create a temporary ``float64`` array of filtered values if + ``order > 1``. If setting this to False, the output will be + slightly blurred if ``order > 1``, unless the input is prefiltered, + i.e. 
it is the result of calling ``spline_filter`` on the original + input. + texture_memory (bool): If True, uses GPU texture memory. Supports only: + + - 2D and 3D float32 arrays as input + - ``(ndim + 1, ndim + 1)`` homogeneous float32 transformation + matrix + - ``mode='constant'`` and ``mode='nearest'`` + - ``order=0`` (nearest neighbor) and ``order=1`` (linear + interpolation) + - NVIDIA CUDA GPUs + + Returns: + cupy.ndarray or None: + The transformed input. If ``output`` is given as a parameter, + ``None`` is returned. + + .. seealso:: :func:`scipy.ndimage.affine_transform` + """ + + if texture_memory: + if runtime.is_hip: + raise RuntimeError( + 'HIP currently does not support texture acceleration') + tm_interp = 'linear' if order > 0 else 'nearest' + return _texture.affine_transformation(data=input, + transformation_matrix=matrix, + output_shape=output_shape, + output=output, + interpolation=tm_interp, + mode=mode, + border_value=cval) + + _check_parameter('affine_transform', order, mode) + + offset = _util._fix_sequence_arg(offset, input.ndim, 'offset', float) + + if matrix.ndim not in [1, 2] or matrix.shape[0] < 1: + raise RuntimeError('no proper affine matrix provided') + if matrix.ndim == 2: + if matrix.shape[0] == matrix.shape[1] - 1: + offset = matrix[:, -1] + matrix = matrix[:, :-1] + elif matrix.shape[0] == input.ndim + 1: + offset = matrix[:-1, -1] + matrix = matrix[:-1, :-1] + if matrix.shape != (input.ndim, input.ndim): + raise RuntimeError('improper affine shape') + + if mode == 'opencv': + m = cupy.zeros((input.ndim + 1, input.ndim + 1)) + m[:-1, :-1] = matrix + m[:-1, -1] = offset + m[-1, -1] = 1 + m = cupy.linalg.inv(m) + m[:2] = cupy.roll(m[:2], 1, axis=0) + m[:2, :2] = cupy.roll(m[:2, :2], 1, axis=1) + matrix = m[:-1, :-1] + offset = m[:-1, -1] + + if output_shape is None: + output_shape = input.shape + + if mode == 'opencv' or mode == '_opencv_edge': + if matrix.ndim == 1: + matrix = cupy.diag(matrix) + coordinates = cupy.indices(output_shape, 
dtype=cupy.float64) + coordinates = cupy.dot(matrix, coordinates.reshape((input.ndim, -1))) + coordinates += cupy.expand_dims(cupy.asarray(offset), -1) + ret = _util._get_output(output, input, shape=output_shape) + ret[:] = map_coordinates(input, coordinates, ret.dtype, order, mode, + cval, prefilter).reshape(output_shape) + return ret + + matrix = matrix.astype(cupy.float64, copy=False) + ndim = input.ndim + output = _util._get_output(output, input, shape=output_shape) + if input.dtype.kind in 'iu': + input = input.astype(cupy.float32) + filtered, nprepad = _filter_input(input, prefilter, mode, cval, order) + + integer_output = output.dtype.kind in 'iu' + _util._check_cval(mode, cval, integer_output) + large_int = max(_prod(input.shape), _prod(output_shape)) > 1 << 31 + if matrix.ndim == 1: + offset = cupy.asarray(offset, dtype=cupy.float64) + offset = -offset / matrix + kern = _interp_kernels._get_zoom_shift_kernel( + ndim, large_int, output_shape, mode, cval=cval, order=order, + integer_output=integer_output, nprepad=nprepad) + kern(filtered, offset, matrix, output) + else: + kern = _interp_kernels._get_affine_kernel( + ndim, large_int, output_shape, mode, cval=cval, order=order, + integer_output=integer_output, nprepad=nprepad) + m = cupy.zeros((ndim, ndim + 1), dtype=cupy.float64) + m[:, :-1] = matrix + m[:, -1] = cupy.asarray(offset, dtype=cupy.float64) + kern(filtered, m, output) + return output + + +def _minmax(coor, minc, maxc): + if coor[0] < minc[0]: + minc[0] = coor[0] + if coor[0] > maxc[0]: + maxc[0] = coor[0] + if coor[1] < minc[1]: + minc[1] = coor[1] + if coor[1] > maxc[1]: + maxc[1] = coor[1] + return minc, maxc + + +def rotate(input, angle, axes=(1, 0), reshape=True, output=None, order=3, + mode='constant', cval=0.0, prefilter=True): + """Rotate an array. + + The array is rotated in the plane defined by the two axes given by the + ``axes`` parameter using spline interpolation of the requested order. 
def rotate(input, angle, axes=(1, 0), reshape=True, output=None, order=3,
           mode='constant', cval=0.0, prefilter=True):
    """Rotate an array in the plane spanned by two axes.

    The input is resampled with spline interpolation of the requested
    order inside the rotation plane selected by ``axes``.

    Args:
        input (cupy.ndarray): The input array.
        angle (float): Rotation angle, in degrees.
        axes (tuple of 2 ints): The two axes defining the rotation plane.
            Defaults to the first two axes.
        reshape (bool): When True (default), the output shape is enlarged
            so that the rotated input fits entirely inside it.
        output (cupy.ndarray or ~cupy.dtype): Array to place the result in,
            or the dtype of the returned array.
        order (int): Spline interpolation order, in the range 0-5
            (default 3).
        mode (str): How points outside the input boundaries are filled
            (``'constant'``, ``'nearest'``, ``'mirror'``, ``'reflect'``,
            ``'wrap'``, ``'grid-mirror'``, ``'grid-wrap'``,
            ``'grid-constant'`` or ``'opencv'``).
        cval (scalar): Fill value for points outside the input when
            ``mode='constant'`` or ``mode='opencv'``. Default is 0.0.
        prefilter (bool): Whether the input is run through
            ``spline_filter`` before interpolation. Defaults to True;
            disabling it blurs the result for ``order > 1`` unless the
            input was already prefiltered.

    Returns:
        cupy.ndarray or None:
            The rotated input, or None when ``output`` was provided.

    .. seealso:: :func:`scipy.ndimage.rotate`
    """
    _check_parameter('rotate', order, mode)

    if mode == 'opencv':
        mode = '_opencv_edge'

    input_arr = input
    ndim = input_arr.ndim

    # Normalize the rotation plane: wrap negative axes, then order them.
    plane = [ax + ndim if ax < 0 else ax for ax in axes]
    plane.sort()
    if plane[0] < 0 or ndim <= plane[1]:
        raise ValueError('invalid rotation plane specified')
    axes = plane

    theta = numpy.deg2rad(angle)
    sin = math.sin(theta)
    cos = math.cos(theta)

    # In-plane 2x2 rotation; offsets/shape follow scipy.ndimage.rotate.
    rot_matrix = numpy.array([[cos, sin],
                              [-sin, cos]])

    img_shape = numpy.asarray(input_arr.shape)
    in_plane_shape = img_shape[axes]
    if reshape:
        # Rotate the input-plane corners to get the tight output box.
        iy, ix = in_plane_shape
        out_bounds = rot_matrix @ [[0, 0, iy, iy],
                                   [0, ix, 0, ix]]
        out_plane_shape = (numpy.ptp(out_bounds, axis=1)
                           + 0.5).astype(cupy.int64)
    else:
        out_plane_shape = img_shape[axes]

    out_center = rot_matrix @ ((out_plane_shape - 1) / 2)
    in_center = (in_plane_shape - 1) / 2

    output_shape = img_shape
    output_shape[axes] = out_plane_shape
    output_shape = tuple(output_shape)

    # Embed the 2x2 in-plane rotation into a full-rank affine matrix.
    matrix = numpy.identity(ndim)
    matrix[axes[0], axes[0]] = cos
    matrix[axes[0], axes[1]] = sin
    matrix[axes[1], axes[0]] = -sin
    matrix[axes[1], axes[1]] = cos

    offset = numpy.zeros(ndim, dtype=cupy.float64)
    offset[axes] = in_center - out_center

    return affine_transform(input, cupy.asarray(matrix),
                            cupy.asarray(offset), output_shape, output,
                            order, mode, cval, prefilter)
def shift(input, shift, output=None, order=3, mode='constant', cval=0.0,
          prefilter=True):
    """Translate an array using spline interpolation.

    Points sampled from outside the input boundaries are produced
    according to the selected ``mode``.

    Args:
        input (cupy.ndarray): The input array.
        shift (float or sequence): Shift along each axis. A scalar applies
            the same shift to every axis; a sequence gives one value per
            axis.
        output (cupy.ndarray or ~cupy.dtype): Array to place the result in,
            or the dtype of the returned array.
        order (int): Spline interpolation order, in the range 0-5
            (default 3).
        mode (str): How points outside the input boundaries are filled
            (``'constant'``, ``'nearest'``, ``'mirror'``, ``'reflect'``,
            ``'wrap'``, ``'grid-mirror'``, ``'grid-wrap'``,
            ``'grid-constant'`` or ``'opencv'``).
        cval (scalar): Fill value for points outside the input when
            ``mode='constant'`` or ``mode='opencv'``. Default is 0.0.
        prefilter (bool): Whether the input is run through
            ``spline_filter`` before interpolation. Defaults to True;
            disabling it blurs the result for ``order > 1`` unless the
            input was already prefiltered.

    Returns:
        cupy.ndarray or None:
            The shifted input, or None when ``output`` was provided.

    .. seealso:: :func:`scipy.ndimage.shift`
    """
    _check_parameter('shift', order, mode)

    shift = _util._fix_sequence_arg(shift, input.ndim, 'shift', float)

    if mode == 'opencv':
        mode = '_opencv_edge'

        # OpenCV-style shifts delegate to affine_transform with identity
        # scaling and a negated offset.
        return affine_transform(
            input,
            cupy.ones(input.ndim, input.dtype),
            cupy.negative(cupy.asarray(shift)),
            None,
            output,
            order,
            mode,
            cval,
            prefilter,
        )

    out = _util._get_output(output, input)
    if input.dtype.kind in 'iu':
        # Integer inputs are interpolated in float32.
        input = input.astype(cupy.float32)
    filtered, nprepad = _filter_input(input, prefilter, mode, cval, order)
    integer_output = out.dtype.kind in 'iu'
    _util._check_cval(mode, cval, integer_output)
    large_int = _prod(input.shape) > 1 << 31
    kern = _interp_kernels._get_shift_kernel(
        input.ndim, large_int, input.shape, mode, cval=cval, order=order,
        integer_output=integer_output, nprepad=nprepad)
    shift = cupy.asarray(shift, dtype=cupy.float64, order='C')
    if shift.ndim != 1:
        raise ValueError('shift must be 1d')
    if shift.size != filtered.ndim:
        raise ValueError('len(shift) must equal input.ndim')
    kern(filtered, shift, out)
    return out
def zoom(input, zoom, output=None, order=3, mode='constant', cval=0.0,
         prefilter=True, *, grid_mode=False):
    """Zoom an array using spline interpolation.

    Args:
        input (cupy.ndarray): The input array.
        zoom (float or sequence): Zoom factor per axis. A scalar applies
            the same factor to every axis; a sequence gives one value per
            axis.
        output (cupy.ndarray or ~cupy.dtype): Array to place the result in,
            or the dtype of the returned array.
        order (int): Spline interpolation order, in the range 0-5
            (default 3).
        mode (str): How points outside the input boundaries are filled
            (``'constant'``, ``'nearest'``, ``'mirror'``, ``'reflect'``,
            ``'wrap'``, ``'grid-mirror'``, ``'grid-wrap'``,
            ``'grid-constant'`` or ``'opencv'``).
        cval (scalar): Fill value for points outside the input when
            ``mode='constant'`` or ``mode='opencv'``. Default is 0.0.
        prefilter (bool): Whether the input is run through
            ``spline_filter`` before interpolation. Defaults to True;
            disabling it blurs the result for ``order > 1`` unless the
            input was already prefiltered.
        grid_mode (bool, optional): When False (default), distances are
            measured between pixel centers, so a length-5 signal spans a
            length of 4. When True, the full pixel extent is used, so the
            same signal spans a length of 5. Coordinate 0 refers to the
            first pixel center in either case.

    Returns:
        cupy.ndarray or None:
            The zoomed input, or None when ``output`` was provided.

    .. seealso:: :func:`scipy.ndimage.zoom`
    """
    _check_parameter('zoom', order, mode)

    zoom = _util._fix_sequence_arg(zoom, input.ndim, 'zoom', float)

    output_shape = tuple(
        int(round(size * factor)) for size, factor in zip(input.shape, zoom))

    if mode == 'opencv':
        # OpenCV semantics: invert the factors, center the sampling grid,
        # and delegate to affine_transform with nearest-edge handling.
        inv_zoom = []
        offset = []
        for in_size, out_size in zip(input.shape, output_shape):
            if out_size > 0:
                inv_zoom.append(float(in_size) / out_size)
                offset.append((inv_zoom[-1] - 1) / 2.0)
            else:
                inv_zoom.append(0)
                offset.append(0)
        return affine_transform(
            input,
            cupy.asarray(inv_zoom),
            offset,
            output_shape,
            output,
            order,
            'nearest',
            cval,
            prefilter,
        )

    if grid_mode:
        # warn about modes that may have surprising behavior
        suggest_mode = None
        if mode == 'constant':
            suggest_mode = 'grid-constant'
        elif mode == 'wrap':
            suggest_mode = 'grid-wrap'
        if suggest_mode is not None:
            warnings.warn(
                f'It is recommended to use mode = {suggest_mode} instead '
                f'of {mode} when grid_mode is True.')

    factors = []
    for in_size, out_size in zip(input.shape, output_shape):
        if grid_mode and out_size > 0:
            factors.append(in_size / out_size)
        elif out_size > 1:
            factors.append((in_size - 1) / (out_size - 1))
        else:
            factors.append(0)

    out = _util._get_output(output, input, shape=output_shape)
    if input.dtype.kind in 'iu':
        # Integer inputs are interpolated in float32.
        input = input.astype(cupy.float32)
    filtered, nprepad = _filter_input(input, prefilter, mode, cval, order)
    integer_output = out.dtype.kind in 'iu'
    _util._check_cval(mode, cval, integer_output)
    large_int = max(_prod(input.shape), _prod(output_shape)) > 1 << 31
    kern = _interp_kernels._get_zoom_kernel(
        input.ndim, large_int, output_shape, mode, order=order,
        integer_output=integer_output, grid_mode=grid_mode,
        nprepad=nprepad)
    kern(filtered, cupy.asarray(factors, dtype=cupy.float64), out)
    return out
a/vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/_measurements.py b/vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/_measurements.py new file mode 100644 index 0000000000000000000000000000000000000000..33f9ec4167135664f4ce84132ea95361bfa1caf7 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/_measurements.py @@ -0,0 +1,1380 @@ +import warnings + +import numpy + +import cupy +from cupy import _core +from cupy import _util + + +def label(input, structure=None, output=None): + """Labels features in an array. + + Args: + input (cupy.ndarray): The input array. + structure (array_like or None): A structuring element that defines + feature connections. ```structure``` must be centersymmetric. If + None, structure is automatically generated with a squared + connectivity equal to one. + output (cupy.ndarray, dtype or None): The array in which to place the + output. + Returns: + label (cupy.ndarray): An integer array where each unique feature in + ```input``` has a unique label in the array. + + num_features (int): Number of features found. + + .. warning:: + + This function may synchronize the device. + + .. 
seealso:: :func:`scipy.ndimage.label` + """ + if not isinstance(input, cupy.ndarray): + raise TypeError('input must be cupy.ndarray') + if input.dtype.char in 'FD': + raise TypeError('Complex type not supported') + if structure is None: + structure = _generate_binary_structure(input.ndim, 1) + elif isinstance(structure, cupy.ndarray): + structure = cupy.asnumpy(structure) + structure = numpy.array(structure, dtype=bool) + if structure.ndim != input.ndim: + raise RuntimeError('structure and input must have equal rank') + for i in structure.shape: + if i != 3: + raise ValueError('structure dimensions must be equal to 3') + + if isinstance(output, cupy.ndarray): + if output.shape != input.shape: + raise ValueError("output shape not correct") + caller_provided_output = True + else: + caller_provided_output = False + if output is None: + output = cupy.empty(input.shape, numpy.int32) + else: + output = cupy.empty(input.shape, output) + + if input.size == 0: + # empty + maxlabel = 0 + elif input.ndim == 0: + # 0-dim array + maxlabel = 0 if input.item() == 0 else 1 + output.fill(maxlabel) + else: + if output.dtype != numpy.int32: + y = cupy.empty(input.shape, numpy.int32) + else: + y = output + maxlabel = _label(input, structure, y) + if output.dtype != numpy.int32: + _core.elementwise_copy(y, output) + + if caller_provided_output: + return maxlabel + else: + return output, maxlabel + + +def _generate_binary_structure(rank, connectivity): + if connectivity < 1: + connectivity = 1 + if rank < 1: + return numpy.array(True, dtype=bool) + output = numpy.fabs(numpy.indices([3] * rank) - 1) + output = numpy.add.reduce(output, 0) + return output <= connectivity + + +def _label(x, structure, y): + elems = numpy.where(structure != 0) + vecs = [elems[dm] - 1 for dm in range(x.ndim)] + offset = vecs[0] + for dm in range(1, x.ndim): + offset = offset * 3 + vecs[dm] + indxs = numpy.where(offset < 0)[0] + dirs = [[vecs[dm][dr] for dm in range(x.ndim)] for dr in indxs] + dirs = 
def _kernel_init():
    # Seed the union-find forest: background voxels get -1, every
    # foreground voxel starts as its own root (its flat index).
    return _core.ElementwiseKernel(
        'X x', 'Y y', 'if (x == 0) { y = -1; } else { y = i; }',
        'cupyx_scipy_ndimage_label_init')


def _kernel_connect():
    # Union-find "union" pass. For each foreground voxel, walk the half
    # neighborhood in `dirs`; for each connected neighbor, find both roots
    # and link the larger-index root to the smaller one with atomicCAS,
    # retrying on contention. Roots therefore converge to the smallest
    # flat index in each connected component.
    return _core.ElementwiseKernel(
        'raw int32 shape, raw int32 dirs, int32 ndirs, int32 ndim',
        'raw Y y',
        '''
        if (y[i] < 0) continue;
        for (int dr = 0; dr < ndirs; dr++) {
            int j = i;
            int rest = j;
            int stride = 1;
            int k = 0;
            for (int dm = ndim-1; dm >= 0; dm--) {
                int pos = rest % shape[dm] + dirs[dm + dr * ndim];
                if (pos < 0 || pos >= shape[dm]) {
                    k = -1;
                    break;
                }
                k += pos * stride;
                rest /= shape[dm];
                stride *= shape[dm];
            }
            if (k < 0) continue;
            if (y[k] < 0) continue;
            while (1) {
                while (j != y[j]) { j = y[j]; }
                while (k != y[k]) { k = y[k]; }
                if (j == k) break;
                if (j < k) {
                    int old = atomicCAS( &y[k], k, j );
                    if (old == k) break;
                    k = old;
                }
                else {
                    int old = atomicCAS( &y[j], j, k );
                    if (old == j) break;
                    j = old;
                }
            }
        }
        ''',
        'cupyx_scipy_ndimage_label_connect')


def _kernel_count():
    # Path-compress each voxel to its root and count the roots; the total
    # (number of components) is accumulated in count[0].
    return _core.ElementwiseKernel(
        '', 'raw Y y, raw int32 count',
        '''
        if (y[i] < 0) continue;
        int j = i;
        while (j != y[j]) { j = y[j]; }
        if (j != i) y[i] = j;
        else atomicAdd(&count[0], 1);
        ''',
        'cupyx_scipy_ndimage_label_count')


def _kernel_labels():
    # Collect the flat index of every root into `labels` (unordered);
    # count[1] is the running output cursor.
    return _core.ElementwiseKernel(
        '', 'raw Y y, raw int32 count, raw int32 labels',
        '''
        if (y[i] != i) continue;
        int j = atomicAdd(&count[1], 1);
        labels[j] = i;
        ''',
        'cupyx_scipy_ndimage_label_labels')


def _kernel_finalize():
    # Map each voxel's root index to a dense 1-based label by binary
    # searching the sorted `labels` array; background becomes 0.
    return _core.ElementwiseKernel(
        'int32 maxlabel', 'raw int32 labels, raw Y y',
        '''
        if (y[i] < 0) {
            y[i] = 0;
            continue;
        }
        int yi = y[i];
        int j_min = 0;
        int j_max = maxlabel - 1;
        int j = (j_min + j_max) / 2;
        while (j_min < j_max) {
            if (yi == labels[j]) break;
            if (yi < labels[j]) j_max = j - 1;
            else j_min = j + 1;
            j = (j_min + j_max) / 2;
        }
        y[i] = j + 1;
        ''',
        'cupyx_scipy_ndimage_label_finalize')


# Per-element atomicAdd of squared deviations into out[j], where j is the
# position of this element's label within `index` (linear scan; intended
# for small `index` sizes).
_ndimage_variance_kernel = _core.ElementwiseKernel(
    'T input, R labels, raw X index, uint64 size, raw float64 mean',
    'raw float64 out',
    """
    for (ptrdiff_t j = 0; j < size; j++) {
        if (labels == index[j]) {
            atomicAdd(&out[j], (input - mean[j]) * (input - mean[j]));
            break;
        }
    }
    """,
    'cupyx_scipy_ndimage_variance')


# Per-element atomicAdd of values into out[j] for this element's label.
_ndimage_sum_kernel = _core.ElementwiseKernel(
    'T input, R labels, raw X index, uint64 size',
    'raw float64 out',
    """
    for (ptrdiff_t j = 0; j < size; j++) {
        if (labels == index[j]) {
            atomicAdd(&out[j], input);
            break;
        }
    }
    """,
    'cupyx_scipy_ndimage_sum')


def _ndimage_sum_kernel_2(input, labels, index, sum_val, batch_size=4):
    # Fallback masked-sum reduction for dtypes without atomicAdd support;
    # processes `batch_size` labels per iteration via broadcasting.
    for i in range(0, index.size, batch_size):
        matched = labels == index[i:i + batch_size].reshape(
            (-1,) + (1,) * input.ndim)
        sum_axes = tuple(range(1, 1 + input.ndim))
        sum_val[i:i + batch_size] = cupy.where(matched, input, 0).sum(
            axis=sum_axes)
    return sum_val


# Per-element atomicAdd accumulation of both value sums and counts, used
# to compute per-label means in a single pass.
_ndimage_mean_kernel = _core.ElementwiseKernel(
    'T input, R labels, raw X index, uint64 size',
    'raw float64 out, raw uint64 count',
    """
    for (ptrdiff_t j = 0; j < size; j++) {
        if (labels == index[j]) {
            atomicAdd(&out[j], input);
            atomicAdd(&count[j], 1);
            break;
        }
    }
    """,
    'cupyx_scipy_ndimage_mean')
def _ndimage_mean_kernel_2(input, labels, index, batch_size=4,
                           return_count=False):
    # Fallback per-label mean for dtypes without atomicAdd support:
    # broadcast-compare `batch_size` labels at a time, then reduce.
    sum_val = cupy.empty_like(index, dtype=cupy.float64)
    count = cupy.empty_like(index, dtype=cupy.uint64)
    for i in range(0, index.size, batch_size):
        matched = labels == index[i:i + batch_size].reshape(
            (-1,) + (1,) * input.ndim)
        mean_axes = tuple(range(1, 1 + input.ndim))
        count[i:i + batch_size] = matched.sum(axis=mean_axes)
        sum_val[i:i + batch_size] = cupy.where(matched, input, 0).sum(
            axis=mean_axes)
    if return_count:
        return sum_val / count, count
    return sum_val / count


def _mean_driver(input, labels, index, return_count=False, use_kern=False):
    # Dispatch between the atomicAdd kernel (fast path) and the batched
    # broadcasting fallback (`use_kern`, for unsupported dtypes).
    if use_kern:
        return _ndimage_mean_kernel_2(input, labels, index,
                                      return_count=return_count)

    out = cupy.zeros_like(index, cupy.float64)
    count = cupy.zeros_like(index, dtype=cupy.uint64)
    # The kernel returns its two raw outputs; `sum` shadows the builtin
    # here (kept for parity with the upstream implementation).
    sum, count = _ndimage_mean_kernel(input,
                                      labels, index, index.size, out, count)
    if return_count:
        return sum / count, count
    return sum / count


def variance(input, labels=None, index=None):
    """Calculates the variance of the values of an n-D image array, optionally
    at specified sub-regions.

    Args:
        input (cupy.ndarray): Nd-image data to process.
        labels (cupy.ndarray or None): Labels defining sub-regions in `input`.
            If not None, must be same shape as `input`.
        index (cupy.ndarray or None): `labels` to include in output. If None
            (default), all values where `labels` is non-zero are used.

    Returns:
        cupy.ndarray: Values of variance, for each sub-region if
        `labels` and `index` are specified.

    .. seealso:: :func:`scipy.ndimage.variance`
    """
    if not isinstance(input, cupy.ndarray):
        raise TypeError('input must be cupy.ndarray')

    if input.dtype in (cupy.complex64, cupy.complex128):
        raise TypeError("cupyx.scipy.ndimage.variance doesn't support %{}"
                        "".format(input.dtype.type))

    use_kern = False
    # There are constraints on types because of atomicAdd() in CUDA.
    if input.dtype not in [cupy.int32, cupy.float16, cupy.float32,
                           cupy.float64, cupy.uint32, cupy.uint64,
                           cupy.ulonglong]:
        warnings.warn(
            'Using the slower implementation because the provided '
            f'type {input.dtype} is not supported by cupyx.scipy.ndimage.sum. '
            'Consider using an array of type int32, float16, '
            'float32, float64, uint32, uint64 as data types '
            'for the fast implementation', _util.PerformanceWarning)
        use_kern = True

    def calc_var_with_intermediate_float(input):
        vals_c = input - input.mean()
        count = vals_c.size
        # Does not use `ndarray.mean()` here to return the same results as
        # SciPy does, especially in case `input`'s dtype is float16.
        return cupy.square(vals_c).sum() / cupy.asanyarray(count).astype(float)

    if labels is None:
        # Global variance over the whole array.
        return calc_var_with_intermediate_float(input)

    if not isinstance(labels, cupy.ndarray):
        raise TypeError('label must be cupy.ndarray')

    input, labels = cupy.broadcast_arrays(input, labels)

    if index is None:
        # Variance over all labeled (non-zero) elements.
        return calc_var_with_intermediate_float(input[labels > 0])

    if cupy.isscalar(index):
        return calc_var_with_intermediate_float(input[labels == index])

    if not isinstance(index, cupy.ndarray):
        if not isinstance(index, int):
            raise TypeError('index must be cupy.ndarray or a scalar int')
        else:
            return (input[labels == index]).var().astype(cupy.float64,
                                                         copy=False)

    # Two-pass algorithm: per-label means first, then squared deviations.
    mean_val, count = _mean_driver(input, labels, index, True, use_kern)
    if use_kern:
        new_axis = (..., *(cupy.newaxis for _ in range(input.ndim)))
        return cupy.where(labels[None, ...] == index[new_axis],
                          cupy.square(input - mean_val[new_axis]),
                          0).sum(tuple(range(1, input.ndim + 1))) / count
    out = cupy.zeros_like(index, dtype=cupy.float64)
    return _ndimage_variance_kernel(input, labels, index, index.size, mean_val,
                                    out) / count
def sum_labels(input, labels=None, index=None):
    """Calculates the sum of the values of an n-D image array, optionally
    at specified sub-regions.

    Args:
        input (cupy.ndarray): Nd-image data to process.
        labels (cupy.ndarray or None): Labels defining sub-regions in `input`.
            If not None, must be same shape as `input`.
        index (cupy.ndarray or None): `labels` to include in output. If None
            (default), all values where `labels` is non-zero are used.

    Returns:
        sum (cupy.ndarray): sum of values, for each sub-region if
        `labels` and `index` are specified.

    .. seealso:: :func:`scipy.ndimage.sum_labels`
    """
    if not isinstance(input, cupy.ndarray):
        raise TypeError('input must be cupy.ndarray')

    if input.dtype in (cupy.complex64, cupy.complex128):
        raise TypeError("cupyx.scipy.ndimage.sum does not support %{}".format(
            input.dtype.type))

    use_kern = False
    # There are constraints on types because of atomicAdd() in CUDA.
    if input.dtype not in [cupy.int32, cupy.float16, cupy.float32,
                           cupy.float64, cupy.uint32, cupy.uint64,
                           cupy.ulonglong]:
        # Fixed the emitted warning text: the adjacent string literals
        # previously concatenated to "data typesfor the fast implmentation"
        # (missing space + typo).
        warnings.warn(
            'Using the slower implementation as '
            'cupyx.scipy.ndimage.sum supports int32, float16, '
            'float32, float64, uint32, uint64 as data types '
            'for the fast implementation', _util.PerformanceWarning)
        use_kern = True

    if labels is None:
        # No labels: global sum.
        return input.sum()

    if not isinstance(labels, cupy.ndarray):
        raise TypeError('label must be cupy.ndarray')

    input, labels = cupy.broadcast_arrays(input, labels)

    if index is None:
        # Sum over all labeled (non-zero) elements.
        return input[labels != 0].sum()

    if not isinstance(index, cupy.ndarray):
        if not isinstance(index, int):
            raise TypeError('index must be cupy.ndarray or a scalar int')
        else:
            return (input[labels == index]).sum()

    if index.size == 0:
        return cupy.array([], dtype=cupy.int64)

    out = cupy.zeros_like(index, dtype=cupy.float64)

    # The following parameters for sum were determined using a Tesla P100.
    if (input.size >= 262144 and index.size <= 4) or use_kern:
        return _ndimage_sum_kernel_2(input, labels, index, out)
    return _ndimage_sum_kernel(input, labels, index, index.size, out)


def sum(input, labels=None, index=None):
    """Calculates the sum of the values of an n-D image array, optionally
    at specified sub-regions.

    Args:
        input (cupy.ndarray): Nd-image data to process.
        labels (cupy.ndarray or None): Labels defining sub-regions in `input`.
            If not None, must be same shape as `input`.
        index (cupy.ndarray or None): `labels` to include in output. If None
            (default), all values where `labels` is non-zero are used.

    Returns:
        sum (cupy.ndarray): sum of values, for each sub-region if
        `labels` and `index` are specified.

    Notes:
        This is an alias for `cupyx.scipy.ndimage.sum_labels` kept for
        backwards compatibility reasons. For new code please prefer
        `sum_labels`.

    .. seealso:: :func:`scipy.ndimage.sum`
    """
    return sum_labels(input, labels, index)
def mean(input, labels=None, index=None):
    """Calculates the mean of the values of an n-D image array, optionally
    at specified sub-regions.

    Args:
        input (cupy.ndarray): Nd-image data to process.
        labels (cupy.ndarray or None): Labels defining sub-regions in `input`.
            If not None, must be same shape as `input`.
        index (cupy.ndarray or None): `labels` to include in output. If None
            (default), all values where `labels` is non-zero are used.

    Returns:
        mean (cupy.ndarray): mean of values, for each sub-region if
        `labels` and `index` are specified.

    .. seealso:: :func:`scipy.ndimage.mean`
    """
    if not isinstance(input, cupy.ndarray):
        raise TypeError('input must be cupy.ndarray')

    if input.dtype in (cupy.complex64, cupy.complex128):
        raise TypeError("cupyx.scipy.ndimage.mean does not support %{}".format(
            input.dtype.type))

    use_kern = False
    # There are constraints on types because of atomicAdd() in CUDA.
    if input.dtype not in [cupy.int32, cupy.float16, cupy.float32,
                           cupy.float64, cupy.uint32, cupy.uint64,
                           cupy.ulonglong]:
        # Fixed the "implmentation" typo in the emitted warning text.
        warnings.warn(
            'Using the slower implementation as '
            'cupyx.scipy.ndimage.mean supports int32, float16, '
            'float32, float64, uint32, uint64 as data types '
            'for the fast implementation', _util.PerformanceWarning)
        use_kern = True

    def calc_mean_with_intermediate_float(input):
        sum = input.sum()
        count = input.size
        # Does not use `ndarray.mean()` here to return the same results as
        # SciPy does, especially in case `input`'s dtype is float16.
        return sum / cupy.asanyarray(count).astype(float)

    if labels is None:
        # No labels: global mean.
        return calc_mean_with_intermediate_float(input)

    if not isinstance(labels, cupy.ndarray):
        raise TypeError('label must be cupy.ndarray')

    input, labels = cupy.broadcast_arrays(input, labels)

    if index is None:
        # Mean over all labeled (non-zero) elements.
        return calc_mean_with_intermediate_float(input[labels > 0])

    if cupy.isscalar(index):
        return calc_mean_with_intermediate_float(input[labels == index])

    if not isinstance(index, cupy.ndarray):
        if not isinstance(index, int):
            raise TypeError('index must be cupy.ndarray or a scalar int')
        else:
            return (input[labels == index]).mean(dtype=cupy.float64)

    return _mean_driver(input, labels, index, use_kern=use_kern)


def standard_deviation(input, labels=None, index=None):
    """Calculates the standard deviation of the values of an n-D image array,
    optionally at specified sub-regions.

    Args:
        input (cupy.ndarray): Nd-image data to process.
        labels (cupy.ndarray or None): Labels defining sub-regions in `input`.
            If not None, must be same shape as `input`.
        index (cupy.ndarray or None): `labels` to include in output. If None
            (default), all values where `labels` is non-zero are used.

    Returns:
        standard_deviation (cupy.ndarray): standard deviation of values, for
        each sub-region if `labels` and `index` are specified.

    .. seealso:: :func:`scipy.ndimage.standard_deviation`
    """
    return cupy.sqrt(variance(input, labels, index))


def _safely_castable_to_int(dt):
    """Test whether the NumPy data type `dt` can be safely cast to an int."""
    int_size = cupy.dtype(int).itemsize
    # Signed ints up to the platform int size are safe; unsigned ints must
    # be strictly smaller, since the sign bit is lost on conversion.
    safe = (
        cupy.issubdtype(dt, cupy.signedinteger) and dt.itemsize <= int_size
    ) or (cupy.issubdtype(dt, cupy.unsignedinteger) and dt.itemsize < int_size)
    return safe


def _get_values(arrays, func):
    """Concatenated result of applying func to a list of arrays.

    func should be cupy.min, cupy.max or cupy.median. Empty arrays
    contribute a single 0 placeholder so the output stays aligned with the
    label order.
    """
    dtype = arrays[0].dtype
    return cupy.concatenate(
        [
            func(a, keepdims=True)
            if a.size != 0 else cupy.asarray([0], dtype=dtype)
            for a in arrays
        ]
    )


def _get_positions(arrays, position_arrays, arg_func):
    """Concatenated positions from applying arg_func to arrays.

    arg_func should be cupy.argmin or cupy.argmax. Empty arrays contribute
    a single 0 placeholder so the output stays aligned with the label
    order.
    """
    return cupy.concatenate(
        [
            pos[arg_func(a, keepdims=True)]
            if a.size != 0 else cupy.asarray([0], dtype=int)
            for pos, a in zip(position_arrays, arrays)
        ]
    )
+ """ + find_positions = find_min_positions or find_max_positions + + # extract labeled regions into separate arrays + arrays = [] + position_arrays = [] + for i in idxs: + label_idx = labels == i + arrays.append(input[label_idx]) + if find_positions: + position_arrays.append(positions[label_idx]) + + result = [] + # the order below matches the order expected by cupy.ndimage.extrema + if find_min: + result += [_get_values(arrays, cupy.min)] + if find_min_positions: + result += [_get_positions(arrays, position_arrays, cupy.argmin)] + if find_max: + result += [_get_values(arrays, cupy.max)] + if find_max_positions: + result += [_get_positions(arrays, position_arrays, cupy.argmax)] + if find_median: + result += [_get_values(arrays, cupy.median)] + return result + + +def _select(input, labels=None, index=None, find_min=False, find_max=False, + find_min_positions=False, find_max_positions=False, + find_median=False): + """Return one or more of: min, max, min position, max position, median. + + If neither `labels` or `index` is provided, these are the global values + in `input`. If `index` is None, but `labels` is provided, a global value + across all non-zero labels is given. When both `labels` and `index` are + provided, lists of values are provided for each labeled region specified + in `index`. See further details in :func:`cupyx.scipy.ndimage.minimum`, + etc. + + Used by minimum, maximum, minimum_position, maximum_position, extrema. 
+ """ + find_positions = find_min_positions or find_max_positions + positions = None + if find_positions: + positions = cupy.arange(input.size).reshape(input.shape) + + def single_group(vals, positions): + result = [] + if find_min: + result += [vals.min()] + if find_min_positions: + result += [positions[vals == vals.min()][0]] + if find_max: + result += [vals.max()] + if find_max_positions: + result += [positions[vals == vals.max()][0]] + if find_median: + result += [cupy.median(vals)] + return result + + if labels is None: + return single_group(input, positions) + + # ensure input and labels match sizes + input, labels = cupy.broadcast_arrays(input, labels) + + if index is None: + mask = labels > 0 + masked_positions = None + if find_positions: + masked_positions = positions[mask] + return single_group(input[mask], masked_positions) + + if cupy.isscalar(index): + mask = labels == index + masked_positions = None + if find_positions: + masked_positions = positions[mask] + return single_group(input[mask], masked_positions) + + index = cupy.asarray(index) + + safe_int = _safely_castable_to_int(labels.dtype) + min_label = labels.min() + max_label = labels.max() + + # Remap labels to unique integers if necessary, or if the largest label is + # larger than the number of values. 
+ if (not safe_int or min_label < 0 or max_label > labels.size): + # Remap labels, and indexes + unique_labels, labels = cupy.unique(labels, return_inverse=True) + idxs = cupy.searchsorted(unique_labels, index) + + # Make all of idxs valid + idxs[idxs >= unique_labels.size] = 0 + found = unique_labels[idxs] == index + else: + # Labels are an integer type, and there aren't too many + idxs = cupy.asanyarray(index, int).copy() + found = (idxs >= 0) & (idxs <= max_label) + + idxs[~found] = max_label + 1 + + input = input.ravel() + labels = labels.ravel() + if find_positions: + positions = positions.ravel() + + using_cub = _core._accelerator.ACCELERATOR_CUB in \ + cupy._core.get_routine_accelerators() + + if using_cub: + # Cutoff values below were determined empirically for relatively large + # input arrays. + if find_positions or find_median: + n_label_cutoff = 15 + else: + n_label_cutoff = 30 + else: + n_label_cutoff = 0 + + if n_label_cutoff and len(idxs) <= n_label_cutoff: + return _select_via_looping( + input, labels, idxs, positions, find_min, find_min_positions, + find_max, find_max_positions, find_median + ) + + order = cupy.lexsort(cupy.stack((input.ravel(), labels.ravel()))) + input = input[order] + labels = labels[order] + if find_positions: + positions = positions[order] + + # Determine indices corresponding to the min or max value for each label + label_change_index = cupy.searchsorted(labels, + cupy.arange(1, max_label + 2)) + if find_min or find_min_positions or find_median: + # index corresponding to the minimum value at each label + min_index = label_change_index[:-1] + if find_max or find_max_positions or find_median: + # index corresponding to the maximum value at each label + max_index = label_change_index[1:] - 1 + + result = [] + # the order below matches the order expected by cupy.ndimage.extrema + if find_min: + mins = cupy.zeros(int(labels.max()) + 2, input.dtype) + mins[labels[min_index]] = input[min_index] + result += [mins[idxs]] + if 
find_min_positions: + minpos = cupy.zeros(labels.max().item() + 2, int) + minpos[labels[min_index]] = positions[min_index] + result += [minpos[idxs]] + if find_max: + maxs = cupy.zeros(int(labels.max()) + 2, input.dtype) + maxs[labels[max_index]] = input[max_index] + result += [maxs[idxs]] + if find_max_positions: + maxpos = cupy.zeros(labels.max().item() + 2, int) + maxpos[labels[max_index]] = positions[max_index] + result += [maxpos[idxs]] + if find_median: + locs = cupy.arange(len(labels)) + lo = cupy.zeros(int(labels.max()) + 2, int) + lo[labels[min_index]] = locs[min_index] + hi = cupy.zeros(int(labels.max()) + 2, int) + hi[labels[max_index]] = locs[max_index] + lo = lo[idxs] + hi = hi[idxs] + # lo is an index to the lowest value in input for each label, + # hi is an index to the largest value. + # move them to be either the same ((hi - lo) % 2 == 0) or next + # to each other ((hi - lo) % 2 == 1), then average. + step = (hi - lo) // 2 + lo += step + hi -= step + if input.dtype.kind in 'iub': + # fix for https://github.com/scipy/scipy/issues/12836 + result += [(input[lo].astype(float) + input[hi].astype(float)) / + 2.0] + else: + result += [(input[lo] + input[hi]) / 2.0] + + return result + + +def minimum(input, labels=None, index=None): + """Calculate the minimum of the values of an array over labeled regions. + + Args: + input (cupy.ndarray): + Array of values. For each region specified by `labels`, the + minimal values of `input` over the region is computed. + labels (cupy.ndarray, optional): An array of integers marking different + regions over which the minimum value of `input` is to be computed. + `labels` must have the same shape as `input`. If `labels` is not + specified, the minimum over the whole array is returned. + index (array_like, optional): A list of region labels that are taken + into account for computing the minima. If `index` is None, the + minimum over all elements where `labels` is non-zero is returned. 
+ + Returns: + cupy.ndarray: Array of minima of `input` over the regions + determined by `labels` and whose index is in `index`. If `index` or + `labels` are not specified, a 0-dimensional cupy.ndarray is + returned: the minimal value of `input` if `labels` is None, + and the minimal value of elements where `labels` is greater than + zero if `index` is None. + + .. seealso:: :func:`scipy.ndimage.minimum` + """ + return _select(input, labels, index, find_min=True)[0] + + +def maximum(input, labels=None, index=None): + """Calculate the maximum of the values of an array over labeled regions. + + Args: + input (cupy.ndarray): + Array of values. For each region specified by `labels`, the + maximal values of `input` over the region is computed. + labels (cupy.ndarray, optional): An array of integers marking different + regions over which the maximum value of `input` is to be computed. + `labels` must have the same shape as `input`. If `labels` is not + specified, the maximum over the whole array is returned. + index (array_like, optional): A list of region labels that are taken + into account for computing the maxima. If `index` is None, the + maximum over all elements where `labels` is non-zero is returned. + + Returns: + cupy.ndarray: Array of maxima of `input` over the regions + determaxed by `labels` and whose index is in `index`. If `index` or + `labels` are not specified, a 0-dimensional cupy.ndarray is + returned: the maximal value of `input` if `labels` is None, + and the maximal value of elements where `labels` is greater than + zero if `index` is None. + + .. seealso:: :func:`scipy.ndimage.maximum` + """ + return _select(input, labels, index, find_max=True)[0] + + +def median(input, labels=None, index=None): + """Calculate the median of the values of an array over labeled regions. + + Args: + input (cupy.ndarray): + Array of values. For each region specified by `labels`, the + median values of `input` over the region is computed. 
+ labels (cupy.ndarray, optional): An array of integers marking different + regions over which the median value of `input` is to be computed. + `labels` must have the same shape as `input`. If `labels` is not + specified, the median over the whole array is returned. + index (array_like, optional): A list of region labels that are taken + into account for computing the medians. If `index` is None, the + median over all elements where `labels` is non-zero is returned. + + Returns: + cupy.ndarray: Array of medians of `input` over the regions + determined by `labels` and whose index is in `index`. If `index` or + `labels` are not specified, a 0-dimensional cupy.ndarray is + returned: the median value of `input` if `labels` is None, + and the median value of elements where `labels` is greater than + zero if `index` is None. + + .. seealso:: :func:`scipy.ndimage.median` + """ + return _select(input, labels, index, find_median=True)[0] + + +def minimum_position(input, labels=None, index=None): + """Find the positions of the minimums of the values of an array at labels. + + For each region specified by `labels`, the position of the minimum + value of `input` within the region is returned. + + Args: + input (cupy.ndarray): + Array of values. For each region specified by `labels`, the + minimal values of `input` over the region is computed. + labels (cupy.ndarray, optional): An array of integers marking different + regions over which the position of the minimum value of `input` is + to be computed. `labels` must have the same shape as `input`. If + `labels` is not specified, the location of the first minimum over + the whole array is returned. + + The `labels` argument only works when `index` is specified. + index (array_like, optional): A list of region labels that are taken + into account for finding the location of the minima. If `index` is + None, the ``first`` minimum over all elements where `labels` is + non-zero is returned. 
+ + The `index` argument only works when `labels` is specified. + + Returns: + Tuple of ints or list of tuples of ints that specify the location of + minima of `input` over the regions determined by `labels` and whose + index is in `index`. + + If `index` or `labels` are not specified, a tuple of ints is returned + specifying the location of the first minimal value of `input`. + + .. note:: + When `input` has multiple identical minima within a labeled region, + the coordinates returned are not guaranteed to match those returned by + SciPy. + + .. seealso:: :func:`scipy.ndimage.minimum_position` + """ + dims = numpy.asarray(input.shape) + # see numpy.unravel_index to understand this line. + dim_prod = numpy.cumprod([1] + list(dims[:0:-1]))[::-1] + + result = _select(input, labels, index, find_min_positions=True)[0] + + # have to transfer result back to the CPU to return index tuples + if result.ndim == 0: + result = int(result) # synchronize + else: + result = cupy.asnumpy(result) # synchronize + + if cupy.isscalar(result): + return tuple((result // dim_prod) % dims) + + return [tuple(v) for v in (result.reshape(-1, 1) // dim_prod) % dims] + + +def maximum_position(input, labels=None, index=None): + """Find the positions of the maximums of the values of an array at labels. + + For each region specified by `labels`, the position of the maximum + value of `input` within the region is returned. + + Args: + input (cupy.ndarray): + Array of values. For each region specified by `labels`, the + maximal values of `input` over the region is computed. + labels (cupy.ndarray, optional): An array of integers marking different + regions over which the position of the maximum value of `input` is + to be computed. `labels` must have the same shape as `input`. If + `labels` is not specified, the location of the first maximum over + the whole array is returned. + + The `labels` argument only works when `index` is specified. 
+ index (array_like, optional): A list of region labels that are taken + into account for finding the location of the maxima. If `index` is + None, the ``first`` maximum over all elements where `labels` is + non-zero is returned. + + The `index` argument only works when `labels` is specified. + + Returns: + Tuple of ints or list of tuples of ints that specify the location of + maxima of `input` over the regions determaxed by `labels` and whose + index is in `index`. + + If `index` or `labels` are not specified, a tuple of ints is returned + specifying the location of the first maximal value of `input`. + + .. note:: + When `input` has multiple identical maxima within a labeled region, + the coordinates returned are not guaranteed to match those returned by + SciPy. + + .. seealso:: :func:`scipy.ndimage.maximum_position` + """ + dims = numpy.asarray(input.shape) + # see numpy.unravel_index to understand this line. + dim_prod = numpy.cumprod([1] + list(dims[:0:-1]))[::-1] + + result = _select(input, labels, index, find_max_positions=True)[0] + + # have to transfer result back to the CPU to return index tuples + if result.ndim == 0: + result = int(result) + else: + result = cupy.asnumpy(result) + + if cupy.isscalar(result): + return tuple((result // dim_prod) % dims) + + return [tuple(v) for v in (result.reshape(-1, 1) // dim_prod) % dims] + + +def extrema(input, labels=None, index=None): + """Calculate the minimums and maximums of the values of an array at labels, + along with their positions. + + Args: + input (cupy.ndarray): N-D image data to process. + labels (cupy.ndarray, optional): Labels of features in input. If not + None, must be same shape as `input`. + index (int or sequence of ints, optional): Labels to include in output. + If None (default), all values where non-zero `labels` are used. + + Returns: + A tuple that contains the following values. + + **minimums (cupy.ndarray)**: Values of minimums in each feature. 
+ + **maximums (cupy.ndarray)**: Values of maximums in each feature. + + **min_positions (tuple or list of tuples)**: Each tuple gives the N-D + coordinates of the corresponding minimum. + + **max_positions (tuple or list of tuples)**: Each tuple gives the N-D + coordinates of the corresponding maximum. + + .. seealso:: :func:`scipy.ndimage.extrema` + """ + dims = numpy.array(input.shape) + # see numpy.unravel_index to understand this line. + dim_prod = numpy.cumprod([1] + list(dims[:0:-1]))[::-1] + + minimums, min_positions, maximums, max_positions = _select( + input, + labels, + index, + find_min=True, + find_max=True, + find_min_positions=True, + find_max_positions=True, + ) + + if min_positions.ndim == 0: + # scalar output case + min_positions = min_positions.item() + max_positions = max_positions.item() + return ( + minimums, + maximums, + tuple((min_positions // dim_prod) % dims), + tuple((max_positions // dim_prod) % dims), + ) + + # convert indexes to tuples on the host + min_positions = cupy.asnumpy(min_positions) + max_positions = cupy.asnumpy(max_positions) + min_positions = [ + tuple(v) for v in (min_positions.reshape(-1, 1) // dim_prod) % dims + ] + max_positions = [ + tuple(v) for v in (max_positions.reshape(-1, 1) // dim_prod) % dims + ] + + return minimums, maximums, min_positions, max_positions + + +def center_of_mass(input, labels=None, index=None): + """ + Calculate the center of mass of the values of an array at labels. + + Args: + input (cupy.ndarray): Data from which to calculate center-of-mass. The + masses can either be positive or negative. + labels (cupy.ndarray, optional): Labels for objects in `input`, as + enerated by `ndimage.label`. Only used with `index`. Dimensions + must be the same as `input`. + index (int or sequence of ints, optional): Labels for which to + calculate centers-of-mass. If not specified, all labels greater + than zero are used. Only used with `labels`. 
+ + Returns: + tuple or list of tuples: Coordinates of centers-of-mass. + + .. seealso:: :func:`scipy.ndimage.center_of_mass` + """ + normalizer = sum(input, labels, index) + grids = cupy.ogrid[[slice(0, i) for i in input.shape]] + + results = [ + sum(input * grids[dir].astype(float), labels, index) / normalizer + for dir in range(input.ndim) + ] + + # have to transfer 0-dim array back to CPU? + # may want to modify to avoid this + is_0dim_array = ( + isinstance(results[0], cupy.ndarray) and results[0].ndim == 0 + ) + if is_0dim_array: + # tuple of 0-dimensional cupy arrays + return tuple(res for res in results) + # list of cupy coordinate arrays + return [v for v in cupy.stack(results, axis=-1)] + + +def labeled_comprehension( + input, labels, index, func, out_dtype, default, pass_positions=False +): + """Array resulting from applying ``func`` to each labeled region. + + Roughly equivalent to [func(input[labels == i]) for i in index]. + + Sequentially applies an arbitrary function (that works on array_like input) + to subsets of an N-D image array specified by `labels` and `index`. + The option exists to provide the function with positional parameters as the + second argument. + + Args: + input (cupy.ndarray): Data from which to select `labels` to process. + labels (cupy.ndarray or None): Labels to objects in `input`. If not + None, array must be same shape as `input`. If None, `func` is + applied to raveled `input`. + index (int, sequence of ints or None): Subset of `labels` to which to + apply `func`. If a scalar, a single value is returned. If None, + `func` is applied to all non-zero values of `labels`. + func (callable): Python function to apply to `labels` from `input`. + out_dtype (dtype): Dtype to use for `result`. + default (int, float or None): Default return value when a element of + `index` does not exist in `labels`. + pass_positions (bool, optional): If True, pass linear indices to `func` + as a second argument. 
+ + Returns: + cupy.ndarray: Result of applying `func` to each of `labels` to `input` + in `index`. + + .. seealso:: :func:`scipy.ndimage.labeled_comprehension` + """ + as_scalar = cupy.isscalar(index) + input = cupy.asarray(input) + + if pass_positions: + positions = cupy.arange(input.size).reshape(input.shape) + + if labels is None: + if index is not None: + raise ValueError('index without defined labels') + if not pass_positions: + return func(input.ravel()) + else: + return func(input.ravel(), positions.ravel()) + + try: + input, labels = cupy.broadcast_arrays(input, labels) + except ValueError: + raise ValueError( + 'input and labels must have the same shape ' + '(excepting dimensions with width 1)' + ) + + if index is None: + if not pass_positions: + return func(input[labels > 0]) + else: + return func(input[labels > 0], positions[labels > 0]) + + index = cupy.atleast_1d(index) + if cupy.any(index.astype(labels.dtype).astype(index.dtype) != index): + raise ValueError( + 'Cannot convert index values from <%s> to <%s> ' + '(labels.dtype) without loss of precision' + % (index.dtype, labels.dtype) + ) + + index = index.astype(labels.dtype) + + # optimization: find min/max in index, and select those parts of labels, + # input, and positions + lo = index.min() + hi = index.max() + mask = (labels >= lo) & (labels <= hi) + + # this also ravels the arrays + labels = labels[mask] + input = input[mask] + if pass_positions: + positions = positions[mask] + + # sort everything by labels + label_order = labels.argsort() + labels = labels[label_order] + input = input[label_order] + if pass_positions: + positions = positions[label_order] + + index_order = index.argsort() + sorted_index = index[index_order] + + def do_map(inputs, output): + """labels must be sorted""" + nidx = sorted_index.size + + # Find boundaries for each stretch of constant labels + # This could be faster, but we already paid N log N to sort labels. 
+ lo = cupy.searchsorted(labels, sorted_index, side='left') + hi = cupy.searchsorted(labels, sorted_index, side='right') + + for i, low, high in zip(range(nidx), lo, hi): + if low == high: + continue + output[i] = func(*[inp[low:high] for inp in inputs]) + + if out_dtype == object: # noqa: E721 + temp = {i: default for i in range(index.size)} + else: + temp = cupy.empty(index.shape, out_dtype) + if default is None and temp.dtype.kind in 'fc': + default = numpy.nan # match NumPy floating-point None behavior + temp[:] = default + + if not pass_positions: + do_map([input], temp) + else: + do_map([input, positions], temp) + + if out_dtype == object: # noqa: E721 + # use a list of arrays since object arrays are not supported + index_order = cupy.asnumpy(index_order) + output = [temp[i] for i in index_order.argsort()] + else: + output = cupy.zeros(index.shape, out_dtype) + output[cupy.asnumpy(index_order)] = temp + if as_scalar: + output = output[0] + return output + + +def histogram(input, min, max, bins, labels=None, index=None): + """Calculate the histogram of the values of an array, optionally at labels. + + Histogram calculates the frequency of values in an array within bins + determined by `min`, `max`, and `bins`. The `labels` and `index` + keywords can limit the scope of the histogram to specified sub-regions + within the array. + + Args: + input (cupy.ndarray): Data for which to calculate histogram. + min (int): Minimum values of range of histogram bins. + max (int): Maximum values of range of histogram bins. + bins (int): Number of bins. + labels (cupy.ndarray, optional): Labels for objects in `input`. If not + None, must be same shape as `input`. + index (int or sequence of ints, optional): Label or labels for which to + calculate histogram. If None, all values where label is greater + than zero are used. + + Returns: + cupy.ndarray: Histogram counts. + + .. 
seealso:: :func:`scipy.ndimage.histogram` + """ + _bins = cupy.linspace(min, max, bins + 1) + + def _hist(vals): + return cupy.histogram(vals, _bins)[0] + + return labeled_comprehension( + input, labels, index, _hist, object, None, pass_positions=False + ) + + +def value_indices(arr, *, ignore_value=None, adaptive_index_dtype=False): + """ + Find indices of each distinct value in given array. + + Parameters + ---------- + arr : ndarray of ints + Array containing integer values. + ignore_value : int, optional + This value will be ignored in searching the `arr` array. If not + given, all values found will be included in output. Default + is None. + adaptive_index_dtype : bool, optional + If ``True``, instead of returning the default CuPy signed integer + dtype, the smallest signed integer dtype capable of representing the + image coordinate range will be used. This can substantially reduce + memory usage and slightly reduce runtime. Note that this optional + parameter is not available in the SciPy API. + + Returns + ------- + indices : dictionary + A Python dictionary of array indices for each distinct value. The + dictionary is keyed by the distinct values, the entries are array + index tuples covering all occurrences of the value within the + array. + + This dictionary can occupy significant memory, often several times + the size of the input array. To help reduce memory overhead, the + argument `adaptive_index_dtype` can be set to ``True``. + + Notes + ----- + For a small array with few distinct values, one might use + `numpy.unique()` to find all possible values, and ``(arr == val)`` to + locate each value within that array. However, for large arrays, + with many distinct values, this can become extremely inefficient, + as locating each value would require a new search through the entire + array. Using this function, there is essentially one search, with + the indices saved for all distinct values. + + This is useful when matching a categorical image (e.g. 
a segmentation + or classification) to an associated image of other data, allowing + any per-class statistic(s) to then be calculated. Provides a + more flexible alternative to functions like ``scipy.ndimage.mean()`` + and ``scipy.ndimage.variance()``. + + Some other closely related functionality, with different strengths and + weaknesses, can also be found in ``scipy.stats.binned_statistic()`` and + the `scikit-image `_ function + ``skimage.measure.regionprops()``. + + Note for IDL users: this provides functionality equivalent to IDL's + REVERSE_INDICES option (as per the IDL documentation for the + `HISTOGRAM `_ + function). + + .. versionadded:: 1.10.0 + + See Also + -------- + label, maximum, median, minimum_position, extrema, sum, mean, variance, + standard_deviation, cupy.where, cupy.unique + + Examples + -------- + >>> import cupy + >>> from cupyx.scipy import ndimage + >>> a = cupy.zeros((6, 6), dtype=int) + >>> a[2:4, 2:4] = 1 + >>> a[4, 4] = 1 + >>> a[:2, :3] = 2 + >>> a[0, 5] = 3 + >>> a + array([[2, 2, 2, 0, 0, 3], + [2, 2, 2, 0, 0, 0], + [0, 0, 1, 1, 0, 0], + [0, 0, 1, 1, 0, 0], + [0, 0, 0, 0, 1, 0], + [0, 0, 0, 0, 0, 0]]) + >>> val_indices = ndimage.value_indices(a) + + The dictionary `val_indices` will have an entry for each distinct + value in the input array. + + >>> val_indices.keys() + dict_keys([0, 1, 2, 3]) + + The entry for each value is an index tuple, locating the elements + with that value. + + >>> ndx1 = val_indices[1] + >>> ndx1 + (array([2, 2, 3, 3, 4]), array([2, 3, 2, 3, 4])) + + This can be used to index into the original array, or any other + array with the same shape. + + >>> a[ndx1] + array([1, 1, 1, 1, 1]) + + If the zeros were to be ignored, then the resulting dictionary + would no longer have an entry for zero. 
+ + >>> val_indices = ndimage.value_indices(a, ignore_value=0) + >>> val_indices.keys() + dict_keys([1, 2, 3]) + + """ + if arr.dtype.kind not in 'iu': + raise ValueError('Parameter \'arr\' must be an integer array') + if adaptive_index_dtype: + # determined the minimum signed integer type needed to store the + # index rangle + raveled_int_type = cupy.min_scalar_type(-(int(arr.size) + 1)) + coord_int_type = cupy.min_scalar_type(-(max(arr.shape) + 1)) + arr1d = arr.reshape(-1) + counts = cupy.bincount(arr1d) + + isort = cupy.argsort(arr1d, axis=None) + if adaptive_index_dtype: + isort = isort.astype(raveled_int_type, copy=False) + + coords = cupy.unravel_index(isort, arr.shape) + if adaptive_index_dtype: + coords = tuple(c.astype(coord_int_type, copy=False) for c in coords) + + offset = 0 + out = {} + counts = cupy.asnumpy(counts) # need the counts on the host + for value, count in enumerate(counts): + if count == 0: + continue + elif value == ignore_value: + offset += count + continue + out[value] = tuple(c[offset:offset + count] for c in coords) + offset += count + return out diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/_morphology.py b/vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/_morphology.py new file mode 100644 index 0000000000000000000000000000000000000000..733d904ae3f3d7468b9757a77809ea39eb2b50a0 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/_morphology.py @@ -0,0 +1,1017 @@ +import operator +import warnings + +import numpy + +import cupy +from cupy import _core + +from cupyx.scipy.ndimage import _filters_core +from cupyx.scipy.ndimage import _util +from cupyx.scipy.ndimage import _filters + + +@cupy.memoize(for_each_device=True) +def _get_binary_erosion_kernel( + w_shape, int_type, offsets, center_is_true, border_value, invert, masked, + all_weights_nonzero +): + if invert: + border_value = int(not border_value) + true_val = 0 + false_val = 1 + else: + true_val = 1 + false_val = 0 + + if masked: + 
pre = """ + bool mv = (bool)mask[i]; + bool _in = (bool)x[i]; + if (!mv) {{ + y = cast(_in); + return; + }} else if ({center_is_true} && _in == {false_val}) {{ + y = cast(_in); + return; + }}""".format(center_is_true=int(center_is_true), + false_val=false_val) + else: + pre = """ + bool _in = (bool)x[i]; + if ({center_is_true} && _in == {false_val}) {{ + y = cast(_in); + return; + }}""".format(center_is_true=int(center_is_true), + false_val=false_val) + pre = pre + """ + y = cast({true_val});""".format(true_val=true_val) + + # {{{{ required because format is called again within _generate_nd_kernel + found = """ + if ({{cond}}) {{{{ + if (!{border_value}) {{{{ + y = cast({false_val}); + return; + }}}} + }}}} else {{{{ + bool nn = {{value}} ? {true_val} : {false_val}; + if (!nn) {{{{ + y = cast({false_val}); + return; + }}}} + }}}}""".format(true_val=int(true_val), + false_val=int(false_val), + border_value=int(border_value),) + + name = 'binary_erosion' + if false_val: + name += '_invert' + return _filters_core._generate_nd_kernel( + name, + pre, + found, + '', + 'constant', w_shape, int_type, offsets, 0, ctype='Y', has_weights=True, + has_structure=False, has_mask=masked, binary_morphology=True, + all_weights_nonzero=all_weights_nonzero) + + +def _center_is_true(structure, origin): + coor = tuple([oo + ss // 2 for ss, oo in zip(structure.shape, origin)]) + return bool(structure[coor]) # device synchronization + + +def iterate_structure(structure, iterations, origin=None): + """Iterate a structure by dilating it with itself. + + Args: + structure(array_like): Structuring element (an array of bools, + for example), to be dilated with itself. + iterations(int): The number of dilations performed on the structure + with itself. + origin(int or tuple of int, optional): If origin is None, only the + iterated structure is returned. If not, a tuple of the iterated + structure and the modified origin is returned. 
+ + Returns: + cupy.ndarray: A new structuring element obtained by dilating + ``structure`` (``iterations`` - 1) times with itself. + + .. seealso:: :func:`scipy.ndimage.iterate_structure` + """ + if iterations < 2: + return structure.copy() + ni = iterations - 1 + shape = [ii + ni * (ii - 1) for ii in structure.shape] + pos = [ni * (structure.shape[ii] // 2) for ii in range(len(shape))] + slc = tuple( + slice(pos[ii], pos[ii] + structure.shape[ii], None) + for ii in range(len(shape)) + ) + out = cupy.zeros(shape, bool) + out[slc] = structure != 0 + out = binary_dilation(out, structure, iterations=ni) + if origin is None: + return out + else: + origin = _util._fix_sequence_arg(origin, structure.ndim, 'origin', int) + origin = [iterations * o for o in origin] + return out, origin + + +def generate_binary_structure(rank, connectivity): + """Generate a binary structure for binary morphological operations. + + Args: + rank(int): Number of dimensions of the array to which the structuring + element will be applied, as returned by ``np.ndim``. + connectivity(int): ``connectivity`` determines which elements of the + output array belong to the structure, i.e., are considered as + neighbors of the central element. Elements up to a squared distance + of ``connectivity`` from the center are considered neighbors. + ``connectivity`` may range from 1 (no diagonal elements are + neighbors) to ``rank`` (all elements are neighbors). + + Returns: + cupy.ndarray: Structuring element which may be used for binary + morphological operations, with ``rank`` dimensions and all + dimensions equal to 3. + + .. 
seealso:: :func:`scipy.ndimage.generate_binary_structure` + """ + if connectivity < 1: + connectivity = 1 + if rank < 1: + return cupy.asarray(True, dtype=bool) + output = numpy.fabs(numpy.indices([3] * rank) - 1) + output = numpy.add.reduce(output, 0) + output = output <= connectivity + return cupy.asarray(output) + + +def _binary_erosion(input, structure, iterations, mask, output, border_value, + origin, invert, brute_force=True): + try: + iterations = operator.index(iterations) + except TypeError: + raise TypeError('iterations parameter should be an integer') + + if input.dtype.kind == 'c': + raise TypeError('Complex type not supported') + if structure is None: + structure = generate_binary_structure(input.ndim, 1) + all_weights_nonzero = input.ndim == 1 + center_is_true = True + default_structure = True + else: + structure = structure.astype(dtype=bool, copy=False) + # transfer to CPU for use in determining if it is fully dense + # structure_cpu = cupy.asnumpy(structure) + default_structure = False + if structure.ndim != input.ndim: + raise RuntimeError('structure and input must have same dimensionality') + if not structure.flags.c_contiguous: + structure = cupy.ascontiguousarray(structure) + if structure.size < 1: + raise RuntimeError('structure must not be empty') + + if mask is not None: + if mask.shape != input.shape: + raise RuntimeError('mask and input must have equal sizes') + if not mask.flags.c_contiguous: + mask = cupy.ascontiguousarray(mask) + masked = True + else: + masked = False + origin = _util._fix_sequence_arg(origin, input.ndim, 'origin', int) + + if isinstance(output, cupy.ndarray): + if output.dtype.kind == 'c': + raise TypeError('Complex output type not supported') + else: + output = bool + output = _util._get_output(output, input) + temp_needed = cupy.shares_memory(output, input, 'MAY_SHARE_BOUNDS') + if temp_needed: + # input and output arrays cannot share memory + temp = output + output = _util._get_output(output.dtype, input) + if 
structure.ndim == 0: + # kernel doesn't handle ndim=0, so special case it here + if float(structure): + output[...] = cupy.asarray(input, dtype=bool) + else: + output[...] = ~cupy.asarray(input, dtype=bool) + return output + origin = tuple(origin) + int_type = _util._get_inttype(input) + offsets = _filters_core._origins_to_offsets(origin, structure.shape) + if not default_structure: + # synchronize required to determine if all weights are non-zero + nnz = int(cupy.count_nonzero(structure)) + all_weights_nonzero = nnz == structure.size + if all_weights_nonzero: + center_is_true = True + else: + center_is_true = _center_is_true(structure, origin) + + erode_kernel = _get_binary_erosion_kernel( + structure.shape, int_type, offsets, center_is_true, border_value, + invert, masked, all_weights_nonzero, + ) + + if iterations == 1: + if masked: + output = erode_kernel(input, structure, mask, output) + else: + output = erode_kernel(input, structure, output) + elif center_is_true and not brute_force: + raise NotImplementedError( + 'only brute_force iteration has been implemented' + ) + else: + if cupy.shares_memory(output, input, 'MAY_SHARE_BOUNDS'): + raise ValueError('output and input may not overlap in memory') + tmp_in = cupy.empty_like(input, dtype=output.dtype) + tmp_out = output + if iterations >= 1 and not iterations & 1: + tmp_in, tmp_out = tmp_out, tmp_in + if masked: + tmp_out = erode_kernel(input, structure, mask, tmp_out) + else: + tmp_out = erode_kernel(input, structure, tmp_out) + # TODO: kernel doesn't return the changed status, so determine it here + changed = not (input == tmp_out).all() # synchronize! + ii = 1 + while ii < iterations or ((iterations < 1) and changed): + tmp_in, tmp_out = tmp_out, tmp_in + if masked: + tmp_out = erode_kernel(tmp_in, structure, mask, tmp_out) + else: + tmp_out = erode_kernel(tmp_in, structure, tmp_out) + changed = not (tmp_in == tmp_out).all() + ii += 1 + if not changed and (not ii & 1): # synchronize! 
+ # can exit early if nothing changed + # (only do this after even number of tmp_in/out swaps) + break + output = tmp_out + if temp_needed: + _core.elementwise_copy(output, temp) + output = temp + return output + + +def binary_erosion(input, structure=None, iterations=1, mask=None, output=None, + border_value=0, origin=0, brute_force=False): + """Multidimensional binary erosion with a given structuring element. + + Binary erosion is a mathematical morphology operation used for image + processing. + + Args: + input(cupy.ndarray): The input binary array_like to be eroded. + Non-zero (True) elements form the subset to be eroded. + structure(cupy.ndarray, optional): The structuring element used for the + erosion. Non-zero elements are considered True. If no structuring + element is provided an element is generated with a square + connectivity equal to one. (Default value = None). + iterations(int, optional): The erosion is repeated ``iterations`` times + (one, by default). If iterations is less than 1, the erosion is + repeated until the result does not change anymore. Only an integer + of iterations is accepted. + mask(cupy.ndarray or None, optional): If a mask is given, only those + elements with a True value at the corresponding mask element are + modified at each iteration. (Default value = None) + output(cupy.ndarray, optional): Array of the same shape as input, into + which the output is placed. By default, a new array is created. + border_value(int (cast to 0 or 1), optional): Value at the + border in the output array. (Default value = 0) + origin(int or tuple of ints, optional): Placement of the filter, by + default 0. + brute_force(boolean, optional): Memory condition: if False, only the + pixels whose value was changed in the last iteration are tracked as + candidates to be updated (eroded) in the current iteration; if + True all pixels are considered as candidates for erosion, + regardless of what happened in the previous iteration. 
+ + Returns: + cupy.ndarray: The result of binary erosion. + + .. warning:: + + This function may synchronize the device. + + .. seealso:: :func:`scipy.ndimage.binary_erosion` + """ + return _binary_erosion(input, structure, iterations, mask, output, + border_value, origin, 0, brute_force) + + +def binary_dilation(input, structure=None, iterations=1, mask=None, + output=None, border_value=0, origin=0, brute_force=False): + """Multidimensional binary dilation with the given structuring element. + + Args: + input(cupy.ndarray): The input binary array_like to be dilated. + Non-zero (True) elements form the subset to be dilated. + structure(cupy.ndarray, optional): The structuring element used for the + dilation. Non-zero elements are considered True. If no structuring + element is provided an element is generated with a square + connectivity equal to one. (Default value = None). + iterations(int, optional): The dilation is repeated ``iterations`` + times (one, by default). If iterations is less than 1, the dilation + is repeated until the result does not change anymore. Only an + integer of iterations is accepted. + mask(cupy.ndarray or None, optional): If a mask is given, only those + elements with a True value at the corresponding mask element are + modified at each iteration. (Default value = None) + output(cupy.ndarray, optional): Array of the same shape as input, into + which the output is placed. By default, a new array is created. + border_value(int (cast to 0 or 1), optional): Value at the + border in the output array. (Default value = 0) + origin(int or tuple of ints, optional): Placement of the filter, by + default 0. + brute_force(boolean, optional): Memory condition: if False, only the + pixels whose value was changed in the last iteration are tracked as + candidates to be updated (dilated) in the current iteration; if + True all pixels are considered as candidates for dilation, + regardless of what happened in the previous iteration. 
+ + Returns: + cupy.ndarray: The result of binary dilation. + + .. warning:: + + This function may synchronize the device. + + .. seealso:: :func:`scipy.ndimage.binary_dilation` + """ + if structure is None: + structure = generate_binary_structure(input.ndim, 1) + origin = _util._fix_sequence_arg(origin, input.ndim, 'origin', int) + structure = structure[tuple([slice(None, None, -1)] * structure.ndim)] + for ii in range(len(origin)): + origin[ii] = -origin[ii] + if not structure.shape[ii] & 1: + origin[ii] -= 1 + return _binary_erosion(input, structure, iterations, mask, output, + border_value, origin, 1, brute_force) + + +def binary_opening(input, structure=None, iterations=1, output=None, origin=0, + mask=None, border_value=0, brute_force=False): + """ + Multidimensional binary opening with the given structuring element. + + The *opening* of an input image by a structuring element is the + *dilation* of the *erosion* of the image by the structuring element. + + Args: + input(cupy.ndarray): The input binary array to be opened. + Non-zero (True) elements form the subset to be opened. + structure(cupy.ndarray, optional): The structuring element used for the + opening. Non-zero elements are considered True. If no structuring + element is provided an element is generated with a square + connectivity equal to one. (Default value = None). + iterations(int, optional): The opening is repeated ``iterations`` times + (one, by default). If iterations is less than 1, the opening is + repeated until the result does not change anymore. Only an integer + of iterations is accepted. + output(cupy.ndarray, optional): Array of the same shape as input, into + which the output is placed. By default, a new array is created. + origin(int or tuple of ints, optional): Placement of the filter, by + default 0. + mask(cupy.ndarray or None, optional): If a mask is given, only those + elements with a True value at the corresponding mask element are + modified at each iteration. 
(Default value = None) + border_value(int (cast to 0 or 1), optional): Value at the + border in the output array. (Default value = 0) + brute_force(boolean, optional): Memory condition: if False, only the + pixels whose value was changed in the last iteration are tracked as + candidates to be updated (dilated) in the current iteration; if + True all pixels are considered as candidates for opening, + regardless of what happened in the previous iteration. + + Returns: + cupy.ndarray: The result of binary opening. + + .. warning:: + + This function may synchronize the device. + + .. seealso:: :func:`scipy.ndimage.binary_opening` + """ + if structure is None: + rank = input.ndim + structure = generate_binary_structure(rank, 1) + tmp = binary_erosion(input, structure, iterations, mask, None, + border_value, origin, brute_force) + return binary_dilation(tmp, structure, iterations, mask, output, + border_value, origin, brute_force) + + +def binary_closing(input, structure=None, iterations=1, output=None, origin=0, + mask=None, border_value=0, brute_force=False): + """ + Multidimensional binary closing with the given structuring element. + + The *closing* of an input image by a structuring element is the + *erosion* of the *dilation* of the image by the structuring element. + + Args: + input(cupy.ndarray): The input binary array to be closed. + Non-zero (True) elements form the subset to be closed. + structure(cupy.ndarray, optional): The structuring element used for the + closing. Non-zero elements are considered True. If no structuring + element is provided an element is generated with a square + connectivity equal to one. (Default value = None). + iterations(int, optional): The closing is repeated ``iterations`` times + (one, by default). If iterations is less than 1, the closing is + repeated until the result does not change anymore. Only an integer + of iterations is accepted. 
+ output(cupy.ndarray, optional): Array of the same shape as input, into + which the output is placed. By default, a new array is created. + origin(int or tuple of ints, optional): Placement of the filter, by + default 0. + mask(cupy.ndarray or None, optional): If a mask is given, only those + elements with a True value at the corresponding mask element are + modified at each iteration. (Default value = None) + border_value(int (cast to 0 or 1), optional): Value at the + border in the output array. (Default value = 0) + brute_force(boolean, optional): Memory condition: if False, only the + pixels whose value was changed in the last iteration are tracked as + candidates to be updated (dilated) in the current iteration; if + True all pixels are considered as candidates for closing, + regardless of what happened in the previous iteration. + + Returns: + cupy.ndarray: The result of binary closing. + + .. warning:: + + This function may synchronize the device. + + .. seealso:: :func:`scipy.ndimage.binary_closing` + """ + if structure is None: + rank = input.ndim + structure = generate_binary_structure(rank, 1) + tmp = binary_dilation(input, structure, iterations, mask, None, + border_value, origin, brute_force) + return binary_erosion(tmp, structure, iterations, mask, output, + border_value, origin, brute_force) + + +def binary_hit_or_miss(input, structure1=None, structure2=None, output=None, + origin1=0, origin2=None): + """ + Multidimensional binary hit-or-miss transform. + + The hit-or-miss transform finds the locations of a given pattern + inside the input image. + + Args: + input (cupy.ndarray): Binary image where a pattern is to be detected. + structure1 (cupy.ndarray, optional): Part of the structuring element to + be fitted to the foreground (non-zero elements) of ``input``. If no + value is provided, a structure of square connectivity 1 is chosen. 
+ structure2 (cupy.ndarray, optional): Second part of the structuring + element that has to miss completely the foreground. If no value is + provided, the complementary of ``structure1`` is taken. + output (cupy.ndarray, dtype or None, optional): Array of the same shape + as input, into which the output is placed. By default, a new array + is created. + origin1 (int or tuple of ints, optional): Placement of the first part + of the structuring element ``structure1``, by default 0 for a + centered structure. + origin2 (int or tuple of ints or None, optional): Placement of the + second part of the structuring element ``structure2``, by default 0 + for a centered structure. If a value is provided for ``origin1`` + and not for ``origin2``, then ``origin2`` is set to ``origin1``. + + Returns: + cupy.ndarray: Hit-or-miss transform of ``input`` with the given + structuring element (``structure1``, ``structure2``). + + .. warning:: + + This function may synchronize the device. + + .. seealso:: :func:`scipy.ndimage.binary_hit_or_miss` + """ + if structure1 is None: + structure1 = generate_binary_structure(input.ndim, 1) + if structure2 is None: + structure2 = cupy.logical_not(structure1) + origin1 = _util._fix_sequence_arg(origin1, input.ndim, 'origin1', int) + if origin2 is None: + origin2 = origin1 + else: + origin2 = _util._fix_sequence_arg(origin2, input.ndim, 'origin2', int) + + tmp1 = _binary_erosion(input, structure1, 1, None, None, 0, origin1, 0, + False) + inplace = isinstance(output, cupy.ndarray) + result = _binary_erosion(input, structure2, 1, None, output, 0, origin2, 1, + False) + if inplace: + cupy.logical_not(output, output) + cupy.logical_and(tmp1, output, output) + else: + cupy.logical_not(result, result) + return cupy.logical_and(tmp1, result) + + +def binary_propagation(input, structure=None, mask=None, output=None, + border_value=0, origin=0): + """ + Multidimensional binary propagation with the given structuring element. 
+ + Args: + input (cupy.ndarray): Binary image to be propagated inside ``mask``. + structure (cupy.ndarray, optional): Structuring element used in the + successive dilations. The output may depend on the structuring + element, especially if ``mask`` has several connex components. If + no structuring element is provided, an element is generated with a + squared connectivity equal to one. + mask (cupy.ndarray, optional): Binary mask defining the region into + which ``input`` is allowed to propagate. + output (cupy.ndarray, optional): Array of the same shape as input, into + which the output is placed. By default, a new array is created. + border_value (int, optional): Value at the border in the output array. + The value is cast to 0 or 1. + origin (int or tuple of ints, optional): Placement of the filter. + + Returns: + cupy.ndarray : Binary propagation of ``input`` inside ``mask``. + + .. warning:: + + This function may synchronize the device. + + .. seealso:: :func:`scipy.ndimage.binary_propagation` + """ + return binary_dilation(input, structure, -1, mask, output, border_value, + origin, brute_force=True) + + +def binary_fill_holes(input, structure=None, output=None, origin=0): + """Fill the holes in binary objects. + + Args: + input (cupy.ndarray): N-D binary array with holes to be filled. + structure (cupy.ndarray, optional): Structuring element used in the + computation; large-size elements make computations faster but may + miss holes separated from the background by thin regions. The + default element (with a square connectivity equal to one) yields + the intuitive result where all holes in the input have been filled. + output (cupy.ndarray, dtype or None, optional): Array of the same shape + as input, into which the output is placed. By default, a new array + is created. + origin (int, tuple of ints, optional): Position of the structuring + element. + + Returns: + cupy.ndarray: Transformation of the initial image ``input`` where holes + have been filled. 
+ + .. warning:: + + This function may synchronize the device. + + .. seealso:: :func:`scipy.ndimage.binary_fill_holes` + """ + mask = cupy.logical_not(input) + tmp = cupy.zeros(mask.shape, bool) + inplace = isinstance(output, cupy.ndarray) + # TODO (grlee77): set brute_force=False below once implemented + if inplace: + binary_dilation(tmp, structure, -1, mask, output, 1, origin, + brute_force=True) + cupy.logical_not(output, output) + else: + output = binary_dilation(tmp, structure, -1, mask, None, 1, origin, + brute_force=True) + cupy.logical_not(output, output) + return output + + +def grey_erosion(input, size=None, footprint=None, structure=None, output=None, + mode='reflect', cval=0.0, origin=0): + """Calculates a greyscale erosion. + + Args: + input (cupy.ndarray): The input array. + size (tuple of ints): Shape of a flat and full structuring element used + for the greyscale erosion. Optional if ``footprint`` or + ``structure`` is provided. + footprint (array of ints): Positions of non-infinite elements of a flat + structuring element used for greyscale erosion. Non-zero values + give the set of neighbors of the center over which minimum is + chosen. + structure (array of ints): Structuring element used for the greyscale + erosion. ``structure`` may be a non-flat structuring element. + output (cupy.ndarray, dtype or None): The array in which to place the + output. + mode (str): The array borders are handled according to the given mode + (``'reflect'``, ``'constant'``, ``'nearest'``, ``'mirror'``, + ``'wrap'``). Default is ``'reflect'``. + cval (scalar): Value to fill past edges of input if mode is + ``constant``. Default is ``0.0``. + origin (scalar or tuple of scalar): The origin parameter controls the + placement of the filter, relative to the center of the current + element of the input. Default of 0 is equivalent to + ``(0,)*input.ndim``. + + Returns: + cupy.ndarray: The result of greyscale erosion. + + .. 
seealso:: :func:`scipy.ndimage.grey_erosion` + """ + + if size is None and footprint is None and structure is None: + raise ValueError('size, footprint or structure must be specified') + + return _filters._min_or_max_filter(input, size, footprint, structure, + output, mode, cval, origin, 'min') + + +def grey_dilation(input, size=None, footprint=None, structure=None, + output=None, mode='reflect', cval=0.0, origin=0): + """Calculates a greyscale dilation. + + Args: + input (cupy.ndarray): The input array. + size (tuple of ints): Shape of a flat and full structuring element used + for the greyscale dilation. Optional if ``footprint`` or + ``structure`` is provided. + footprint (array of ints): Positions of non-infinite elements of a flat + structuring element used for greyscale dilation. Non-zero values + give the set of neighbors of the center over which maximum is + chosen. + structure (array of ints): Structuring element used for the greyscale + dilation. ``structure`` may be a non-flat structuring element. + output (cupy.ndarray, dtype or None): The array in which to place the + output. + mode (str): The array borders are handled according to the given mode + (``'reflect'``, ``'constant'``, ``'nearest'``, ``'mirror'``, + ``'wrap'``). Default is ``'reflect'``. + cval (scalar): Value to fill past edges of input if mode is + ``constant``. Default is ``0.0``. + origin (scalar or tuple of scalar): The origin parameter controls the + placement of the filter, relative to the center of the current + element of the input. Default of 0 is equivalent to + ``(0,)*input.ndim``. + + Returns: + cupy.ndarray: The result of greyscale dilation. + + .. 
seealso:: :func:`scipy.ndimage.grey_dilation` + """ + + if size is None and footprint is None and structure is None: + raise ValueError('size, footprint or structure must be specified') + if structure is not None: + structure = cupy.array(structure) + structure = structure[tuple([slice(None, None, -1)] * structure.ndim)] + if footprint is not None: + footprint = cupy.array(footprint) + footprint = footprint[tuple([slice(None, None, -1)] * footprint.ndim)] + + origin = _util._fix_sequence_arg(origin, input.ndim, 'origin', int) + for i in range(len(origin)): + origin[i] = -origin[i] + if footprint is not None: + sz = footprint.shape[i] + elif structure is not None: + sz = structure.shape[i] + elif numpy.isscalar(size): + sz = size + else: + sz = size[i] + if sz % 2 == 0: + origin[i] -= 1 + + return _filters._min_or_max_filter(input, size, footprint, structure, + output, mode, cval, origin, 'max') + + +def grey_closing(input, size=None, footprint=None, structure=None, + output=None, mode='reflect', cval=0.0, origin=0): + """Calculates a multi-dimensional greyscale closing. + + Args: + input (cupy.ndarray): The input array. + size (tuple of ints): Shape of a flat and full structuring element used + for the greyscale closing. Optional if ``footprint`` or + ``structure`` is provided. + footprint (array of ints): Positions of non-infinite elements of a flat + structuring element used for greyscale closing. Non-zero values + give the set of neighbors of the center over which closing is + chosen. + structure (array of ints): Structuring element used for the greyscale + closing. ``structure`` may be a non-flat structuring element. + output (cupy.ndarray, dtype or None): The array in which to place the + output. + mode (str): The array borders are handled according to the given mode + (``'reflect'``, ``'constant'``, ``'nearest'``, ``'mirror'``, + ``'wrap'``). Default is ``'reflect'``. + cval (scalar): Value to fill past edges of input if mode is + ``constant``. 
Default is ``0.0``. + origin (scalar or tuple of scalar): The origin parameter controls the + placement of the filter, relative to the center of the current + element of the input. Default of 0 is equivalent to + ``(0,)*input.ndim``. + + Returns: + cupy.ndarray: The result of greyscale closing. + + .. seealso:: :func:`scipy.ndimage.grey_closing` + """ + if (size is not None) and (footprint is not None): + warnings.warn('ignoring size because footprint is set', UserWarning, + stacklevel=2) + tmp = grey_dilation(input, size, footprint, structure, None, mode, cval, + origin) + return grey_erosion(tmp, size, footprint, structure, output, mode, cval, + origin) + + +def grey_opening(input, size=None, footprint=None, structure=None, + output=None, mode='reflect', cval=0.0, origin=0): + """Calculates a multi-dimensional greyscale opening. + + Args: + input (cupy.ndarray): The input array. + size (tuple of ints): Shape of a flat and full structuring element used + for the greyscale opening. Optional if ``footprint`` or + ``structure`` is provided. + footprint (array of ints): Positions of non-infinite elements of a flat + structuring element used for greyscale opening. Non-zero values + give the set of neighbors of the center over which opening is + chosen. + structure (array of ints): Structuring element used for the greyscale + opening. ``structure`` may be a non-flat structuring element. + output (cupy.ndarray, dtype or None): The array in which to place the + output. + mode (str): The array borders are handled according to the given mode + (``'reflect'``, ``'constant'``, ``'nearest'``, ``'mirror'``, + ``'wrap'``). Default is ``'reflect'``. + cval (scalar): Value to fill past edges of input if mode is + ``constant``. Default is ``0.0``. + origin (scalar or tuple of scalar): The origin parameter controls the + placement of the filter, relative to the center of the current + element of the input. Default of 0 is equivalent to + ``(0,)*input.ndim``. 
+ + Returns: + cupy.ndarray: The result of greyscale opening. + + .. seealso:: :func:`scipy.ndimage.grey_opening` + """ + if (size is not None) and (footprint is not None): + warnings.warn('ignoring size because footprint is set', UserWarning, + stacklevel=2) + tmp = grey_erosion(input, size, footprint, structure, None, mode, cval, + origin) + return grey_dilation(tmp, size, footprint, structure, output, mode, cval, + origin) + + +def morphological_gradient( + input, + size=None, + footprint=None, + structure=None, + output=None, + mode='reflect', + cval=0.0, + origin=0, +): + """ + Multidimensional morphological gradient. + + The morphological gradient is calculated as the difference between a + dilation and an erosion of the input with a given structuring element. + + Args: + input (cupy.ndarray): The input array. + size (tuple of ints): Shape of a flat and full structuring element used + for the morphological gradient. Optional if ``footprint`` or + ``structure`` is provided. + footprint (array of ints): Positions of non-infinite elements of a flat + structuring element used for morphological gradient. Non-zero + values give the set of neighbors of the center over which opening + is chosen. + structure (array of ints): Structuring element used for the + morphological gradient. ``structure`` may be a non-flat + structuring element. + output (cupy.ndarray, dtype or None): The array in which to place the + output. + mode (str): The array borders are handled according to the given mode + (``'reflect'``, ``'constant'``, ``'nearest'``, ``'mirror'``, + ``'wrap'``). Default is ``'reflect'``. + cval (scalar): Value to fill past edges of input if mode is + ``constant``. Default is ``0.0``. + origin (scalar or tuple of scalar): The origin parameter controls the + placement of the filter, relative to the center of the current + element of the input. Default of 0 is equivalent to + ``(0,)*input.ndim``. + + Returns: + cupy.ndarray: The morphological gradient of the input. 
+ + .. seealso:: :func:`scipy.ndimage.morphological_gradient` + """ + tmp = grey_dilation( + input, size, footprint, structure, None, mode, cval, origin + ) + if isinstance(output, cupy.ndarray): + grey_erosion( + input, size, footprint, structure, output, mode, cval, origin + ) + return cupy.subtract(tmp, output, output) + else: + return tmp - grey_erosion( + input, size, footprint, structure, None, mode, cval, origin + ) + + +def morphological_laplace( + input, + size=None, + footprint=None, + structure=None, + output=None, + mode='reflect', + cval=0.0, + origin=0, +): + """ + Multidimensional morphological laplace. + + Args: + input (cupy.ndarray): The input array. + size (tuple of ints): Shape of a flat and full structuring element used + for the morphological laplace. Optional if ``footprint`` or + ``structure`` is provided. + footprint (array of ints): Positions of non-infinite elements of a flat + structuring element used for morphological laplace. Non-zero + values give the set of neighbors of the center over which opening + is chosen. + structure (array of ints): Structuring element used for the + morphological laplace. ``structure`` may be a non-flat + structuring element. + output (cupy.ndarray, dtype or None): The array in which to place the + output. + mode (str): The array borders are handled according to the given mode + (``'reflect'``, ``'constant'``, ``'nearest'``, ``'mirror'``, + ``'wrap'``). Default is ``'reflect'``. + cval (scalar): Value to fill past edges of input if mode is + ``constant``. Default is ``0.0``. + origin (scalar or tuple of scalar): The origin parameter controls the + placement of the filter, relative to the center of the current + element of the input. Default of 0 is equivalent to + ``(0,)*input.ndim``. + + Returns: + cupy.ndarray: The morphological laplace of the input. + + .. 
seealso:: :func:`scipy.ndimage.morphological_laplace` + """ + tmp1 = grey_dilation( + input, size, footprint, structure, None, mode, cval, origin + ) + if isinstance(output, cupy.ndarray): + grey_erosion( + input, size, footprint, structure, output, mode, cval, origin + ) + cupy.add(tmp1, output, output) + cupy.subtract(output, input, output) + return cupy.subtract(output, input, output) + else: + tmp2 = grey_erosion( + input, size, footprint, structure, None, mode, cval, origin + ) + cupy.add(tmp1, tmp2, tmp2) + cupy.subtract(tmp2, input, tmp2) + cupy.subtract(tmp2, input, tmp2) + return tmp2 + + +def white_tophat( + input, + size=None, + footprint=None, + structure=None, + output=None, + mode='reflect', + cval=0.0, + origin=0, +): + """ + Multidimensional white tophat filter. + + Args: + input (cupy.ndarray): The input array. + size (tuple of ints): Shape of a flat and full structuring element used + for the white tophat. Optional if ``footprint`` or ``structure`` is + provided. + footprint (array of ints): Positions of non-infinite elements of a flat + structuring element used for the white tophat. Non-zero values + give the set of neighbors of the center over which opening is + chosen. + structure (array of ints): Structuring element used for the white + tophat. ``structure`` may be a non-flat structuring element. + output (cupy.ndarray, dtype or None): The array in which to place the + output. + mode (str): The array borders are handled according to the given mode + (``'reflect'``, ``'constant'``, ``'nearest'``, ``'mirror'``, + ``'wrap'``). Default is ``'reflect'``. + cval (scalar): Value to fill past edges of input if mode is + ``constant``. Default is ``0.0``. + origin (scalar or tuple of scalar): The origin parameter controls the + placement of the filter, relative to the center of the current + element of the input. Default of 0 is equivalent to + ``(0,)*input.ndim``. + + Returns: + cupy.ndarray: Result of the filter of ``input`` with ``structure``. + + .. 
seealso:: :func:`scipy.ndimage.white_tophat` + """ + if (size is not None) and (footprint is not None): + warnings.warn( + 'ignoring size because footprint is set', UserWarning, stacklevel=2 + ) + tmp = grey_erosion( + input, size, footprint, structure, None, mode, cval, origin + ) + tmp = grey_dilation( + tmp, size, footprint, structure, output, mode, cval, origin + ) + if input.dtype == numpy.bool_ and tmp.dtype == numpy.bool_: + cupy.bitwise_xor(input, tmp, out=tmp) + else: + cupy.subtract(input, tmp, out=tmp) + return tmp + + +def black_tophat( + input, + size=None, + footprint=None, + structure=None, + output=None, + mode='reflect', + cval=0.0, + origin=0, +): + """ + Multidimensional black tophat filter. + + Args: + input (cupy.ndarray): The input array. + size (tuple of ints): Shape of a flat and full structuring element used + for the black tophat. Optional if ``footprint`` or ``structure`` is + provided. + footprint (array of ints): Positions of non-infinite elements of a flat + structuring element used for the black tophat. Non-zero values + give the set of neighbors of the center over which opening is + chosen. + structure (array of ints): Structuring element used for the black + tophat. ``structure`` may be a non-flat structuring element. + output (cupy.ndarray, dtype or None): The array in which to place the + output. + mode (str): The array borders are handled according to the given mode + (``'reflect'``, ``'constant'``, ``'nearest'``, ``'mirror'``, + ``'wrap'``). Default is ``'reflect'``. + cval (scalar): Value to fill past edges of input if mode is + ``constant``. Default is ``0.0``. + origin (scalar or tuple of scalar): The origin parameter controls the + placement of the filter, relative to the center of the current + element of the input. Default of 0 is equivalent to + ``(0,)*input.ndim``. + + Returns: + cupy.ndarry : Result of the filter of ``input`` with ``structure``. + + .. 
seealso:: :func:`scipy.ndimage.black_tophat` + """ + if (size is not None) and (footprint is not None): + warnings.warn( + 'ignoring size because footprint is set', UserWarning, stacklevel=2 + ) + tmp = grey_dilation( + input, size, footprint, structure, None, mode, cval, origin + ) + tmp = grey_erosion( + tmp, size, footprint, structure, output, mode, cval, origin + ) + if input.dtype == numpy.bool_ and tmp.dtype == numpy.bool_: + cupy.bitwise_xor(tmp, input, out=tmp) + else: + cupy.subtract(tmp, input, out=tmp) + return tmp diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/_pba_2d.py b/vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/_pba_2d.py new file mode 100644 index 0000000000000000000000000000000000000000..b94a64a158405dfa1289834617bd8bc3f2984f84 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/_pba_2d.py @@ -0,0 +1,503 @@ +import math +import numbers +import os + +import cupy + +from ._util import _get_inttype + +if hasattr(math, 'lcm'): + lcm = math.lcm +else: + """Fallback implementation of least common multiple (lcm)""" + def _lcm(a, b): + return abs(b * (a // math.gcd(a, b))) + + def lcm(*integers): + nargs = len(integers) + if not all(isinstance(a, numbers.Integral) for a in integers): + raise TypeError("all arguments must be integers") + if nargs == 0: + return 1 + res = int(integers[0]) + if nargs == 1: + return abs(res) + for i in range(1, nargs): + x = int(integers[i]) + res = _lcm(res, x) + return res + + +pba2d_defines_template = """ + +// MARKER is used to mark blank pixels in the texture. +// Any uncolored pixels will have x = MARKER. 
+// Input texture should have x = MARKER for all pixels other than sites +#define MARKER {marker} +#define BLOCKSIZE {block_size_2d} +#define pixel_int2_t {pixel_int2_t} // typically short2 (int2 for images with > 32k pixels per side) +#define make_pixel(x, y) {make_pixel_func}(x, y) // typically make_short2 (make_int2 images with > 32k pixels per side + +""" # noqa + + +def _init_marker(int_dtype): + """use a minimum value that is appropriate to the integer dtype""" + if int_dtype == cupy.int16: + # marker = cupy.iinfo(int_dtype).min + marker = -32768 + elif int_dtype == cupy.int32: + # divide by two so we don't have to promote other intermediate int + # variables to 64-bit int + marker = -2147483648 // 2 + else: + raise ValueError( + "expected int_dtype to be either cupy.int16 or cupy.int32" + ) + return marker + + +@cupy.memoize(True) +def get_pba2d_src(block_size_2d=64, marker=-32768, pixel_int2_t="short2"): + make_pixel_func = "make_" + pixel_int2_t + + pba2d_code = pba2d_defines_template.format( + block_size_2d=block_size_2d, + marker=marker, + pixel_int2_t=pixel_int2_t, + make_pixel_func=make_pixel_func + ) + kernel_directory = os.path.join(os.path.dirname(__file__), "cuda") + with open(os.path.join(kernel_directory, "pba_kernels_2d.h"), "rt") as f: + pba2d_kernels = "\n".join(f.readlines()) + + pba2d_code += pba2d_kernels + return pba2d_code + + +def _get_block_size(check_warp_size=False): + if check_warp_size: + dev = cupy.cuda.runtime.getDevice() + device_properties = cupy.cuda.runtime.getDeviceProperties(dev) + return int(device_properties["warpSize"]) + else: + return 32 + + +@cupy.memoize(for_each_device=True) +def _get_pack_kernel(int_type, marker=-32768): + """Pack coordinates into array of type short2 (or int2). + + This kernel works with 2D input data, `arr` (typically boolean). + + The output array, `out` will be 3D with a signed integer dtype. 
+ It will have size 2 on the last axis so that it can be viewed as a CUDA + vector type such as `int2` or `float2`. + """ + code = f""" + if (arr[i]) {{ + out[2*i] = {marker}; + out[2*i + 1] = {marker}; + }} else {{ + int shape_1 = arr.shape()[1]; + int _i = i; + int ind_1 = _i % shape_1; + _i /= shape_1; + out[2*i] = ind_1; // out.x + out[2*i + 1] = _i; // out.y + }} + """ + return cupy.ElementwiseKernel( + in_params="raw B arr", + out_params="raw I out", + operation=code, + options=("--std=c++11",), + ) + + +def _pack_int2(arr, marker=-32768, int_dtype=cupy.int16): + if arr.ndim != 2: + raise ValueError("only 2d arr supported") + int2_dtype = cupy.dtype({"names": ["x", "y"], "formats": [int_dtype] * 2}) + out = cupy.zeros(arr.shape + (2,), dtype=int_dtype) + assert out.size == 2 * arr.size + pack_kernel = _get_pack_kernel( + int_type="short" if int_dtype == cupy.int16 else "int", + marker=marker + ) + pack_kernel(arr, out, size=arr.size) + out = cupy.squeeze(out.view(int2_dtype)) + return out + + +def _unpack_int2(img, make_copy=False, int_dtype=cupy.int16): + temp = img.view(int_dtype).reshape(img.shape + (2,)) + if make_copy: + temp = temp.copy() + return temp + + +def _determine_padding(shape, padded_size, block_size): + # all kernels assume equal size along both axes, so pad up to equal size if + # shape is not isotropic + orig_sy, orig_sx = shape + if orig_sx != padded_size or orig_sy != padded_size: + padding_width = ( + (0, padded_size - orig_sy), (0, padded_size - orig_sx) + ) + else: + padding_width = None + return padding_width + + +def _generate_shape(ndim, int_type, var_name="out", raw_var=True): + code = "" + if not raw_var: + var_name = "_raw_" + var_name + for i in range(ndim): + code += f"{int_type} shape_{i} = {var_name}.shape()[{i}];\n" + return code + + +def _generate_indices_ops(ndim, int_type): + code = f"{int_type} _i = i;\n" + for j in range(ndim - 1, 0, -1): + code += f"{int_type} ind_{j} = _i % shape_{j};\n_i /= shape_{j};\n" + code += 
f"{int_type} ind_0 = _i;" + return code + + +def _get_distance_kernel_code(int_type, dist_int_type, raw_out_var=True): + code = _generate_shape( + ndim=2, int_type=int_type, var_name="dist", raw_var=raw_out_var + ) + code += _generate_indices_ops(ndim=2, int_type=int_type) + code += f""" + {int_type} tmp; + {dist_int_type} sq_dist; + tmp = y[i] - ind_0; + sq_dist = tmp * tmp; + tmp = x[i] - ind_1; + sq_dist += tmp * tmp; + dist[i] = sqrt(static_cast(sq_dist)); + """ + return code + + +@cupy.memoize(for_each_device=True) +def _get_distance_kernel(int_type, dist_int_type): + """Returns kernel computing the Euclidean distance from coordinates.""" + operation = _get_distance_kernel_code( + int_type, dist_int_type, raw_out_var=True + ) + return cupy.ElementwiseKernel( + in_params="raw I y, raw I x", + out_params="raw F dist", + operation=operation, + options=("--std=c++11",), + ) + + +def _get_aniso_distance_kernel_code(int_type, raw_out_var=True): + code = _generate_shape( + ndim=2, int_type=int_type, var_name="dist", raw_var=raw_out_var + ) + code += _generate_indices_ops(ndim=2, int_type=int_type) + code += """ + F tmp; + F sq_dist; + tmp = static_cast(y[i] - ind_0) * sampling[0]; + sq_dist = tmp * tmp; + tmp = static_cast(x[i] - ind_1) * sampling[1]; + sq_dist += tmp * tmp; + dist[i] = sqrt(sq_dist); + """ + return code + + +@cupy.memoize(for_each_device=True) +def _get_aniso_distance_kernel(int_type): + """Returns kernel computing the Euclidean distance from coordinates.""" + operation = _get_aniso_distance_kernel_code(int_type, raw_out_var=True) + return cupy.ElementwiseKernel( + in_params="raw I y, raw I x, raw F sampling", + out_params="raw F dist", + operation=operation, + options=("--std=c++11",), + ) + + +def _distance_tranform_arg_check(distances_out, indices_out, + return_distances, return_indices): + """Raise a RuntimeError if the arguments are invalid""" + error_msgs = [] + if (not return_distances) and (not return_indices): + error_msgs.append( + "at 
least one of return_distances/return_indices must be True") + if distances_out and not return_distances: + error_msgs.append( + "return_distances must be True if distances is supplied" + ) + if indices_out and not return_indices: + error_msgs.append("return_indices must be True if indices is supplied") + if error_msgs: + raise RuntimeError(", ".join(error_msgs)) + + +def _check_distances(distances, shape, dtype): + if distances.shape != shape: + raise RuntimeError("distances array has wrong shape") + if distances.dtype != dtype: + raise RuntimeError( + f"distances array must have dtype: {dtype}") + + +def _check_indices(indices, shape, itemsize): + if indices.shape != shape: + raise RuntimeError("indices array has wrong shape") + if indices.dtype.kind not in 'iu': + raise RuntimeError( + "indices array must have an integer dtype" + ) + elif indices.dtype.itemsize < itemsize: + raise RuntimeError( + f"indices dtype must have itemsize > {itemsize}" + ) + + +def _pba_2d(arr, sampling=None, return_distances=True, return_indices=False, + block_params=None, check_warp_size=False, *, + float64_distances=False, distances=None, indices=None): + + indices_inplace = isinstance(indices, cupy.ndarray) + dt_inplace = isinstance(distances, cupy.ndarray) + _distance_tranform_arg_check( + dt_inplace, indices_inplace, return_distances, return_indices + ) + + # input_arr: a 2D image + # For each site at (x, y), the pixel at coordinate (x, y) should contain + # the pair (x, y). Pixels that are not sites should contain the pair + # (MARKER, MARKER) + + # Note: could query warp size here, but for now just assume 32 to avoid + # overhead of querying properties + block_size = _get_block_size(check_warp_size) + + if block_params is None: + padded_size = math.ceil(max(arr.shape) / block_size) * block_size + + # should be <= size / block_size. 
sy must be a multiple of m1 + m1 = padded_size // block_size + # size must be a multiple of m2 + m2 = max(1, min(padded_size // block_size, block_size)) + # m2 must also be a power of two + m2 = 2**math.floor(math.log2(m2)) + if padded_size % m2 != 0: + raise RuntimeError("error in setting default m2") + m3 = min(min(m1, m2), 2) + else: + if any(p < 1 for p in block_params): + raise ValueError("(m1, m2, m3) in blockparams must be >= 1") + m1, m2, m3 = block_params + if math.log2(m2) % 1 > 1e-5: + raise ValueError("m2 must be a power of 2") + multiple = lcm(block_size, m1, m2, m3) + padded_size = math.ceil(max(arr.shape) / multiple) * multiple + + if m1 > padded_size // block_size: + raise ValueError( + f"m1 too large. must be <= padded arr.shape[0] // {block_size}" + ) + if m2 > padded_size // block_size: + raise ValueError( + f"m2 too large. must be <= padded arr.shape[1] // {block_size}" + ) + if m3 > padded_size // block_size: + raise ValueError( + f"m3 too large. must be <= padded arr.shape[1] // {block_size}" + ) + for m in (m1, m2, m3): + if padded_size % m != 0: + raise ValueError( + f"Largest dimension of image ({padded_size}) must be evenly " + f"disivible by each element of block_params: {(m1, m2, m3)}." 
+ ) + + shape_max = max(arr.shape) + if shape_max <= 32768: + int_dtype = cupy.int16 + pixel_int2_type = "short2" + else: + if shape_max > (1 << 24): + # limit to coordinate range to 2**24 due to use of __mul24 in + # coordinate TOID macro + raise ValueError( + f"maximum axis size of {1 << 24} exceeded, for image with " + f"shape {arr.shape}" + ) + int_dtype = cupy.int32 + pixel_int2_type = "int2" + + marker = _init_marker(int_dtype) + + orig_sy, orig_sx = arr.shape + padding_width = _determine_padding(arr.shape, padded_size, block_size) + if padding_width is not None: + arr = cupy.pad(arr, padding_width, mode="constant", constant_values=1) + size = arr.shape[0] + + input_arr = _pack_int2(arr, marker=marker, int_dtype=int_dtype) + output = cupy.zeros_like(input_arr) + + int2_dtype = cupy.dtype({"names": ["x", "y"], "formats": [int_dtype] * 2}) + margin = cupy.empty((2 * m1 * size,), dtype=int2_dtype) + + # phase 1 of PBA. m1 must divide texture size and be <= 64 + pba2d = cupy.RawModule( + code=get_pba2d_src( + block_size_2d=block_size, + marker=marker, + pixel_int2_t=pixel_int2_type, + ) + ) + kernelFloodDown = pba2d.get_function("kernelFloodDown") + kernelFloodUp = pba2d.get_function("kernelFloodUp") + kernelPropagateInterband = pba2d.get_function("kernelPropagateInterband") + kernelUpdateVertical = pba2d.get_function("kernelUpdateVertical") + kernelCreateForwardPointers = pba2d.get_function( + "kernelCreateForwardPointers" + ) + kernelDoubleToSingleList = pba2d.get_function("kernelDoubleToSingleList") + + if sampling is None: + kernelProximatePoints = pba2d.get_function("kernelProximatePoints") + kernelMergeBands = pba2d.get_function("kernelMergeBands") + kernelColor = pba2d.get_function("kernelColor") + else: + kernelProximatePoints = pba2d.get_function( + "kernelProximatePointsWithSpacing" + ) + kernelMergeBands = pba2d.get_function("kernelMergeBandsWithSpacing") + kernelColor = pba2d.get_function("kernelColorWithSpacing") + + block = (block_size, 1, 1) + grid 
= (math.ceil(size / block[0]), m1, 1) + bandSize1 = size // m1 + # kernelFloodDown modifies input_arr in-place + kernelFloodDown( + grid, + block, + (input_arr, input_arr, size, bandSize1), + ) + # kernelFloodUp modifies input_arr in-place + kernelFloodUp( + grid, + block, + (input_arr, input_arr, size, bandSize1), + ) + # kernelFloodUp fills values into margin + kernelPropagateInterband( + grid, + block, + (input_arr, margin, size, bandSize1), + ) + # kernelUpdateVertical stores output into an intermediate array of + # transposed shape + kernelUpdateVertical( + grid, + block, + (input_arr, margin, output, size, bandSize1), + ) + + # phase 2 + block = (block_size, 1, 1) + grid = (math.ceil(size / block[0]), m2, 1) + bandSize2 = size // m2 + if sampling is None: + sampling_args = () + else: + # Originally the shape is (y, x) and sampling[1] corresponds to y. + # However, kernelUpdateVertical transposed the image, so + # we are now working with (x, y) instead. Need sampling ordered + # accordingly. + sampling = tuple(map(float, sampling)) + sampling_args = (sampling[0], sampling[1]) + kernelProximatePoints( + grid, + block, + (output, input_arr, size, bandSize2) + sampling_args, + ) + kernelCreateForwardPointers( + grid, + block, + (input_arr, input_arr, size, bandSize2), + ) + # Repeatedly merging two bands into one + noBand = m2 + while noBand > 1: + grid = (math.ceil(size / block[0]), noBand // 2) + kernelMergeBands( + grid, + block, + (output, input_arr, input_arr, size, size // noBand) + sampling_args, # noqa + ) + noBand //= 2 + # Replace the forward link with the X coordinate of the seed to remove + # the need of looking at the other texture. We need it for coloring. 
+ grid = (math.ceil(size / block[0]), size) + kernelDoubleToSingleList( + grid, + block, + (output, input_arr, input_arr, size), + ) + + # Phase 3 of PBA + block = (block_size, m3, 1) + grid = (math.ceil(size / block[0]), 1, 1) + kernelColor( + grid, + block, + (input_arr, output, size) + sampling_args, + ) + + output = _unpack_int2(output, make_copy=False, int_dtype=int_dtype) + # make sure to crop any padding that was added here! + x = output[:orig_sy, :orig_sx, 0] + y = output[:orig_sy, :orig_sx, 1] + + vals = () + if return_distances: + dtype_out = cupy.float64 if float64_distances else cupy.float32 + if dt_inplace: + _check_distances(distances, y.shape, dtype_out) + else: + distances = cupy.zeros(y.shape, dtype=dtype_out) + + # make sure maximum possible distance doesn"t overflow + max_possible_dist = sum((s - 1)**2 for s in y.shape) + dist_int_type = "int" if max_possible_dist < 2**31 else "ptrdiff_t" + + if sampling is None: + distance_kernel = _get_distance_kernel( + int_type=_get_inttype(distances), + dist_int_type=dist_int_type, + ) + distance_kernel(y, x, distances, size=distances.size) + else: + distance_kernel = _get_aniso_distance_kernel( + int_type=_get_inttype(distances), + ) + sampling = cupy.asarray(sampling, dtype=dtype_out) + distance_kernel(y, x, sampling, distances, size=distances.size) + + vals = vals + (distances,) + if return_indices: + if indices_inplace: + _check_indices(indices, (arr.ndim,) + arr.shape, x.dtype.itemsize) + indices[0, ...] = y + indices[1, ...] 
= x + else: + indices = cupy.stack((y, x), axis=0) + vals = vals + (indices,) + return vals diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/_pba_3d.py b/vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/_pba_3d.py new file mode 100644 index 0000000000000000000000000000000000000000..714eeabd1b2bf16451d05c9d8ebfdf5cfe8f6ad9 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/_pba_3d.py @@ -0,0 +1,491 @@ +import math +import os + +import cupy +import numpy as np + +from ._util import _get_inttype +from ._pba_2d import (_check_distances, _check_indices, + _distance_tranform_arg_check, _generate_indices_ops, + _generate_shape, _get_block_size, lcm) + +pba3d_defines_template = """ + +#define MARKER {marker} +#define MAX_INT {max_int} +#define BLOCKSIZE {block_size_3d} + +""" + +# For efficiency, the original PBA+ packs three 10-bit integers and two binary +# flags into a single 32-bit integer. The defines in +# `pba3d_defines_encode_32bit` handle this format. +pba3d_defines_encode_32bit = """ +// Sites : ENCODE(x, y, z, 0, 0) +// Not sites : ENCODE(0, 0, 0, 1, 0) or MARKER +#define ENCODED_INT_TYPE int +#define ZERO 0 +#define ONE 1 +#define ENCODE(x, y, z, a, b) (((x) << 20) | ((y) << 10) | (z) | ((a) << 31) | ((b) << 30)) +#define DECODE(value, x, y, z) \ + x = ((value) >> 20) & 0x3ff; \ + y = ((value) >> 10) & 0x3ff; \ + z = (value) & 0x3ff + +#define NOTSITE(value) (((value) >> 31) & 1) +#define HASNEXT(value) (((value) >> 30) & 1) + +#define GET_X(value) (((value) >> 20) & 0x3ff) +#define GET_Y(value) (((value) >> 10) & 0x3ff) +#define GET_Z(value) ((NOTSITE((value))) ? MAX_INT : ((value) & 0x3ff)) + +""" # noqa + + +# 64bit version of ENCODE/DECODE to allow a 20-bit integer per coordinate axis. 
+pba3d_defines_encode_64bit = """ +// Sites : ENCODE(x, y, z, 0, 0) +// Not sites : ENCODE(0, 0, 0, 1, 0) or MARKER +#define ENCODED_INT_TYPE long long +#define ZERO 0L +#define ONE 1L +#define ENCODE(x, y, z, a, b) (((x) << 40) | ((y) << 20) | (z) | ((a) << 61) | ((b) << 60)) +#define DECODE(value, x, y, z) \ + x = ((value) >> 40) & 0xfffff; \ + y = ((value) >> 20) & 0xfffff; \ + z = (value) & 0xfffff + +#define NOTSITE(value) (((value) >> 61) & 1) +#define HASNEXT(value) (((value) >> 60) & 1) + +#define GET_X(value) (((value) >> 40) & 0xfffff) +#define GET_Y(value) (((value) >> 20) & 0xfffff) +#define GET_Z(value) ((NOTSITE((value))) ? MAX_INT : ((value) & 0xfffff)) + +""" # noqa + + +@cupy.memoize(True) +def get_pba3d_src(block_size_3d=32, marker=-2147483648, max_int=2147483647, + size_max=1024): + pba3d_code = pba3d_defines_template.format( + block_size_3d=block_size_3d, marker=marker, max_int=max_int + ) + if size_max > 1024: + pba3d_code += pba3d_defines_encode_64bit + else: + pba3d_code += pba3d_defines_encode_32bit + kernel_directory = os.path.join(os.path.dirname(__file__), "cuda") + with open(os.path.join(kernel_directory, "pba_kernels_3d.h"), "rt") as f: + pba3d_kernels = "\n".join(f.readlines()) + pba3d_code += pba3d_kernels + return pba3d_code + + +@cupy.memoize(for_each_device=True) +def _get_encode3d_kernel(size_max, marker=-2147483648): + """Pack array coordinates into a single integer.""" + if size_max > 1024: + int_type = "ptrdiff_t" # int64_t + else: + int_type = "int" # int32_t + + # value must match TOID macro in the C++ code! 
+ if size_max > 1024: + value = """(((x) << 40) | ((y) << 20) | (z))""" + else: + value = """(((x) << 20) | ((y) << 10) | (z))""" + + code = f""" + if (arr[i]) {{ + out[i] = {marker}; + }} else {{ + {int_type} shape_2 = arr.shape()[2]; + {int_type} shape_1 = arr.shape()[1]; + {int_type} _i = i; + {int_type} x = _i % shape_2; + _i /= shape_2; + {int_type} y = _i % shape_1; + _i /= shape_1; + {int_type} z = _i; + out[i] = {value}; + }} + """ + return cupy.ElementwiseKernel( + in_params="raw B arr", + out_params="raw I out", + operation=code, + options=("--std=c++11",), + ) + + +def encode3d(arr, marker=-2147483648, bit_depth=32, size_max=1024): + if arr.ndim != 3: + raise ValueError("only 3d arr supported") + if bit_depth not in [32, 64]: + raise ValueError("only bit_depth of 32 or 64 is supported") + if size_max > 1024: + dtype = np.int64 + else: + dtype = np.int32 + image = cupy.zeros(arr.shape, dtype=dtype, order="C") + kern = _get_encode3d_kernel(size_max, marker=marker) + kern(arr, image, size=image.size) + return image + + +def _get_decode3d_code(size_max, int_type=""): + # bit shifts here must match those used in the encode3d kernel + if size_max > 1024: + code = f""" + {int_type} x = (encoded >> 40) & 0xfffff; + {int_type} y = (encoded >> 20) & 0xfffff; + {int_type} z = encoded & 0xfffff; + """ + else: + code = f""" + {int_type} x = (encoded >> 20) & 0x3ff; + {int_type} y = (encoded >> 10) & 0x3ff; + {int_type} z = encoded & 0x3ff; + """ + return code + + +@cupy.memoize(for_each_device=True) +def _get_decode3d_kernel(size_max): + """Unpack 3 coordinates encoded as a single integer.""" + + # int_type = "" here because x, y, z were already allocated externally + code = _get_decode3d_code(size_max, int_type="") + + return cupy.ElementwiseKernel( + in_params="E encoded", + out_params="I x, I y, I z", + operation=code, + options=("--std=c++11",), + ) + + +def decode3d(encoded, size_max=1024): + coord_dtype = cupy.int32 if size_max < 2**31 else cupy.int64 + x = 
cupy.empty_like(encoded, dtype=coord_dtype) + y = cupy.empty_like(x) + z = cupy.empty_like(x) + kern = _get_decode3d_kernel(size_max) + kern(encoded, x, y, z) + return (x, y, z) + + +def _determine_padding(shape, block_size, m1, m2, m3, blockx, blocky): + # TODO: can possibly revise to consider only particular factors for LCM on + # a given axis + LCM = lcm(block_size, m1, m2, m3, blockx, blocky) + orig_sz, orig_sy, orig_sx = shape + round_up = False + if orig_sx % LCM != 0: + # round up size to a multiple of the band size + round_up = True + sx = LCM * math.ceil(orig_sx / LCM) + else: + sx = orig_sx + if orig_sy % LCM != 0: + # round up size to a multiple of the band size + round_up = True + sy = LCM * math.ceil(orig_sy / LCM) + else: + sy = orig_sy + if orig_sz % LCM != 0: + # round up size to a multiple of the band size + round_up = True + sz = LCM * math.ceil(orig_sz / LCM) + else: + sz = orig_sz + + aniso = not (sx == sy == sz) + if aniso or round_up: + smax = max(sz, sy, sx) + padding_width = ( + (0, smax - orig_sz), (0, smax - orig_sy), (0, smax - orig_sx) + ) + else: + padding_width = None + return padding_width + + +def _generate_distance_computation(int_type, dist_int_type): + """ + Compute euclidean distance from current coordinate (ind_0, ind_1, ind_2) to + the coordinates of the nearest point (z, y, x).""" + return f""" + {int_type} tmp = z - ind_0; + {dist_int_type} sq_dist = tmp * tmp; + tmp = y - ind_1; + sq_dist += tmp * tmp; + tmp = x - ind_2; + sq_dist += tmp * tmp; + dist[i] = sqrt(static_cast(sq_dist)); + """ + + +def _get_distance_kernel_code(int_type, dist_int_type, raw_out_var=True): + code = _generate_shape( + ndim=3, int_type=int_type, var_name="dist", raw_var=raw_out_var + ) + code += _generate_indices_ops(ndim=3, int_type=int_type) + code += _generate_distance_computation(int_type, dist_int_type) + return code + + +@cupy.memoize(for_each_device=True) +def _get_distance_kernel(int_type, large_dist=False): + """Returns kernel computing the 
Euclidean distance from coordinates.""" + dist_int_type = "ptrdiff_t" if large_dist else "int" + operation = _get_distance_kernel_code( + int_type, dist_int_type, raw_out_var=True + ) + return cupy.ElementwiseKernel( + in_params="I z, I y, I x", + out_params="raw F dist", + operation=operation, + options=("--std=c++11",), + ) + + +def _generate_aniso_distance_computation(): + """ + Compute euclidean distance from current coordinate (ind_0, ind_1, ind_2) to + the coordinates of the nearest point (z, y, x).""" + return """ + F tmp = static_cast(z - ind_0) * sampling[0]; + F sq_dist = tmp * tmp; + tmp = static_cast(y - ind_1) * sampling[1]; + sq_dist += tmp * tmp; + tmp = static_cast(x - ind_2) * sampling[2]; + sq_dist += tmp * tmp; + dist[i] = sqrt(static_cast(sq_dist)); + """ + + +def _get_aniso_distance_kernel_code(int_type, raw_out_var=True): + code = _generate_shape( + ndim=3, int_type=int_type, var_name="dist", raw_var=raw_out_var + ) + code += _generate_indices_ops(ndim=3, int_type=int_type) + code += _generate_aniso_distance_computation() + return code + + +@cupy.memoize(for_each_device=True) +def _get_aniso_distance_kernel(int_type): + """Returns kernel computing the Euclidean distance from coordinates with + axis spacing != 1.""" + operation = _get_aniso_distance_kernel_code( + int_type, raw_out_var=True + ) + return cupy.ElementwiseKernel( + in_params="I z, I y, I x, raw F sampling", + out_params="raw F dist", + operation=operation, + options=("--std=c++11",), + ) + + +@cupy.memoize(for_each_device=True) +def _get_decode_as_distance_kernel(size_max, large_dist=False, sampling=None): + """Fused decode3d and distance computation. + + This kernel is for use when `return_distances=True`, but + `return_indices=False`. It replaces the separate calls to + `_get_decode3d_kernel` and `_get_distance_kernel`, avoiding the overhead of + generating full arrays containing the coordinates since the coordinate + arrays are not going to be returned. 
+ """ + if sampling is None: + dist_int_type = "ptrdiff_t" if large_dist else "int" + int_type = "int" + + # Step 1: decode the (z, y, x) coordinate + code = _get_decode3d_code(size_max, int_type=int_type) + + # Step 2: compute the Euclidean distance based on this (z, y, x). + code += _generate_shape( + ndim=3, int_type=int_type, var_name="dist", raw_var=True + ) + code += _generate_indices_ops(ndim=3, int_type=int_type) + if sampling is None: + code += _generate_distance_computation(int_type, dist_int_type) + in_params = "E encoded" + else: + code += _generate_aniso_distance_computation() + in_params = "E encoded, raw F sampling" + return cupy.ElementwiseKernel( + in_params=in_params, + out_params="raw F dist", + operation=code, + options=("--std=c++11",), + ) + + +def _pba_3d(arr, sampling=None, return_distances=True, return_indices=False, + block_params=None, check_warp_size=False, *, + float64_distances=False, distances=None, indices=None): + + indices_inplace = isinstance(indices, cupy.ndarray) + dt_inplace = isinstance(distances, cupy.ndarray) + _distance_tranform_arg_check( + dt_inplace, indices_inplace, return_distances, return_indices + ) + + if arr.ndim != 3: + raise ValueError(f"expected a 3D array, got {arr.ndim}D") + + if block_params is None: + m1 = 1 + m2 = 1 + m3 = 2 + else: + m1, m2, m3 = block_params + + # reduce blockx for small inputs + s_min = min(arr.shape) + if s_min <= 4: + blockx = 4 + elif s_min <= 8: + blockx = 8 + elif s_min <= 16: + blockx = 16 + else: + blockx = 32 + blocky = 4 + + block_size = _get_block_size(check_warp_size) + + orig_sz, orig_sy, orig_sx = arr.shape + padding_width = _determine_padding( + arr.shape, block_size, m1, m2, m3, blockx, blocky + ) + if padding_width is not None: + arr = cupy.pad(arr, padding_width, mode="constant", constant_values=1) + size = arr.shape[0] + + # pba algorithm was implemented to use 32-bit integer to store compressed + # coordinates. 
input_arr will be C-contiguous, int32 + size_max = max(arr.shape) + input_arr = encode3d(arr, size_max=size_max) + buffer_idx = 0 + output = cupy.zeros_like(input_arr) + pba_images = [input_arr, output] + + block = (blockx, blocky, 1) + grid = (size // block[0], size // block[1], 1) + pba3d = cupy.RawModule( + code=get_pba3d_src(block_size_3d=block_size, size_max=size_max) + ) + + kernelFloodZ = pba3d.get_function("kernelFloodZ") + if sampling is None: + kernelMaurerAxis = pba3d.get_function("kernelMaurerAxis") + kernelColorAxis = pba3d.get_function("kernelColorAxis") + sampling_args = () + else: + kernelMaurerAxis = pba3d.get_function("kernelMaurerAxisWithSpacing") + kernelColorAxis = pba3d.get_function("kernelColorAxisWithSpacing") + sampling = tuple(map(float, sampling)) + sampling_args = (sampling[2], sampling[1], sampling[0]) + + kernelFloodZ( + grid, + block, + (pba_images[buffer_idx], pba_images[1 - buffer_idx], size) + ) + buffer_idx = 1 - buffer_idx + + block = (blockx, blocky, 1) + grid = (size // block[0], size // block[1], 1) + kernelMaurerAxis( + grid, + block, + (pba_images[buffer_idx], pba_images[1 - buffer_idx], size) + sampling_args, # noqa + ) + + block = (block_size, m3, 1) + grid = (size // block[0], size, 1) + kernelColorAxis( + grid, + block, + (pba_images[1 - buffer_idx], pba_images[buffer_idx], size) + sampling_args, # noqa + ) + + if sampling is not None: + # kernelColorAxis transposes the first two axis, so have to reorder + # the sampling_args tuple correspondingly + sampling_args = (sampling[1], sampling[2], sampling[0]) + + block = (blockx, blocky, 1) + grid = (size // block[0], size // block[1], 1) + kernelMaurerAxis( + grid, + block, + (pba_images[buffer_idx], pba_images[1 - buffer_idx], size) + sampling_args, # noqa + ) + + block = (block_size, m3, 1) + grid = (size // block[0], size, 1) + kernelColorAxis( + grid, + block, + (pba_images[1 - buffer_idx], pba_images[buffer_idx], size) + sampling_args, # noqa + ) + output = 
pba_images[buffer_idx] + + if return_distances: + out_shape = (orig_sz, orig_sy, orig_sx) + dtype_out = cupy.float64 if float64_distances else cupy.float32 + if dt_inplace: + _check_distances(distances, out_shape, dtype_out) + else: + distances = cupy.zeros(out_shape, dtype=dtype_out) + + # make sure maximum possible distance doesn't overflow + max_possible_dist = sum((s - 1)**2 for s in out_shape) + large_dist = max_possible_dist >= 2**31 + + if not return_indices: + # Compute distances without forming explicit coordinate arrays. + kern = _get_decode_as_distance_kernel( + size_max=size_max, + large_dist=large_dist, + sampling=sampling + ) + if sampling is None: + kern(output[:orig_sz, :orig_sy, :orig_sx], distances) + else: + sampling = cupy.asarray(sampling, dtype=distances.dtype) + kern(output[:orig_sz, :orig_sy, :orig_sx], sampling, distances) + return (distances,) + + if return_indices: + x, y, z = decode3d(output[:orig_sz, :orig_sy, :orig_sx], + size_max=size_max) + vals = () + if return_distances: + if sampling is None: + kern = _get_distance_kernel( + int_type=_get_inttype(distances), large_dist=large_dist, + ) + kern(z, y, x, distances) + else: + kern = _get_aniso_distance_kernel(int_type=_get_inttype(distances)) + sampling = cupy.asarray(sampling, dtype=distances.dtype) + kern(z, y, x, sampling, distances) + vals = vals + (distances,) + if return_indices: + if indices_inplace: + _check_indices(indices, (arr.ndim,) + arr.shape, x.dtype.itemsize) + indices[0, ...] = z + indices[1, ...] = y + indices[2, ...] 
= x + else: + indices = cupy.stack((z, y, x), axis=0) + vals = vals + (indices,) + return vals diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/_spline_kernel_weights.py b/vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/_spline_kernel_weights.py new file mode 100644 index 0000000000000000000000000000000000000000..5fcfcafe3de57097444abe8cfd9515b1bf654950 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/_spline_kernel_weights.py @@ -0,0 +1,73 @@ +"""Determination of spline kernel weights (adapted from SciPy) + +See more verbose comments for each case there: +https://github.com/scipy/scipy/blob/eba29d69846ab1299976ff4af71c106188397ccc/scipy/ndimage/src/ni_splines.c#L7 + +``spline_weights_inline`` is a dict where the key is the spline order and the +value is the spline weight initialization code. +""" + +spline_weights_inline = {} + +# Note: This order = 1 case is currently unused (order = 1 has a different code +# path in _interp_kernels.py). I think that existing code is a bit more +# efficient. +spline_weights_inline[1] = ''' +wx = c_{j} - floor({order} & 1 ? c_{j} : c_{j} + 0.5); +weights_{j}[0] = 1.0 - wx; +weights_{j}[1] = wx; +''' + +spline_weights_inline[2] = ''' +wx = c_{j} - floor({order} & 1 ? c_{j} : c_{j} + 0.5); +weights_{j}[1] = 0.75 - wx * wx; +wy = 0.5 - wx; +weights_{j}[0] = 0.5 * wy * wy; +weights_{j}[2] = 1.0 - weights_{j}[0] - weights_{j}[1]; +''' + +spline_weights_inline[3] = ''' +wx = c_{j} - floor({order} & 1 ? c_{j} : c_{j} + 0.5); +wy = 1.0 - wx; +weights_{j}[1] = (wx * wx * (wx - 2.0) * 3.0 + 4.0) / 6.0; +weights_{j}[2] = (wy * wy * (wy - 2.0) * 3.0 + 4.0) / 6.0; +weights_{j}[0] = wy * wy * wy / 6.0; +weights_{j}[3] = 1.0 - weights_{j}[0] - weights_{j}[1] - weights_{j}[2]; +''' + +spline_weights_inline[4] = ''' +wx = c_{j} - floor({order} & 1 ? 
c_{j} : c_{j} + 0.5); +wy = wx * wx; +weights_{j}[2] = wy * (wy * 0.25 - 0.625) + 115.0 / 192.0; +wy = 1.0 + wx; +weights_{j}[1] = wy * (wy * (wy * (5.0 - wy) / 6.0 - 1.25) + 5.0 / 24.0) + + 55.0 / 96.0; +wy = 1.0 - wx; +weights_{j}[3] = wy * (wy * (wy * (5.0 - wy) / 6.0 - 1.25) + 5.0 / 24.0) + + 55.0 / 96.0; +wy = 0.5 - wx; +wy = wy * wy; +weights_{j}[0] = wy * wy / 24.0; +weights_{j}[4] = 1.0 - weights_{j}[0] - weights_{j}[1] + - weights_{j}[2] - weights_{j}[3]; +''' + +spline_weights_inline[5] = ''' +wx = c_{j} - floor({order} & 1 ? c_{j} : c_{j} + 0.5); +wy = wx * wx; +weights_{j}[2] = wy * (wy * (0.25 - wx / 12.0) - 0.5) + 0.55; +wy = 1.0 - wx; +wy = wy * wy; +weights_{j}[3] = wy * (wy * (0.25 - (1.0 - wx) / 12.0) - 0.5) + 0.55; +wy = wx + 1.0; +weights_{j}[1] = wy * (wy * (wy * (wy * (wy / 24.0 - 0.375) + 1.25) - 1.75) + + 0.625) + 0.425; +wy = 2.0 - wx; +weights_{j}[4] = wy * (wy * (wy * (wy * (wy / 24.0 - 0.375) + 1.25) - 1.75) + + 0.625) + 0.425; +wy = 1.0 - wx; +wy = wy * wy; +weights_{j}[0] = (1.0 - wx) * wy * wy / 120.0; +weights_{j}[5] = 1.0 - weights_{j}[0] - weights_{j}[1] - weights_{j}[2] + - weights_{j}[3] - weights_{j}[4]; +''' diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/_spline_prefilter_core.py b/vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/_spline_prefilter_core.py new file mode 100644 index 0000000000000000000000000000000000000000..29403b012a7aa9a50eab768337d18e24dbcab12f --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/_spline_prefilter_core.py @@ -0,0 +1,261 @@ +""" +Spline poles and boundary handling implemented as in SciPy + +https://github.com/scipy/scipy/blob/master/scipy/ndimage/src/ni_splines.c +""" +import functools +import math +import operator +import textwrap + +import cupy + + +def get_poles(order): + if order == 2: + # sqrt(8.0) - 3.0 + return (-0.171572875253809902396622551580603843,) + elif order == 3: + # sqrt(3.0) - 2.0 + return 
(-0.267949192431122706472553658494127633,) + elif order == 4: + # sqrt(664.0 - sqrt(438976.0)) + sqrt(304.0) - 19.0 + # sqrt(664.0 + sqrt(438976.0)) - sqrt(304.0) - 19.0 + return (-0.361341225900220177092212841325675255, + -0.013725429297339121360331226939128204) + elif order == 5: + # sqrt(67.5 - sqrt(4436.25)) + sqrt(26.25) - 6.5 + # sqrt(67.5 + sqrt(4436.25)) - sqrt(26.25) - 6.5 + return (-0.430575347099973791851434783493520110, + -0.043096288203264653822712376822550182) + else: + raise ValueError('only order 2-5 supported') + + +def get_gain(poles): + return functools.reduce(operator.mul, + [(1.0 - z) * (1.0 - 1.0 / z) for z in poles]) + + +def _causal_init_code(mode): + """Code for causal initialization step of IIR filtering. + + c is a 1d array of length n and z is a filter pole + """ + code = f''' + // causal init for mode={mode}''' + if mode == 'mirror': + code += ''' + z_i = z; + z_n_1 = pow(z, (P)(n - 1)); + + c[0] = c[0] + z_n_1 * c[(n - 1) * element_stride]; + for (i = 1; i < min(n - 1, static_cast({n_boundary})); ++i) {{ + c[0] += z_i * (c[i * element_stride] + + z_n_1 * c[(n - 1 - i) * element_stride]); + z_i *= z; + }} + c[0] /= 1 - z_n_1 * z_n_1;''' + elif mode == 'grid-wrap': + code += ''' + z_i = z; + + for (i = 1; i < min(n, static_cast({n_boundary})); ++i) {{ + c[0] += z_i * c[(n - i) * element_stride]; + z_i *= z; + }} + c[0] /= 1 - z_i; /* z_i = pow(z, n) */''' + elif mode == 'reflect': + code += ''' + z_i = z; + z_n = pow(z, (P)n); + c0 = c[0]; + + c[0] = c[0] + z_n * c[(n - 1) * element_stride]; + for (i = 1; i < min(n, static_cast({n_boundary})); ++i) {{ + c[0] += z_i * (c[i * element_stride] + + z_n * c[(n - 1 - i) * element_stride]); + z_i *= z; + }} + c[0] *= z / (1 - z_n * z_n); + c[0] += c0;''' + else: + raise ValueError('invalid mode: {}'.format(mode)) + return code + + +def _anticausal_init_code(mode): + """Code for the anti-causal initialization step of IIR filtering. 
+ + c is a 1d array of length n and z is a filter pole + """ + code = f''' + // anti-causal init for mode={mode}''' + if mode == 'mirror': + code += ''' + c[(n - 1) * element_stride] = ( + z * c[(n - 2) * element_stride] + + c[(n - 1) * element_stride]) * z / (z * z - 1);''' + elif mode == 'grid-wrap': + code += ''' + z_i = z; + + for (i = 0; i < min(n - 1, static_cast({n_boundary})); ++i) {{ + c[(n - 1) * element_stride] += z_i * c[i * element_stride]; + z_i *= z; + }} + c[(n - 1) * element_stride] *= z / (z_i - 1); /* z_i = pow(z, n) */''' + elif mode == 'reflect': + code += ''' + c[(n - 1) * element_stride] *= z / (z - 1);''' + else: + raise ValueError('invalid mode: {}'.format(mode)) + return code + + +def _get_spline_mode(mode): + """spline boundary mode for interpolation with order >= 2.""" + if mode in ['mirror', 'reflect', 'grid-wrap']: + # exact analytic boundary conditions exist for these modes. + return mode + elif mode == 'grid-mirror': + # grid-mirror is a synonym for 'reflect' + return 'reflect' + # No exact analytical spline boundary condition implemented. Reflect gives + # lower error than using mirror or wrap for mode 'nearest'. Otherwise, a + # mirror spline boundary condition is used. + return 'reflect' if mode == 'nearest' else 'mirror' + + +def _get_spline1d_code(mode, poles, n_boundary): + """Generates the code required for IIR filtering of a single 1d signal. + + Prefiltering is done by causal filtering followed by anti-causal filtering. + Multiple boundary conditions have been implemented. 
+ """ + code = [''' + __device__ void spline_prefilter1d( + T* __restrict__ c, idx_t signal_length, idx_t element_stride) + {{'''] + + # variables common to all boundary modes + code.append(''' + idx_t i, n = signal_length; + P z, z_i;''') + + # retrieve the spline boundary extension mode to use + mode = _get_spline_mode(mode) + + if mode == 'mirror': + # variables specific to mirror boundary mode + code.append(''' + P z_n_1;''') + elif mode == 'reflect': + # variables specific to reflect boundary mode + code.append(''' + P z_n; + T c0;''') + + for pole in poles: + + code.append(f''' + // select the current pole + z = {pole};''') + + # initialize and apply the causal filter + code.append(_causal_init_code(mode)) + code.append(''' + // apply the causal filter for the current pole + for (i = 1; i < n; ++i) {{ + c[i * element_stride] += z * c[(i - 1) * element_stride]; + }}''') + code.append(''' + #ifdef __HIP_DEVICE_COMPILE__ + __syncthreads(); + #endif + ''') + # initialize and apply the anti-causal filter + code.append(_anticausal_init_code(mode)) + code.append(''' + // apply the anti-causal filter for the current pole + for (i = n - 2; i >= 0; --i) {{ + c[i * element_stride] = z * (c[(i + 1) * element_stride] - + c[i * element_stride]); + }}''') + + code += [''' + }}'''] + return textwrap.dedent('\n'.join(code)).format(n_boundary=n_boundary) + + +_FILTER_GENERAL = ''' +#include "cupy/carray.cuh" +#include "cupy/complex.cuh" +typedef {data_type} T; +typedef {pole_type} P; +typedef {index_type} idx_t; +template +__device__ T* row( + T* ptr, idx_t i, idx_t axis, idx_t ndim, const idx_t* shape) {{ + idx_t index = 0, stride = 1; + for (idx_t a = ndim - 1; a > 0; --a) {{ + if (a != axis) {{ + index += (i % shape[a]) * stride; + i /= shape[a]; + }} + stride *= shape[a]; + }} + return ptr + index + stride * i; +}} +''' + + +_batch_spline1d_strided_template = """ +extern "C" __global__ +__launch_bounds__({block_size}) +void {kernel_name}(T* __restrict__ y, const idx_t* 
__restrict__ info) {{ + const idx_t n_signals = info[0], n_samples = info[1], + * __restrict__ shape = info+2; + idx_t y_elem_stride = 1; + for (int a = {ndim} - 1; a > {axis}; --a) {{ y_elem_stride *= shape[a]; }} + idx_t unraveled_idx = blockDim.x * blockIdx.x + threadIdx.x; + idx_t batch_idx = unraveled_idx; + if (batch_idx < n_signals) + {{ + T* __restrict__ y_i = row(y, batch_idx, {axis}, {ndim}, shape); + spline_prefilter1d(y_i, n_samples, y_elem_stride); + }} +}} +""" + + +@cupy.memoize(for_each_device=True) +def get_raw_spline1d_kernel(axis, ndim, mode, order, index_type='int', + data_type='double', pole_type='double', + block_size=128): + """Generate a kernel for applying a spline prefilter along a given axis.""" + poles = get_poles(order) + + # determine number of samples for the boundary approximation + # (SciPy uses n_boundary = n_samples but this is excessive) + largest_pole = max([abs(p) for p in poles]) + # tol < 1e-7 fails test cases comparing to SciPy at atol = rtol = 1e-5 + tol = 1e-10 if pole_type == 'float' else 1e-18 + n_boundary = math.ceil(math.log(tol, largest_pole)) + + # headers and general utility function for extracting rows of data + code = _FILTER_GENERAL.format(index_type=index_type, + data_type=data_type, + pole_type=pole_type) + + # generate source for a 1d function for a given boundary mode and poles + code += _get_spline1d_code(mode, poles, n_boundary) + + # generate code handling batch operation of the 1d filter + mode_str = mode.replace('-', '_') # cannot have '-' in kernel name + kernel_name = (f'cupyx_scipy_ndimage_spline_filter_{ndim}d_ord{order}_' + f'axis{axis}_{mode_str}') + code += _batch_spline1d_strided_template.format(ndim=ndim, axis=axis, + block_size=block_size, + kernel_name=kernel_name) + return cupy.RawKernel(code, kernel_name) diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/_util.py b/vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/_util.py new file mode 100644 index 
0000000000000000000000000000000000000000..f54a8b94792dd3a167bbf12e2de4d772657b42ba --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/_util.py @@ -0,0 +1,160 @@ +import warnings + +import cupy + + +def _is_integer_output(output, input): + if output is None: + return input.dtype.kind in 'iu' + elif isinstance(output, cupy.ndarray): + return output.dtype.kind in 'iu' + return cupy.dtype(output).kind in 'iu' + + +def _check_cval(mode, cval, integer_output): + if mode == 'constant' and integer_output and not cupy.isfinite(cval): + raise NotImplementedError("Non-finite cval is not supported for " + "outputs with integer dtype.") + + +def _init_weights_dtype(input): + """Initialize filter weights based on the input array. + + This helper is only used during initialization of some internal filters + like prewitt and sobel to avoid costly double-precision computation. + """ + if input.dtype.kind == "c": + return cupy.promote_types(input.real.dtype, cupy.complex64) + return cupy.promote_types(input.real.dtype, cupy.float32) + + +def _get_weights_dtype(input, weights): + if weights.dtype.kind == "c" or input.dtype.kind == "c": + return cupy.promote_types(input.real.dtype, cupy.complex64) + elif weights.dtype.kind in 'iub': + # convert integer dtype weights to double as in SciPy + return cupy.float64 + return cupy.promote_types(input.real.dtype, cupy.float32) + + +def _get_output(output, input, shape=None, complex_output=False): + shape = input.shape if shape is None else shape + if output is None: + if complex_output: + _dtype = cupy.promote_types(input.dtype, cupy.complex64) + else: + _dtype = input.dtype + output = cupy.empty(shape, dtype=_dtype) + elif isinstance(output, (type, cupy.dtype)): + if complex_output and cupy.dtype(output).kind != 'c': + warnings.warn("promoting specified output dtype to complex") + output = cupy.promote_types(output, cupy.complex64) + output = cupy.empty(shape, dtype=output) + elif isinstance(output, str): + output = 
cupy.dtype(output) + if complex_output and output.kind != 'c': + raise RuntimeError("output must have complex dtype") + output = cupy.empty(shape, dtype=output) + elif output.shape != shape: + raise RuntimeError("output shape not correct") + elif complex_output and output.dtype.kind != 'c': + raise RuntimeError("output must have complex dtype") + return output + + +def _fix_sequence_arg(arg, ndim, name, conv=lambda x: x): + if isinstance(arg, str): + return [conv(arg)] * ndim + try: + arg = iter(arg) + except TypeError: + return [conv(arg)] * ndim + lst = [conv(x) for x in arg] + if len(lst) != ndim: + msg = "{} must have length equal to input rank".format(name) + raise RuntimeError(msg) + return lst + + +def _check_origin(origin, width): + origin = int(origin) + if (width // 2 + origin < 0) or (width // 2 + origin >= width): + raise ValueError('invalid origin') + return origin + + +def _check_mode(mode): + if mode not in ('reflect', 'constant', 'nearest', 'mirror', 'wrap', + 'grid-mirror', 'grid-wrap', 'grid-reflect'): + msg = f'boundary mode not supported (actual: {mode})' + raise RuntimeError(msg) + return mode + + +def _get_inttype(input): + # The integer type to use for indices in the input array + # The indices actually use byte positions and we can't just use + # input.nbytes since that won't tell us the number of bytes between the + # first and last elements when the array is non-contiguous + nbytes = sum((x-1)*abs(stride) for x, stride in + zip(input.shape, input.strides)) + input.dtype.itemsize + return 'int' if nbytes < (1 << 31) else 'ptrdiff_t' + + +def _generate_boundary_condition_ops(mode, ix, xsize, int_t="int", + float_ix=False): + min_func = "fmin" if float_ix else "min" + max_func = "fmax" if float_ix else "max" + if mode in ['reflect', 'grid-mirror']: + ops = ''' + if ({ix} < 0) {{ + {ix} = - 1 -{ix}; + }} + {ix} %= {xsize} * 2; + {ix} = {min}({ix}, 2 * {xsize} - 1 - {ix});'''.format( + ix=ix, xsize=xsize, min=min_func) + elif mode == 'mirror': 
+ ops = ''' + if ({xsize} == 1) {{ + {ix} = 0; + }} else {{ + if ({ix} < 0) {{ + {ix} = -{ix}; + }} + {ix} = 1 + ({ix} - 1) % (({xsize} - 1) * 2); + {ix} = {min}({ix}, 2 * {xsize} - 2 - {ix}); + }}'''.format(ix=ix, xsize=xsize, min=min_func) + elif mode == 'nearest': + ops = ''' + {ix} = {min}({max}(({T}){ix}, ({T})0), ({T})({xsize} - 1));'''.format( + ix=ix, xsize=xsize, min=min_func, max=max_func, + # force using 64-bit signed integer for ptrdiff_t, + # see cupy/cupy#6048 + T=('int' if int_t == 'int' else 'long long')) + elif mode == 'grid-wrap': + ops = ''' + {ix} %= {xsize}; + while ({ix} < 0) {{ + {ix} += {xsize}; + }}'''.format(ix=ix, xsize=xsize) + elif mode == 'wrap': + ops = ''' + if ({ix} < 0) {{ + {ix} += ({sz} - 1) * (({int_t})(-{ix} / ({sz} - 1)) + 1); + }} else if ({ix} > ({sz} - 1)) {{ + {ix} -= ({sz} - 1) * ({int_t})({ix} / ({sz} - 1)); + }};'''.format(ix=ix, sz=xsize, int_t=int_t) + elif mode in ['constant', 'grid-constant']: + ops = ''' + if (({ix} < 0) || {ix} >= {xsize}) {{ + {ix} = -1; + }}'''.format(ix=ix, xsize=xsize) + return ops + + +def _generate_indices_ops(ndim, int_type, offsets): + code = '{type} ind_{j} = _i % ysize_{j} - {offset}; _i /= ysize_{j};' + body = [code.format(type=int_type, j=j, offset=offsets[j]) + for j in range(ndim-1, 0, -1)] + return '{type} _i = i;\n{body}\n{type} ind_0 = _i - {offset};'.format( + type=int_type, body='\n'.join(body), offset=offsets[0]) diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/cuda/LICENSE b/vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/cuda/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..9d0b4030a490c4100a90f9d006cc8d65a705f568 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/cuda/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2019 School of Computing, National University of Singapore + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated 
documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/cuda/pba_kernels_2d.h b/vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/cuda/pba_kernels_2d.h new file mode 100644 index 0000000000000000000000000000000000000000..4156340abd34ea4de329915f5aee5731b99ee19d --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/cuda/pba_kernels_2d.h @@ -0,0 +1,695 @@ +// Euclidean Distance Transform +// +// Kernels for the 2D version of the Parallel Banding Algorithm (PBA+). +// +// MIT license: see LICENSE in this folder +// Copyright: (c) 2019 School of Computing, National University of Singapore +// +// Modifications by Gregory Lee (2022) (NVIDIA) +// - add user-defined pixel_int2_t to enable +// - replace __mul24 operations with standard multiplication operator +// - Add variant kernels with support for non-isotropic pixel dimensions. These +// kernels differ from the originals in that they also take sx and sy values +// indicating the pixel size along the x and y axes. 
The kernels are identical +// except that the `dominate` function is replaced by `dominate_sp` and the +// physical spacings are used when computing distances. +// + + +// START OF DEFINITIONS OVERRIDDEN BY THE PYTHON SCRIPT + +// The values included in this header file are those defined in the original +// PBA+ implementation + +// However, the Python code generation can potentially generate a different +// ENCODE/DECODE that use 20 bits per coordinates instead of 10 bits per +// coordinate with ENCODED_INT_TYPE as `long long`. + +#ifndef MARKER +#define MARKER -32768 +#endif + +#ifndef BLOCKSIZE +#define BLOCKSIZE 32 +#endif + +#ifndef pixel_int2_t +#define pixel_int2_t short2 +#define make_pixel(x, y) make_short2(x, y) +#endif + +// END OF DEFINITIONS OVERRIDDEN BY THE PYTHON SCRIPT + + +#define TOID(x, y, size) ((y) * (size) + (x)) + +#define LL long long +__device__ bool dominate(LL x1, LL y1, LL x2, LL y2, LL x3, LL y3, LL x0) +{ + LL k1 = y2 - y1, k2 = y3 - y2; + return (k1 * (y1 + y2) + (x2 - x1) * ((x1 + x2) - (x0 << 1))) * k2 > \ + (k2 * (y2 + y3) + (x3 - x2) * ((x2 + x3) - (x0 << 1))) * k1; +} +#undef LL + +// version of dominate, but with per-axis floating-point spacing +__device__ bool dominate_sp(int _x1, int _y1, int _x2, int _y2, int _x3, int _y3, int _x0, float sx, float sy) +{ + float x1 = static_cast(_x1) * sx; + float x2 = static_cast(_x2) * sx; + float x3 = static_cast(_x3) * sx; + float y1 = static_cast(_y1) * sy; + float y2 = static_cast(_y2) * sy; + float y3 = static_cast(_y3) * sy; + float x0_2 = static_cast(_x0 << 1) * sx; + float k1 = (y2 - y1); + float k2 = (y3 - y2); + return (k1 * (y1 + y2) + (x2 - x1) * ((x1 + x2) - x0_2)) * k2 > \ + (k2 * (y2 + y3) + (x3 - x2) * ((x2 + x3) - x0_2)) * k1; +} + + +extern "C"{ + +__global__ void kernelFloodDown(pixel_int2_t *input, pixel_int2_t *output, int size, int bandSize) +{ + int tx = blockIdx.x * blockDim.x + threadIdx.x; + int ty = blockIdx.y * bandSize; + int id = TOID(tx, ty, size); + + 
pixel_int2_t pixel1, pixel2; + + pixel1 = make_pixel(MARKER, MARKER); + + for (int i = 0; i < bandSize; i++, id += size) { + pixel2 = input[id]; + + if (pixel2.x != MARKER) + pixel1 = pixel2; + + output[id] = pixel1; + } +} + +__global__ void kernelFloodUp(pixel_int2_t *input, pixel_int2_t *output, int size, int bandSize) +{ + int tx = blockIdx.x * blockDim.x + threadIdx.x; + int ty = (blockIdx.y+1) * bandSize - 1; + int id = TOID(tx, ty, size); + + pixel_int2_t pixel1, pixel2; + int dist1, dist2; + + pixel1 = make_pixel(MARKER, MARKER); + + for (int i = 0; i < bandSize; i++, id -= size) { + dist1 = abs(pixel1.y - ty + i); + + pixel2 = input[id]; + dist2 = abs(pixel2.y - ty + i); + + if (dist2 < dist1) + pixel1 = pixel2; + + output[id] = pixel1; + } +} + +__global__ void kernelPropagateInterband(pixel_int2_t *input, pixel_int2_t *margin_out, int size, int bandSize) +{ + int tx = blockIdx.x * blockDim.x + threadIdx.x; + int inc = bandSize * size; + int ny, nid, nDist; + pixel_int2_t pixel; + + // Top row, look backward + int ty = blockIdx.y * bandSize; + int topId = TOID(tx, ty, size); + int bottomId = TOID(tx, ty + bandSize - 1, size); + int tid = blockIdx.y * size + tx; + int bid = tid + (size * size / bandSize); + + pixel = input[topId]; + int myDist = abs(pixel.y - ty); + margin_out[tid] = pixel; + + for (nid = bottomId - inc; nid >= 0; nid -= inc) { + pixel = input[nid]; + + if (pixel.x != MARKER) { + nDist = abs(pixel.y - ty); + + if (nDist < myDist) + margin_out[tid] = pixel; + + break; + } + } + + // Last row, look downward + ty = ty + bandSize - 1; + pixel = input[bottomId]; + myDist = abs(pixel.y - ty); + margin_out[bid] = pixel; + + for (ny = ty + 1, nid = topId + inc; ny < size; ny += bandSize, nid += inc) { + pixel = input[nid]; + + if (pixel.x != MARKER) { + nDist = abs(pixel.y - ty); + + if (nDist < myDist) + margin_out[bid] = pixel; + + break; + } + } +} + +__global__ void kernelUpdateVertical(pixel_int2_t *color, pixel_int2_t *margin, pixel_int2_t 
*output, int size, int bandSize) +{ + __shared__ pixel_int2_t block[BLOCKSIZE][BLOCKSIZE]; + + int tx = blockIdx.x * blockDim.x + threadIdx.x; + int ty = blockIdx.y * bandSize; + + pixel_int2_t top = margin[blockIdx.y * size + tx]; + pixel_int2_t bottom = margin[(blockIdx.y + size / bandSize) * size + tx]; + pixel_int2_t pixel; + + int dist, myDist; + + int id = TOID(tx, ty, size); + + int n_step = bandSize / blockDim.x; + for(int step = 0; step < n_step; ++step) { + int y_start = blockIdx.y * bandSize + step * blockDim.x; + int y_end = y_start + blockDim.x; + + for (ty = y_start; ty < y_end; ++ty, id += size) { + pixel = color[id]; + myDist = abs(pixel.y - ty); + + dist = abs(top.y - ty); + if (dist < myDist) { myDist = dist; pixel = top; } + + dist = abs(bottom.y - ty); + if (dist < myDist) pixel = bottom; + + // temporary result is stored in block + block[threadIdx.x][ty - y_start] = make_pixel(pixel.y, pixel.x); + } + + __syncthreads(); + + // block is written to a transposed location in the output + + int tid = TOID(blockIdx.y * bandSize + step * blockDim.x + threadIdx.x, \ + blockIdx.x * blockDim.x, size); + + for(int i = 0; i < blockDim.x; ++i, tid += size) { + output[tid] = block[i][threadIdx.x]; + } + + __syncthreads(); + } +} + +__global__ void kernelProximatePoints(pixel_int2_t *input, pixel_int2_t *stack, int size, int bandSize) +{ + int tx = blockIdx.x * blockDim.x + threadIdx.x; + int ty = blockIdx.y * bandSize; + int id = TOID(tx, ty, size); + int lasty = -1; + pixel_int2_t last1, last2, current; + + last1.y = -1; last2.y = -1; + + for (int i = 0; i < bandSize; i++, id += size) { + current = input[id]; + + if (current.x != MARKER) { + while (last2.y >= 0) { + if (!dominate(last1.x, last2.y, last2.x, \ + lasty, current.x, current.y, tx)) + break; + + lasty = last2.y; last2 = last1; + + if (last1.y >= 0) + last1 = stack[TOID(tx, last1.y, size)]; + } + + last1 = last2; last2 = make_pixel(current.x, lasty); lasty = current.y; + + stack[id] = last2; + } + 
} + + // Store the pointer to the tail at the last pixel of this band + if (lasty != ty + bandSize - 1) + stack[TOID(tx, ty + bandSize - 1, size)] = make_pixel(MARKER, lasty); +} + + +__global__ void kernelProximatePointsWithSpacing(pixel_int2_t *input, pixel_int2_t *stack, int size, int bandSize, double sx, double sy) +{ + int tx = blockIdx.x * blockDim.x + threadIdx.x; + int ty = blockIdx.y * bandSize; + int id = TOID(tx, ty, size); + int lasty = -1; + pixel_int2_t last1, last2, current; + + last1.y = -1; last2.y = -1; + + for (int i = 0; i < bandSize; i++, id += size) { + current = input[id]; + + if (current.x != MARKER) { + while (last2.y >= 0) { + if (!dominate_sp(last1.x, last2.y, last2.x, \ + lasty, current.x, current.y, tx, sx, sy)) + break; + + lasty = last2.y; last2 = last1; + + if (last1.y >= 0) + last1 = stack[TOID(tx, last1.y, size)]; + } + + last1 = last2; last2 = make_pixel(current.x, lasty); lasty = current.y; + + stack[id] = last2; + } + } + + // Store the pointer to the tail at the last pixel of this band + if (lasty != ty + bandSize - 1) + stack[TOID(tx, ty + bandSize - 1, size)] = make_pixel(MARKER, lasty); +} + +__global__ void kernelCreateForwardPointers(pixel_int2_t *input, pixel_int2_t *output, int size, int bandSize) +{ + int tx = blockIdx.x * blockDim.x + threadIdx.x; + int ty = (blockIdx.y+1) * bandSize - 1; + int id = TOID(tx, ty, size); + int lasty = -1, nexty; + pixel_int2_t current; + + // Get the tail pointer + current = input[id]; + + if (current.x == MARKER) + nexty = current.y; + else + nexty = ty; + + for (int i = 0; i < bandSize; i++, id -= size) + if (ty - i == nexty) { + current = make_pixel(lasty, input[id].y); + output[id] = current; + + lasty = nexty; + nexty = current.y; + } + + // Store the pointer to the head at the first pixel of this band + if (lasty != ty - bandSize + 1) + output[id + size] = make_pixel(lasty, MARKER); +} + +__global__ void kernelMergeBands(pixel_int2_t *color, pixel_int2_t *link, pixel_int2_t 
*output, int size, int bandSize) +{ + int tx = blockIdx.x * blockDim.x + threadIdx.x; + int band1 = blockIdx.y * 2; + int band2 = band1 + 1; + int firsty, lasty; + pixel_int2_t last1, last2, current; + // last1 and last2: x component store the x coordinate of the site, + // y component store the backward pointer + // current: y component store the x coordinate of the site, + // x component store the forward pointer + + // Get the two last items of the first list + lasty = band2 * bandSize - 1; + last2 = make_pixel(color[TOID(tx, lasty, size)].x, + link[TOID(tx, lasty, size)].y); + + if (last2.x == MARKER) { + lasty = last2.y; + + if (lasty >= 0) + last2 = make_pixel(color[TOID(tx, lasty, size)].x, + link[TOID(tx, lasty, size)].y); + else + last2 = make_pixel(MARKER, MARKER); + } + + if (last2.y >= 0) { + // Second item at the top of the stack + last1 = make_pixel(color[TOID(tx, last2.y, size)].x, + link[TOID(tx, last2.y, size)].y); + } + + // Get the first item of the second band + firsty = band2 * bandSize; + current = make_pixel(link[TOID(tx, firsty, size)].x, + color[TOID(tx, firsty, size)].x); + + if (current.y == MARKER) { + firsty = current.x; + + if (firsty >= 0) + current = make_pixel(link[TOID(tx, firsty, size)].x, + color[TOID(tx, firsty, size)].x); + else + current = make_pixel(MARKER, MARKER); + } + + // Count the number of item in the second band that survive so far. + // Once it reaches 2, we can stop. 
+ int top = 0; + + while (top < 2 && current.y >= 0) { + // While there's still something on the left + while (last2.y >= 0) { + + if (!dominate(last1.x, last2.y, last2.x, \ + lasty, current.y, firsty, tx)) + break; + + lasty = last2.y; last2 = last1; + top--; + + if (last1.y >= 0) + last1 = make_pixel(color[TOID(tx, last1.y, size)].x, + link[TOID(tx, last1.y, size)].y); + } + + // Update the current pointer + output[TOID(tx, firsty, size)] = make_pixel(current.x, lasty); + + if (lasty >= 0) + output[TOID(tx, lasty, size)] = make_pixel(firsty, last2.y); + + last1 = last2; last2 = make_pixel(current.y, lasty); lasty = firsty; + firsty = current.x; + + top = max(1, top + 1); + + // Advance the current pointer to the next one + if (firsty >= 0) + current = make_pixel(link[TOID(tx, firsty, size)].x, + color[TOID(tx, firsty, size)].x); + else + current = make_pixel(MARKER, MARKER); + } + + // Update the head and tail pointer. + firsty = band1 * bandSize; + lasty = band2 * bandSize; + current = link[TOID(tx, firsty, size)]; + + if (current.y == MARKER && current.x < 0) { // No head? + last1 = link[TOID(tx, lasty, size)]; + + if (last1.y == MARKER) + current.x = last1.x; + else + current.x = lasty; + + output[TOID(tx, firsty, size)] = current; + } + + firsty = band1 * bandSize + bandSize - 1; + lasty = band2 * bandSize + bandSize - 1; + current = link[TOID(tx, lasty, size)]; + + if (current.x == MARKER && current.y < 0) { // No tail? 
+ last1 = link[TOID(tx, firsty, size)]; + + if (last1.x == MARKER) + current.y = last1.y; + else + current.y = firsty; + + output[TOID(tx, lasty, size)] = current; + } +} + + +__global__ void kernelMergeBandsWithSpacing(pixel_int2_t *color, pixel_int2_t *link, pixel_int2_t *output, int size, int bandSize, double sx, double sy) +{ + int tx = blockIdx.x * blockDim.x + threadIdx.x; + int band1 = blockIdx.y * 2; + int band2 = band1 + 1; + int firsty, lasty; + pixel_int2_t last1, last2, current; + // last1 and last2: x component store the x coordinate of the site, + // y component store the backward pointer + // current: y component store the x coordinate of the site, + // x component store the forward pointer + + // Get the two last items of the first list + lasty = band2 * bandSize - 1; + last2 = make_pixel(color[TOID(tx, lasty, size)].x, + link[TOID(tx, lasty, size)].y); + + if (last2.x == MARKER) { + lasty = last2.y; + + if (lasty >= 0) + last2 = make_pixel(color[TOID(tx, lasty, size)].x, + link[TOID(tx, lasty, size)].y); + else + last2 = make_pixel(MARKER, MARKER); + } + + if (last2.y >= 0) { + // Second item at the top of the stack + last1 = make_pixel(color[TOID(tx, last2.y, size)].x, + link[TOID(tx, last2.y, size)].y); + } + + // Get the first item of the second band + firsty = band2 * bandSize; + current = make_pixel(link[TOID(tx, firsty, size)].x, + color[TOID(tx, firsty, size)].x); + + if (current.y == MARKER) { + firsty = current.x; + + if (firsty >= 0) + current = make_pixel(link[TOID(tx, firsty, size)].x, + color[TOID(tx, firsty, size)].x); + else + current = make_pixel(MARKER, MARKER); + } + + // Count the number of item in the second band that survive so far. + // Once it reaches 2, we can stop. 
+ int top = 0; + + while (top < 2 && current.y >= 0) { + // While there's still something on the left + while (last2.y >= 0) { + + if (!dominate_sp(last1.x, last2.y, last2.x, \ + lasty, current.y, firsty, tx, sx, sy)) + break; + + lasty = last2.y; last2 = last1; + top--; + + if (last1.y >= 0) + last1 = make_pixel(color[TOID(tx, last1.y, size)].x, + link[TOID(tx, last1.y, size)].y); + } + + // Update the current pointer + output[TOID(tx, firsty, size)] = make_pixel(current.x, lasty); + + if (lasty >= 0) + output[TOID(tx, lasty, size)] = make_pixel(firsty, last2.y); + + last1 = last2; last2 = make_pixel(current.y, lasty); lasty = firsty; + firsty = current.x; + + top = max(1, top + 1); + + // Advance the current pointer to the next one + if (firsty >= 0) + current = make_pixel(link[TOID(tx, firsty, size)].x, + color[TOID(tx, firsty, size)].x); + else + current = make_pixel(MARKER, MARKER); + } + + // Update the head and tail pointer. + firsty = band1 * bandSize; + lasty = band2 * bandSize; + current = link[TOID(tx, firsty, size)]; + + if (current.y == MARKER && current.x < 0) { // No head? + last1 = link[TOID(tx, lasty, size)]; + + if (last1.y == MARKER) + current.x = last1.x; + else + current.x = lasty; + + output[TOID(tx, firsty, size)] = current; + } + + firsty = band1 * bandSize + bandSize - 1; + lasty = band2 * bandSize + bandSize - 1; + current = link[TOID(tx, lasty, size)]; + + if (current.x == MARKER && current.y < 0) { // No tail? 
+ last1 = link[TOID(tx, firsty, size)]; + + if (last1.x == MARKER) + current.y = last1.y; + else + current.y = firsty; + + output[TOID(tx, lasty, size)] = current; + } +} + +__global__ void kernelDoubleToSingleList(pixel_int2_t *color, pixel_int2_t *link, pixel_int2_t *output, int size) +{ + int tx = blockIdx.x * blockDim.x + threadIdx.x; + int ty = blockIdx.y; + int id = TOID(tx, ty, size); + + output[id] = make_pixel(color[id].x, link[id].y); +} + +__global__ void kernelColor(pixel_int2_t *input, pixel_int2_t *output, int size) +{ + __shared__ pixel_int2_t block[BLOCKSIZE][BLOCKSIZE]; + + int col = threadIdx.x; + int tid = threadIdx.y; + int tx = blockIdx.x * blockDim.x + col; + int dx, dy, lasty; + unsigned int best, dist; + pixel_int2_t last1, last2; + + lasty = size - 1; + + last2 = input[TOID(tx, lasty, size)]; + + if (last2.x == MARKER) { + lasty = max(last2.y, 0); + last2 = input[TOID(tx, lasty, size)]; + } + + if (last2.y >= 0) + last1 = input[TOID(tx, last2.y, size)]; + + int y_start, y_end, n_step = size / blockDim.x; + for(int step = 0; step < n_step; ++step) { + y_start = size - step * blockDim.x - 1; + y_end = size - (step + 1) * blockDim.x; + + for (int ty = y_start - tid; ty >= y_end; ty -= blockDim.y) { + dx = last2.x - tx; dy = lasty - ty; + best = dist = dx * dx + dy * dy; + + while (last2.y >= 0) { + dx = last1.x - tx; dy = last2.y - ty; + dist = dx * dx + dy * dy; + + if (dist > best) + break; + + best = dist; lasty = last2.y; last2 = last1; + + if (last2.y >= 0) + last1 = input[TOID(tx, last2.y, size)]; + } + + block[threadIdx.x][ty - y_end] = make_pixel(lasty, last2.x); + } + + __syncthreads(); + + // note: transposes back to original shape here + if(!threadIdx.y) { + int id = TOID(y_end + threadIdx.x, blockIdx.x * blockDim.x, size); + for(int i = 0; i < blockDim.x; ++i, id+=size) { + output[id] = block[i][threadIdx.x]; + } + } + + __syncthreads(); + } +} + + +__global__ void kernelColorWithSpacing(pixel_int2_t *input, pixel_int2_t *output, 
int size, double sx, double sy) +{ + __shared__ pixel_int2_t block[BLOCKSIZE][BLOCKSIZE]; + + int col = threadIdx.x; + int tid = threadIdx.y; + int tx = blockIdx.x * blockDim.x + col; + int lasty; + double dx, dy, best, dist; + pixel_int2_t last1, last2; + + lasty = size - 1; + + last2 = input[TOID(tx, lasty, size)]; + + if (last2.x == MARKER) { + lasty = max(last2.y, 0); + last2 = input[TOID(tx, lasty, size)]; + } + + if (last2.y >= 0) + last1 = input[TOID(tx, last2.y, size)]; + + int y_start, y_end, n_step = size / blockDim.x; + for(int step = 0; step < n_step; ++step) { + y_start = size - step * blockDim.x - 1; + y_end = size - (step + 1) * blockDim.x; + + for (int ty = y_start - tid; ty >= y_end; ty -= blockDim.y) { + dx = static_cast(last2.x - tx) * sx; + dy = static_cast(lasty - ty) * sy; + best = dist = dx * dx + dy * dy; + + while (last2.y >= 0) { + dx = static_cast(last1.x - tx) * sx; + dy = static_cast(last2.y - ty) * sy; + dist = dx * dx + dy * dy; + + if (dist > best) + break; + + best = dist; lasty = last2.y; last2 = last1; + + if (last2.y >= 0) + last1 = input[TOID(tx, last2.y, size)]; + } + + block[threadIdx.x][ty - y_end] = make_pixel(lasty, last2.x); + } + + __syncthreads(); + + // note: transposes back to original shape here + if(!threadIdx.y) { + int id = TOID(y_end + threadIdx.x, blockIdx.x * blockDim.x, size); + for(int i = 0; i < blockDim.x; ++i, id+=size) { + output[id] = block[i][threadIdx.x]; + } + } + + __syncthreads(); + } +} +} // extern C diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/cuda/pba_kernels_3d.h b/vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/cuda/pba_kernels_3d.h new file mode 100644 index 0000000000000000000000000000000000000000..1fda8aac8771a705cc7bcd2869c2fb6038aec9f7 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupyx/scipy/ndimage/cuda/pba_kernels_3d.h @@ -0,0 +1,387 @@ +// Euclidean Distance Transform +// +// Kernels for the 3D version of the Parallel Banding Algorithm (PBA+). 
+// +// MIT license: see LICENSE in this folder +// Copyright: (c) 2019 School of Computing, National University of Singapore +// +// Modifications by Gregory Lee (2022) (NVIDIA) +// - allow user-defined ENCODED_INT_TYPE, ENCODE, DECODE +// - Add variant kernels with support for non-isotropic pixel dimensions +// These kernels differ from the originals in that they also take sx, sy and +// sz values indicating the pixel size along the x, y and z axes. The kernels +// are identical except that the `dominate` function is replaced by +// `dominate_sp` and the physical spacings are used when computing distances. + + +// START OF DEFINITIONS OVERRIDDEN BY THE PYTHON SCRIPT + +// The values included in this header file are those defined in the original +// PBA+ implementation + +// However, the Python code generation can potentially generate a different +// ENCODE/DECODE that use 20 bits per coordinates instead of 10 bits per +// coordinate with ENCODED_INT_TYPE as `long long`. + + +#ifndef MARKER +#define MARKER -2147483648 +#endif // MARKER + +#ifndef MAX_INT +#define MAX_INT 2147483647 +#endif + +#ifndef BLOCKSIZE +#define BLOCKSIZE 32 +#endif + +#ifndef ENCODE + +// Sites : ENCODE(x, y, z, 0, 0) +// Not sites : ENCODE(0, 0, 0, 1, 0) or MARKER +#define ENCODED_INT_TYPE int +#define ZERO 0 +#define ONE 1 +#define ENCODE(x, y, z, a, b) (((x) << 20) | ((y) << 10) | (z) | ((a) << 31) | ((b) << 30)) +#define DECODE(value, x, y, z) \ + x = ((value) >> 20) & 0x3ff; \ + y = ((value) >> 10) & 0x3ff; \ + z = (value) & 0x3ff + +#define NOTSITE(value) (((value) >> 31) & 1) +#define HASNEXT(value) (((value) >> 30) & 1) + +#define GET_X(value) (((value) >> 20) & 0x3ff) +#define GET_Y(value) (((value) >> 10) & 0x3ff) +#define GET_Z(value) ((NOTSITE((value))) ? 
MAX_INT : ((value) & 0x3ff)) + +#endif // ENCODE + +// END OF DEFINITIONS DEFINED IN THE PYTHON SCRIPT + + +#define LL long long +__device__ bool dominate(LL x_1, LL y_1, LL z_1, LL x_2, LL y_2, LL z_2, LL x_3, LL y_3, LL z_3, LL x_0, LL z_0) +{ + LL k_1 = y_2 - y_1, k_2 = y_3 - y_2; + + return (((y_1 + y_2) * k_1 + ((x_2 - x_1) * (x_1 + x_2 - (x_0 << 1)) + (z_2 - z_1) * (z_1 + z_2 - (z_0 << 1)))) * k_2 > \ + ((y_2 + y_3) * k_2 + ((x_3 - x_2) * (x_2 + x_3 - (x_0 << 1)) + (z_3 - z_2) * (z_2 + z_3 - (z_0 << 1)))) * k_1); +} +#undef LL + +__device__ bool dominate_sp(int _x_1, int _y_1, int _z_1, int _x_2, int _y_2, int _z_2, int _x_3, int _y_3, int _z_3, int _x_0, int _z_0, float sx, float sy, float sz) +{ + float x_1 = static_cast(_x_1) * sx; + float y_1 = static_cast(_y_1) * sy; + float z_1 = static_cast(_z_1) * sz; + float x_2 = static_cast(_x_2) * sx; + float y_2 = static_cast(_y_2) * sy; + float z_2 = static_cast(_z_2) * sz; + float x_3 = static_cast(_x_3) * sx; + float y_3 = static_cast(_y_3) * sy; + float z_3 = static_cast(_z_3) * sz; + float x_0_2 = static_cast(_x_0 << 1) * sx; + float z_0_2 = static_cast(_z_0 << 1) * sz; + float k_1 = y_2 - y_1; + float k_2 = y_3 - y_2; + + return (((y_1 + y_2) * k_1 + ((x_2 - x_1) * (x_1 + x_2 - (x_0_2)) + (z_2 - z_1) * (z_1 + z_2 - (z_0_2)))) * k_2 > \ + ((y_2 + y_3) * k_2 + ((x_3 - x_2) * (x_2 + x_3 - (x_0_2)) + (z_3 - z_2) * (z_2 + z_3 - (z_0_2)))) * k_1); +} + +#define TOID(x, y, z, size) ((((z) * (size)) + (y)) * (size) + (x)) + + +extern "C"{ + +__global__ void kernelFloodZ(ENCODED_INT_TYPE *input, ENCODED_INT_TYPE *output, int size) +{ + + int tx = blockIdx.x * blockDim.x + threadIdx.x; + int ty = blockIdx.y * blockDim.y + threadIdx.y; + int tz = 0; + + int plane = size * size; + int id = TOID(tx, ty, tz, size); + ENCODED_INT_TYPE pixel1, pixel2; + + pixel1 = ENCODE(ZERO,ZERO,ZERO,ONE,ZERO); + + // Sweep down + for (int i = 0; i < size; i++, id += plane) { + pixel2 = input[id]; + + if (!NOTSITE(pixel2)) + pixel1 = 
pixel2; + + output[id] = pixel1; + } + + ENCODED_INT_TYPE dist1, dist2, nz; + + id -= plane + plane; + + // Sweep up + for (int i = size - 2; i >= 0; i--, id -= plane) { + nz = GET_Z(pixel1); + dist1 = abs(nz - (tz + i)); + + pixel2 = output[id]; + nz = GET_Z(pixel2); + dist2 = abs(nz - (tz + i)); + + if (dist2 < dist1) + pixel1 = pixel2; + + output[id] = pixel1; + } +} + + +__global__ void kernelMaurerAxis(ENCODED_INT_TYPE *input, ENCODED_INT_TYPE *stack, int size) +{ + int tx = blockIdx.x * blockDim.x + threadIdx.x; + int tz = blockIdx.y * blockDim.y + threadIdx.y; + int ty = 0; + + int id = TOID(tx, ty, tz, size); + + ENCODED_INT_TYPE lasty = 0; + ENCODED_INT_TYPE x1, y1, z1, x2, y2, z2, nx, ny, nz; + ENCODED_INT_TYPE p = ENCODE(ZERO,ZERO,ZERO,ONE,ZERO), s1 = ENCODE(ZERO,ZERO,ZERO,ONE,ZERO), s2 = ENCODE(ZERO,ZERO,ZERO,ONE,ZERO); + ENCODED_INT_TYPE flag = 0; + + for (ty = 0; ty < size; ++ty, id += size) { + p = input[id]; + + if (!NOTSITE(p)) { + + while (HASNEXT(s2)) { + DECODE(s1, x1, y1, z1); + DECODE(s2, x2, y2, z2); + DECODE(p, nx, ny, nz); + + if (!dominate(x1, y2, z1, x2, lasty, z2, nx, ty, nz, tx, tz)) + break; + + lasty = y2; s2 = s1; y2 = y1; + + if (HASNEXT(s2)) + s1 = stack[TOID(tx, y2, tz, size)]; + } + + DECODE(p, nx, ny, nz); + s1 = s2; + s2 = ENCODE(nx, lasty, nz, ZERO, flag); + y2 = lasty; + lasty = ty; + + stack[id] = s2; + + flag = ONE; + } + } + + if (NOTSITE(p)) + stack[TOID(tx, ty - 1, tz, size)] = ENCODE(ZERO, lasty, ZERO, ONE, flag); +} + +__global__ void kernelMaurerAxisWithSpacing(ENCODED_INT_TYPE *input, ENCODED_INT_TYPE *stack, int size, double sx, double sy, double sz) +{ + int tx = blockIdx.x * blockDim.x + threadIdx.x; + int tz = blockIdx.y * blockDim.y + threadIdx.y; + int ty = 0; + + int id = TOID(tx, ty, tz, size); + + ENCODED_INT_TYPE lasty = 0; + ENCODED_INT_TYPE x1, y1, z1, x2, y2, z2, nx, ny, nz; + ENCODED_INT_TYPE p = ENCODE(ZERO,ZERO,ZERO,ONE,ZERO), s1 = ENCODE(ZERO,ZERO,ZERO,ONE,ZERO), s2 = ENCODE(ZERO,ZERO,ZERO,ONE,ZERO); 
+ ENCODED_INT_TYPE flag = 0; + + for (ty = 0; ty < size; ++ty, id += size) { + p = input[id]; + + if (!NOTSITE(p)) { + + while (HASNEXT(s2)) { + DECODE(s1, x1, y1, z1); + DECODE(s2, x2, y2, z2); + DECODE(p, nx, ny, nz); + + if (!dominate_sp(x1, y2, z1, x2, lasty, z2, nx, ty, nz, tx, tz, sx, sy, sz)) + break; + + lasty = y2; s2 = s1; y2 = y1; + + if (HASNEXT(s2)) + s1 = stack[TOID(tx, y2, tz, size)]; + } + + DECODE(p, nx, ny, nz); + s1 = s2; + s2 = ENCODE(nx, lasty, nz, ZERO, flag); + y2 = lasty; + lasty = ty; + + stack[id] = s2; + + flag = ONE; + } + } + + if (NOTSITE(p)) + stack[TOID(tx, ty - 1, tz, size)] = ENCODE(ZERO, lasty, ZERO, ONE, flag); +} + +__global__ void kernelColorAxis(ENCODED_INT_TYPE *input, ENCODED_INT_TYPE *output, int size) +{ + __shared__ ENCODED_INT_TYPE block[BLOCKSIZE][BLOCKSIZE]; + + int col = threadIdx.x; + int tid = threadIdx.y; + int tx = blockIdx.x * blockDim.x + col; + int tz = blockIdx.y; + + ENCODED_INT_TYPE x1, y1, z1, x2, y2, z2; + ENCODED_INT_TYPE last1 = ENCODE(ZERO,ZERO,ZERO,ONE,ZERO), last2 = ENCODE(ZERO,ZERO,ZERO,ONE,ZERO), lasty; + long long dx, dy, dz, best, dist; + + lasty = size - 1; + + last2 = input[TOID(tx, lasty, tz, size)]; + DECODE(last2, x2, y2, z2); + + if (NOTSITE(last2)) { + lasty = y2; + if(HASNEXT(last2)) { + last2 = input[TOID(tx, lasty, tz, size)]; + DECODE(last2, x2, y2, z2); + } + } + + if (HASNEXT(last2)) { + last1 = input[TOID(tx, y2, tz, size)]; + DECODE(last1, x1, y1, z1); + } + + int y_start, y_end, n_step = size / blockDim.x; + for(int step = 0; step < n_step; ++step) { + y_start = size - step * blockDim.x - 1; + y_end = size - (step + 1) * blockDim.x; + + for (int ty = y_start - tid; ty >= y_end; ty -= blockDim.y) { + dx = x2 - tx; dy = lasty - ty; dz = z2 - tz; + best = dx * dx + dy * dy + dz * dz; + + while (HASNEXT(last2)) { + dx = x1 - tx; dy = y2 - ty; dz = z1 - tz; + dist = dx * dx + dy * dy + dz * dz; + + if(dist > best) break; + + best = dist; lasty = y2; last2 = last1; + DECODE(last2, x2, 
y2, z2); + + if (HASNEXT(last2)) { + last1 = input[TOID(tx, y2, tz, size)]; + DECODE(last1, x1, y1, z1); + } + } + + block[threadIdx.x][ty - y_end] = ENCODE(lasty, x2, z2, NOTSITE(last2), ZERO); + } + + __syncthreads(); + + if(!threadIdx.y) { + int id = TOID(y_end + threadIdx.x, blockIdx.x * blockDim.x, tz, size); + for(int i = 0; i < blockDim.x; i++, id+=size) { + output[id] = block[i][threadIdx.x]; + } + } + + __syncthreads(); + } +} + + +__global__ void kernelColorAxisWithSpacing(ENCODED_INT_TYPE *input, ENCODED_INT_TYPE *output, int size, double sx, double sy, double sz) +{ + __shared__ ENCODED_INT_TYPE block[BLOCKSIZE][BLOCKSIZE]; + + int col = threadIdx.x; + int tid = threadIdx.y; + int tx = blockIdx.x * blockDim.x + col; + int tz = blockIdx.y; + + ENCODED_INT_TYPE x1, y1, z1, x2, y2, z2; + ENCODED_INT_TYPE last1 = ENCODE(ZERO,ZERO,ZERO,ONE,ZERO), last2 = ENCODE(ZERO,ZERO,ZERO,ONE,ZERO), lasty; + double dx, dy, dz, best, dist; + + lasty = size - 1; + + last2 = input[TOID(tx, lasty, tz, size)]; + DECODE(last2, x2, y2, z2); + + if (NOTSITE(last2)) { + lasty = y2; + if(HASNEXT(last2)) { + last2 = input[TOID(tx, lasty, tz, size)]; + DECODE(last2, x2, y2, z2); + } + } + + if (HASNEXT(last2)) { + last1 = input[TOID(tx, y2, tz, size)]; + DECODE(last1, x1, y1, z1); + } + + int y_start, y_end, n_step = size / blockDim.x; + for(int step = 0; step < n_step; ++step) { + y_start = size - step * blockDim.x - 1; + y_end = size - (step + 1) * blockDim.x; + + for (int ty = y_start - tid; ty >= y_end; ty -= blockDim.y) { + dx = static_cast(x2 - tx) * sx; + dy = static_cast(lasty - ty) * sy; + dz = static_cast(z2 - tz) * sz; + best = dx * dx + dy * dy + dz * dz; + + while (HASNEXT(last2)) { + dx = static_cast(x1 - tx) * sx; + dy = static_cast(y2 - ty) * sy; + dz = static_cast(z1 - tz) * sz; + dist = dx * dx + dy * dy + dz * dz; + + if(dist > best) break; + + best = dist; lasty = y2; last2 = last1; + DECODE(last2, x2, y2, z2); + + if (HASNEXT(last2)) { + last1 = input[TOID(tx, 
y2, tz, size)]; + DECODE(last1, x1, y1, z1); + } + } + + block[threadIdx.x][ty - y_end] = ENCODE(lasty, x2, z2, NOTSITE(last2), ZERO); + } + + __syncthreads(); + + if(!threadIdx.y) { + int id = TOID(y_end + threadIdx.x, blockIdx.x * blockDim.x, tz, size); + for(int i = 0; i < blockDim.x; i++, id+=size) { + output[id] = block[i][threadIdx.x]; + } + } + + __syncthreads(); + } +} + + +} // extern C diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/__pycache__/__init__.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d68a90ac8b7113aba4b3157518cfdc301bfeac42 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/__pycache__/__init__.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/__pycache__/_base.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/__pycache__/_base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9f4ff3c431b39e231e147fe3ba770fcf4b9dfbcf Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/__pycache__/_base.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/__pycache__/_compressed.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/__pycache__/_compressed.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ef15ae18d377b909b684223abd2472b695fa0027 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/__pycache__/_compressed.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/__pycache__/_coo.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/__pycache__/_coo.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..51b711002a0d79e00ceec6edd7217707ba56fc32 Binary 
files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/__pycache__/_coo.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/__pycache__/_csr.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/__pycache__/_csr.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..afd0c49d6eae307fd1f62d41d2c79a716f55d6b0 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/__pycache__/_csr.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/__pycache__/_data.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/__pycache__/_data.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..24654ef2e52a0ac0b1eee3bdcc567432464ad7eb Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/__pycache__/_data.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/__pycache__/_dia.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/__pycache__/_dia.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c69db503d8429f8671155682028a07afdf6fbf7f Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/__pycache__/_dia.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/__pycache__/_extract.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/__pycache__/_extract.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..47cee313e5727b8d80a53b70c1de13b871f5bc31 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/__pycache__/_extract.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/__pycache__/_index.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/__pycache__/_index.cpython-310.pyc 
new file mode 100644 index 0000000000000000000000000000000000000000..18f6ce293d50806e03af3d7a1c40de51b368f6cb Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/__pycache__/_index.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/__pycache__/_sputils.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/__pycache__/_sputils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..10c695bce314f8cca0cf84f1a7c76fe1e107b755 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/scipy/sparse/__pycache__/_sputils.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/packaging/__init__.py b/vllm/lib/python3.10/site-packages/packaging/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d79f73c574ffc759ef5d2145b1ec742d85c2500b --- /dev/null +++ b/vllm/lib/python3.10/site-packages/packaging/__init__.py @@ -0,0 +1,15 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. 
+ +__title__ = "packaging" +__summary__ = "Core utilities for Python packages" +__uri__ = "https://github.com/pypa/packaging" + +__version__ = "24.2" + +__author__ = "Donald Stufft and individual contributors" +__email__ = "donald@stufft.io" + +__license__ = "BSD-2-Clause or Apache-2.0" +__copyright__ = f"2014 {__author__}" diff --git a/vllm/lib/python3.10/site-packages/packaging/__pycache__/_structures.cpython-310.pyc b/vllm/lib/python3.10/site-packages/packaging/__pycache__/_structures.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8416572f807323adb95890d4b22af93757ab3d82 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/packaging/__pycache__/_structures.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/packaging/__pycache__/requirements.cpython-310.pyc b/vllm/lib/python3.10/site-packages/packaging/__pycache__/requirements.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3307f54979119efed185cc3945fc9e69dbe12e71 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/packaging/__pycache__/requirements.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/packaging/_elffile.py b/vllm/lib/python3.10/site-packages/packaging/_elffile.py new file mode 100644 index 0000000000000000000000000000000000000000..25f4282cc29cb03d7be881f03dee841d7dbc215a --- /dev/null +++ b/vllm/lib/python3.10/site-packages/packaging/_elffile.py @@ -0,0 +1,110 @@ +""" +ELF file parser. + +This provides a class ``ELFFile`` that parses an ELF executable in a similar +interface to ``ZipFile``. Only the read interface is implemented. 
+ +Based on: https://gist.github.com/lyssdod/f51579ae8d93c8657a5564aefc2ffbca +ELF header: https://refspecs.linuxfoundation.org/elf/gabi4+/ch4.eheader.html +""" + +from __future__ import annotations + +import enum +import os +import struct +from typing import IO + + +class ELFInvalid(ValueError): + pass + + +class EIClass(enum.IntEnum): + C32 = 1 + C64 = 2 + + +class EIData(enum.IntEnum): + Lsb = 1 + Msb = 2 + + +class EMachine(enum.IntEnum): + I386 = 3 + S390 = 22 + Arm = 40 + X8664 = 62 + AArc64 = 183 + + +class ELFFile: + """ + Representation of an ELF executable. + """ + + def __init__(self, f: IO[bytes]) -> None: + self._f = f + + try: + ident = self._read("16B") + except struct.error as e: + raise ELFInvalid("unable to parse identification") from e + magic = bytes(ident[:4]) + if magic != b"\x7fELF": + raise ELFInvalid(f"invalid magic: {magic!r}") + + self.capacity = ident[4] # Format for program header (bitness). + self.encoding = ident[5] # Data structure encoding (endianness). + + try: + # e_fmt: Format for program header. + # p_fmt: Format for section header. + # p_idx: Indexes to find p_type, p_offset, and p_filesz. + e_fmt, self._p_fmt, self._p_idx = { + (1, 1): ("HHIIIIIHHH", ">IIIIIIII", (0, 1, 4)), # 32-bit MSB. + (2, 1): ("HHIQQQIHHH", ">IIQQQQQQ", (0, 2, 5)), # 64-bit MSB. + }[(self.capacity, self.encoding)] + except KeyError as e: + raise ELFInvalid( + f"unrecognized capacity ({self.capacity}) or " + f"encoding ({self.encoding})" + ) from e + + try: + ( + _, + self.machine, # Architecture type. + _, + _, + self._e_phoff, # Offset of program header. + _, + self.flags, # Processor-specific flags. + _, + self._e_phentsize, # Size of section. + self._e_phnum, # Number of sections. 
+ ) = self._read(e_fmt) + except struct.error as e: + raise ELFInvalid("unable to parse machine and section information") from e + + def _read(self, fmt: str) -> tuple[int, ...]: + return struct.unpack(fmt, self._f.read(struct.calcsize(fmt))) + + @property + def interpreter(self) -> str | None: + """ + The path recorded in the ``PT_INTERP`` section header. + """ + for index in range(self._e_phnum): + self._f.seek(self._e_phoff + self._e_phentsize * index) + try: + data = self._read(self._p_fmt) + except struct.error: + continue + if data[self._p_idx[0]] != 3: # Not PT_INTERP. + continue + self._f.seek(data[self._p_idx[1]]) + return os.fsdecode(self._f.read(data[self._p_idx[2]])).strip("\0") + return None diff --git a/vllm/lib/python3.10/site-packages/packaging/_manylinux.py b/vllm/lib/python3.10/site-packages/packaging/_manylinux.py new file mode 100644 index 0000000000000000000000000000000000000000..61339a6fcc1b82803136f3bf980e0c8f574b2220 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/packaging/_manylinux.py @@ -0,0 +1,263 @@ +from __future__ import annotations + +import collections +import contextlib +import functools +import os +import re +import sys +import warnings +from typing import Generator, Iterator, NamedTuple, Sequence + +from ._elffile import EIClass, EIData, ELFFile, EMachine + +EF_ARM_ABIMASK = 0xFF000000 +EF_ARM_ABI_VER5 = 0x05000000 +EF_ARM_ABI_FLOAT_HARD = 0x00000400 + + +# `os.PathLike` not a generic type until Python 3.9, so sticking with `str` +# as the type for `path` until then. 
+@contextlib.contextmanager +def _parse_elf(path: str) -> Generator[ELFFile | None, None, None]: + try: + with open(path, "rb") as f: + yield ELFFile(f) + except (OSError, TypeError, ValueError): + yield None + + +def _is_linux_armhf(executable: str) -> bool: + # hard-float ABI can be detected from the ELF header of the running + # process + # https://static.docs.arm.com/ihi0044/g/aaelf32.pdf + with _parse_elf(executable) as f: + return ( + f is not None + and f.capacity == EIClass.C32 + and f.encoding == EIData.Lsb + and f.machine == EMachine.Arm + and f.flags & EF_ARM_ABIMASK == EF_ARM_ABI_VER5 + and f.flags & EF_ARM_ABI_FLOAT_HARD == EF_ARM_ABI_FLOAT_HARD + ) + + +def _is_linux_i686(executable: str) -> bool: + with _parse_elf(executable) as f: + return ( + f is not None + and f.capacity == EIClass.C32 + and f.encoding == EIData.Lsb + and f.machine == EMachine.I386 + ) + + +def _have_compatible_abi(executable: str, archs: Sequence[str]) -> bool: + if "armv7l" in archs: + return _is_linux_armhf(executable) + if "i686" in archs: + return _is_linux_i686(executable) + allowed_archs = { + "x86_64", + "aarch64", + "ppc64", + "ppc64le", + "s390x", + "loongarch64", + "riscv64", + } + return any(arch in allowed_archs for arch in archs) + + +# If glibc ever changes its major version, we need to know what the last +# minor version was, so we can build the complete list of all versions. +# For now, guess what the highest minor version might be, assume it will +# be 50 for testing. Once this actually happens, update the dictionary +# with the actual value. +_LAST_GLIBC_MINOR: dict[int, int] = collections.defaultdict(lambda: 50) + + +class _GLibCVersion(NamedTuple): + major: int + minor: int + + +def _glibc_version_string_confstr() -> str | None: + """ + Primary implementation of glibc_version_string using os.confstr. + """ + # os.confstr is quite a bit faster than ctypes.DLL. It's also less likely + # to be broken or missing. 
This strategy is used in the standard library + # platform module. + # https://github.com/python/cpython/blob/fcf1d003bf4f0100c/Lib/platform.py#L175-L183 + try: + # Should be a string like "glibc 2.17". + version_string: str | None = os.confstr("CS_GNU_LIBC_VERSION") + assert version_string is not None + _, version = version_string.rsplit() + except (AssertionError, AttributeError, OSError, ValueError): + # os.confstr() or CS_GNU_LIBC_VERSION not available (or a bad value)... + return None + return version + + +def _glibc_version_string_ctypes() -> str | None: + """ + Fallback implementation of glibc_version_string using ctypes. + """ + try: + import ctypes + except ImportError: + return None + + # ctypes.CDLL(None) internally calls dlopen(NULL), and as the dlopen + # manpage says, "If filename is NULL, then the returned handle is for the + # main program". This way we can let the linker do the work to figure out + # which libc our process is actually using. + # + # We must also handle the special case where the executable is not a + # dynamically linked executable. This can occur when using musl libc, + # for example. In this situation, dlopen() will error, leading to an + # OSError. Interestingly, at least in the case of musl, there is no + # errno set on the OSError. The single string argument used to construct + # OSError comes from libc itself and is therefore not portable to + # hard code here. In any case, failure to call dlopen() means we + # can proceed, so we bail on our attempt. + try: + process_namespace = ctypes.CDLL(None) + except OSError: + return None + + try: + gnu_get_libc_version = process_namespace.gnu_get_libc_version + except AttributeError: + # Symbol doesn't exist -> therefore, we are not linked to + # glibc. 
+ return None + + # Call gnu_get_libc_version, which returns a string like "2.5" + gnu_get_libc_version.restype = ctypes.c_char_p + version_str: str = gnu_get_libc_version() + # py2 / py3 compatibility: + if not isinstance(version_str, str): + version_str = version_str.decode("ascii") + + return version_str + + +def _glibc_version_string() -> str | None: + """Returns glibc version string, or None if not using glibc.""" + return _glibc_version_string_confstr() or _glibc_version_string_ctypes() + + +def _parse_glibc_version(version_str: str) -> tuple[int, int]: + """Parse glibc version. + + We use a regexp instead of str.split because we want to discard any + random junk that might come after the minor version -- this might happen + in patched/forked versions of glibc (e.g. Linaro's version of glibc + uses version strings like "2.20-2014.11"). See gh-3588. + """ + m = re.match(r"(?P[0-9]+)\.(?P[0-9]+)", version_str) + if not m: + warnings.warn( + f"Expected glibc version with 2 components major.minor," + f" got: {version_str}", + RuntimeWarning, + stacklevel=2, + ) + return -1, -1 + return int(m.group("major")), int(m.group("minor")) + + +@functools.lru_cache +def _get_glibc_version() -> tuple[int, int]: + version_str = _glibc_version_string() + if version_str is None: + return (-1, -1) + return _parse_glibc_version(version_str) + + +# From PEP 513, PEP 600 +def _is_compatible(arch: str, version: _GLibCVersion) -> bool: + sys_glibc = _get_glibc_version() + if sys_glibc < version: + return False + # Check for presence of _manylinux module. 
+ try: + import _manylinux + except ImportError: + return True + if hasattr(_manylinux, "manylinux_compatible"): + result = _manylinux.manylinux_compatible(version[0], version[1], arch) + if result is not None: + return bool(result) + return True + if version == _GLibCVersion(2, 5): + if hasattr(_manylinux, "manylinux1_compatible"): + return bool(_manylinux.manylinux1_compatible) + if version == _GLibCVersion(2, 12): + if hasattr(_manylinux, "manylinux2010_compatible"): + return bool(_manylinux.manylinux2010_compatible) + if version == _GLibCVersion(2, 17): + if hasattr(_manylinux, "manylinux2014_compatible"): + return bool(_manylinux.manylinux2014_compatible) + return True + + +_LEGACY_MANYLINUX_MAP = { + # CentOS 7 w/ glibc 2.17 (PEP 599) + (2, 17): "manylinux2014", + # CentOS 6 w/ glibc 2.12 (PEP 571) + (2, 12): "manylinux2010", + # CentOS 5 w/ glibc 2.5 (PEP 513) + (2, 5): "manylinux1", +} + + +def platform_tags(archs: Sequence[str]) -> Iterator[str]: + """Generate manylinux tags compatible to the current platform. + + :param archs: Sequence of compatible architectures. + The first one shall be the closest to the actual architecture and be the part of + platform tag after the ``linux_`` prefix, e.g. ``x86_64``. + The ``linux_`` prefix is assumed as a prerequisite for the current platform to + be manylinux-compatible. + + :returns: An iterator of compatible manylinux tags. + """ + if not _have_compatible_abi(sys.executable, archs): + return + # Oldest glibc to be supported regardless of architecture is (2, 17). + too_old_glibc2 = _GLibCVersion(2, 16) + if set(archs) & {"x86_64", "i686"}: + # On x86/i686 also oldest glibc to be supported is (2, 5). + too_old_glibc2 = _GLibCVersion(2, 4) + current_glibc = _GLibCVersion(*_get_glibc_version()) + glibc_max_list = [current_glibc] + # We can assume compatibility across glibc major versions. 
+ # https://sourceware.org/bugzilla/show_bug.cgi?id=24636 + # + # Build a list of maximum glibc versions so that we can + # output the canonical list of all glibc from current_glibc + # down to too_old_glibc2, including all intermediary versions. + for glibc_major in range(current_glibc.major - 1, 1, -1): + glibc_minor = _LAST_GLIBC_MINOR[glibc_major] + glibc_max_list.append(_GLibCVersion(glibc_major, glibc_minor)) + for arch in archs: + for glibc_max in glibc_max_list: + if glibc_max.major == too_old_glibc2.major: + min_minor = too_old_glibc2.minor + else: + # For other glibc major versions oldest supported is (x, 0). + min_minor = -1 + for glibc_minor in range(glibc_max.minor, min_minor, -1): + glibc_version = _GLibCVersion(glibc_max.major, glibc_minor) + tag = "manylinux_{}_{}".format(*glibc_version) + if _is_compatible(arch, glibc_version): + yield f"{tag}_{arch}" + # Handle the legacy manylinux1, manylinux2010, manylinux2014 tags. + if glibc_version in _LEGACY_MANYLINUX_MAP: + legacy_tag = _LEGACY_MANYLINUX_MAP[glibc_version] + if _is_compatible(arch, glibc_version): + yield f"{legacy_tag}_{arch}" diff --git a/vllm/lib/python3.10/site-packages/packaging/_musllinux.py b/vllm/lib/python3.10/site-packages/packaging/_musllinux.py new file mode 100644 index 0000000000000000000000000000000000000000..d2bf30b56319ba862c5c9a1a39a87c6d1cb68718 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/packaging/_musllinux.py @@ -0,0 +1,85 @@ +"""PEP 656 support. + +This module implements logic to detect if the currently running Python is +linked against musl, and what musl version is used. 
+""" + +from __future__ import annotations + +import functools +import re +import subprocess +import sys +from typing import Iterator, NamedTuple, Sequence + +from ._elffile import ELFFile + + +class _MuslVersion(NamedTuple): + major: int + minor: int + + +def _parse_musl_version(output: str) -> _MuslVersion | None: + lines = [n for n in (n.strip() for n in output.splitlines()) if n] + if len(lines) < 2 or lines[0][:4] != "musl": + return None + m = re.match(r"Version (\d+)\.(\d+)", lines[1]) + if not m: + return None + return _MuslVersion(major=int(m.group(1)), minor=int(m.group(2))) + + +@functools.lru_cache +def _get_musl_version(executable: str) -> _MuslVersion | None: + """Detect currently-running musl runtime version. + + This is done by checking the specified executable's dynamic linking + information, and invoking the loader to parse its output for a version + string. If the loader is musl, the output would be something like:: + + musl libc (x86_64) + Version 1.2.2 + Dynamic Program Loader + """ + try: + with open(executable, "rb") as f: + ld = ELFFile(f).interpreter + except (OSError, TypeError, ValueError): + return None + if ld is None or "musl" not in ld: + return None + proc = subprocess.run([ld], stderr=subprocess.PIPE, text=True) + return _parse_musl_version(proc.stderr) + + +def platform_tags(archs: Sequence[str]) -> Iterator[str]: + """Generate musllinux tags compatible to the current platform. + + :param archs: Sequence of compatible architectures. + The first one shall be the closest to the actual architecture and be the part of + platform tag after the ``linux_`` prefix, e.g. ``x86_64``. + The ``linux_`` prefix is assumed as a prerequisite for the current platform to + be musllinux-compatible. + + :returns: An iterator of compatible musllinux tags. + """ + sys_musl = _get_musl_version(sys.executable) + if sys_musl is None: # Python not dynamically linked against musl. 
+ return + for arch in archs: + for minor in range(sys_musl.minor, -1, -1): + yield f"musllinux_{sys_musl.major}_{minor}_{arch}" + + +if __name__ == "__main__": # pragma: no cover + import sysconfig + + plat = sysconfig.get_platform() + assert plat.startswith("linux-"), "not linux" + + print("plat:", plat) + print("musl:", _get_musl_version(sys.executable)) + print("tags:", end=" ") + for t in platform_tags(re.sub(r"[.-]", "_", plat.split("-", 1)[-1])): + print(t, end="\n ") diff --git a/vllm/lib/python3.10/site-packages/packaging/_parser.py b/vllm/lib/python3.10/site-packages/packaging/_parser.py new file mode 100644 index 0000000000000000000000000000000000000000..c1238c06eab95f8c90c393383a703aa3b8c366a5 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/packaging/_parser.py @@ -0,0 +1,354 @@ +"""Handwritten parser of dependency specifiers. + +The docstring for each __parse_* function contains EBNF-inspired grammar representing +the implementation. +""" + +from __future__ import annotations + +import ast +from typing import NamedTuple, Sequence, Tuple, Union + +from ._tokenizer import DEFAULT_RULES, Tokenizer + + +class Node: + def __init__(self, value: str) -> None: + self.value = value + + def __str__(self) -> str: + return self.value + + def __repr__(self) -> str: + return f"<{self.__class__.__name__}('{self}')>" + + def serialize(self) -> str: + raise NotImplementedError + + +class Variable(Node): + def serialize(self) -> str: + return str(self) + + +class Value(Node): + def serialize(self) -> str: + return f'"{self}"' + + +class Op(Node): + def serialize(self) -> str: + return str(self) + + +MarkerVar = Union[Variable, Value] +MarkerItem = Tuple[MarkerVar, Op, MarkerVar] +MarkerAtom = Union[MarkerItem, Sequence["MarkerAtom"]] +MarkerList = Sequence[Union["MarkerList", MarkerAtom, str]] + + +class ParsedRequirement(NamedTuple): + name: str + url: str + extras: list[str] + specifier: str + marker: MarkerList | None + + +# 
-------------------------------------------------------------------------------------- +# Recursive descent parser for dependency specifier +# -------------------------------------------------------------------------------------- +def parse_requirement(source: str) -> ParsedRequirement: + return _parse_requirement(Tokenizer(source, rules=DEFAULT_RULES)) + + +def _parse_requirement(tokenizer: Tokenizer) -> ParsedRequirement: + """ + requirement = WS? IDENTIFIER WS? extras WS? requirement_details + """ + tokenizer.consume("WS") + + name_token = tokenizer.expect( + "IDENTIFIER", expected="package name at the start of dependency specifier" + ) + name = name_token.text + tokenizer.consume("WS") + + extras = _parse_extras(tokenizer) + tokenizer.consume("WS") + + url, specifier, marker = _parse_requirement_details(tokenizer) + tokenizer.expect("END", expected="end of dependency specifier") + + return ParsedRequirement(name, url, extras, specifier, marker) + + +def _parse_requirement_details( + tokenizer: Tokenizer, +) -> tuple[str, str, MarkerList | None]: + """ + requirement_details = AT URL (WS requirement_marker?)? + | specifier WS? (requirement_marker)? + """ + + specifier = "" + url = "" + marker = None + + if tokenizer.check("AT"): + tokenizer.read() + tokenizer.consume("WS") + + url_start = tokenizer.position + url = tokenizer.expect("URL", expected="URL after @").text + if tokenizer.check("END", peek=True): + return (url, specifier, marker) + + tokenizer.expect("WS", expected="whitespace after URL") + + # The input might end after whitespace. 
+ if tokenizer.check("END", peek=True): + return (url, specifier, marker) + + marker = _parse_requirement_marker( + tokenizer, span_start=url_start, after="URL and whitespace" + ) + else: + specifier_start = tokenizer.position + specifier = _parse_specifier(tokenizer) + tokenizer.consume("WS") + + if tokenizer.check("END", peek=True): + return (url, specifier, marker) + + marker = _parse_requirement_marker( + tokenizer, + span_start=specifier_start, + after=( + "version specifier" + if specifier + else "name and no valid version specifier" + ), + ) + + return (url, specifier, marker) + + +def _parse_requirement_marker( + tokenizer: Tokenizer, *, span_start: int, after: str +) -> MarkerList: + """ + requirement_marker = SEMICOLON marker WS? + """ + + if not tokenizer.check("SEMICOLON"): + tokenizer.raise_syntax_error( + f"Expected end or semicolon (after {after})", + span_start=span_start, + ) + tokenizer.read() + + marker = _parse_marker(tokenizer) + tokenizer.consume("WS") + + return marker + + +def _parse_extras(tokenizer: Tokenizer) -> list[str]: + """ + extras = (LEFT_BRACKET wsp* extras_list? wsp* RIGHT_BRACKET)? 
+ """ + if not tokenizer.check("LEFT_BRACKET", peek=True): + return [] + + with tokenizer.enclosing_tokens( + "LEFT_BRACKET", + "RIGHT_BRACKET", + around="extras", + ): + tokenizer.consume("WS") + extras = _parse_extras_list(tokenizer) + tokenizer.consume("WS") + + return extras + + +def _parse_extras_list(tokenizer: Tokenizer) -> list[str]: + """ + extras_list = identifier (wsp* ',' wsp* identifier)* + """ + extras: list[str] = [] + + if not tokenizer.check("IDENTIFIER"): + return extras + + extras.append(tokenizer.read().text) + + while True: + tokenizer.consume("WS") + if tokenizer.check("IDENTIFIER", peek=True): + tokenizer.raise_syntax_error("Expected comma between extra names") + elif not tokenizer.check("COMMA"): + break + + tokenizer.read() + tokenizer.consume("WS") + + extra_token = tokenizer.expect("IDENTIFIER", expected="extra name after comma") + extras.append(extra_token.text) + + return extras + + +def _parse_specifier(tokenizer: Tokenizer) -> str: + """ + specifier = LEFT_PARENTHESIS WS? version_many WS? RIGHT_PARENTHESIS + | WS? version_many WS? + """ + with tokenizer.enclosing_tokens( + "LEFT_PARENTHESIS", + "RIGHT_PARENTHESIS", + around="version specifier", + ): + tokenizer.consume("WS") + parsed_specifiers = _parse_version_many(tokenizer) + tokenizer.consume("WS") + + return parsed_specifiers + + +def _parse_version_many(tokenizer: Tokenizer) -> str: + """ + version_many = (SPECIFIER (WS? COMMA WS? SPECIFIER)*)? 
+ """ + parsed_specifiers = "" + while tokenizer.check("SPECIFIER"): + span_start = tokenizer.position + parsed_specifiers += tokenizer.read().text + if tokenizer.check("VERSION_PREFIX_TRAIL", peek=True): + tokenizer.raise_syntax_error( + ".* suffix can only be used with `==` or `!=` operators", + span_start=span_start, + span_end=tokenizer.position + 1, + ) + if tokenizer.check("VERSION_LOCAL_LABEL_TRAIL", peek=True): + tokenizer.raise_syntax_error( + "Local version label can only be used with `==` or `!=` operators", + span_start=span_start, + span_end=tokenizer.position, + ) + tokenizer.consume("WS") + if not tokenizer.check("COMMA"): + break + parsed_specifiers += tokenizer.read().text + tokenizer.consume("WS") + + return parsed_specifiers + + +# -------------------------------------------------------------------------------------- +# Recursive descent parser for marker expression +# -------------------------------------------------------------------------------------- +def parse_marker(source: str) -> MarkerList: + return _parse_full_marker(Tokenizer(source, rules=DEFAULT_RULES)) + + +def _parse_full_marker(tokenizer: Tokenizer) -> MarkerList: + retval = _parse_marker(tokenizer) + tokenizer.expect("END", expected="end of marker expression") + return retval + + +def _parse_marker(tokenizer: Tokenizer) -> MarkerList: + """ + marker = marker_atom (BOOLOP marker_atom)+ + """ + expression = [_parse_marker_atom(tokenizer)] + while tokenizer.check("BOOLOP"): + token = tokenizer.read() + expr_right = _parse_marker_atom(tokenizer) + expression.extend((token.text, expr_right)) + return expression + + +def _parse_marker_atom(tokenizer: Tokenizer) -> MarkerAtom: + """ + marker_atom = WS? LEFT_PARENTHESIS WS? marker WS? RIGHT_PARENTHESIS WS? + | WS? marker_item WS? 
+ """ + + tokenizer.consume("WS") + if tokenizer.check("LEFT_PARENTHESIS", peek=True): + with tokenizer.enclosing_tokens( + "LEFT_PARENTHESIS", + "RIGHT_PARENTHESIS", + around="marker expression", + ): + tokenizer.consume("WS") + marker: MarkerAtom = _parse_marker(tokenizer) + tokenizer.consume("WS") + else: + marker = _parse_marker_item(tokenizer) + tokenizer.consume("WS") + return marker + + +def _parse_marker_item(tokenizer: Tokenizer) -> MarkerItem: + """ + marker_item = WS? marker_var WS? marker_op WS? marker_var WS? + """ + tokenizer.consume("WS") + marker_var_left = _parse_marker_var(tokenizer) + tokenizer.consume("WS") + marker_op = _parse_marker_op(tokenizer) + tokenizer.consume("WS") + marker_var_right = _parse_marker_var(tokenizer) + tokenizer.consume("WS") + return (marker_var_left, marker_op, marker_var_right) + + +def _parse_marker_var(tokenizer: Tokenizer) -> MarkerVar: + """ + marker_var = VARIABLE | QUOTED_STRING + """ + if tokenizer.check("VARIABLE"): + return process_env_var(tokenizer.read().text.replace(".", "_")) + elif tokenizer.check("QUOTED_STRING"): + return process_python_str(tokenizer.read().text) + else: + tokenizer.raise_syntax_error( + message="Expected a marker variable or quoted string" + ) + + +def process_env_var(env_var: str) -> Variable: + if env_var in ("platform_python_implementation", "python_implementation"): + return Variable("platform_python_implementation") + else: + return Variable(env_var) + + +def process_python_str(python_str: str) -> Value: + value = ast.literal_eval(python_str) + return Value(str(value)) + + +def _parse_marker_op(tokenizer: Tokenizer) -> Op: + """ + marker_op = IN | NOT IN | OP + """ + if tokenizer.check("IN"): + tokenizer.read() + return Op("in") + elif tokenizer.check("NOT"): + tokenizer.read() + tokenizer.expect("WS", expected="whitespace after 'not'") + tokenizer.expect("IN", expected="'in' after 'not'") + return Op("not in") + elif tokenizer.check("OP"): + return Op(tokenizer.read().text) + 
else: + return tokenizer.raise_syntax_error( + "Expected marker operator, one of " + "<=, <, !=, ==, >=, >, ~=, ===, in, not in" + ) diff --git a/vllm/lib/python3.10/site-packages/packaging/_structures.py b/vllm/lib/python3.10/site-packages/packaging/_structures.py new file mode 100644 index 0000000000000000000000000000000000000000..90a6465f9682c886363eea5327dac64bf623a6ff --- /dev/null +++ b/vllm/lib/python3.10/site-packages/packaging/_structures.py @@ -0,0 +1,61 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + + +class InfinityType: + def __repr__(self) -> str: + return "Infinity" + + def __hash__(self) -> int: + return hash(repr(self)) + + def __lt__(self, other: object) -> bool: + return False + + def __le__(self, other: object) -> bool: + return False + + def __eq__(self, other: object) -> bool: + return isinstance(other, self.__class__) + + def __gt__(self, other: object) -> bool: + return True + + def __ge__(self, other: object) -> bool: + return True + + def __neg__(self: object) -> "NegativeInfinityType": + return NegativeInfinity + + +Infinity = InfinityType() + + +class NegativeInfinityType: + def __repr__(self) -> str: + return "-Infinity" + + def __hash__(self) -> int: + return hash(repr(self)) + + def __lt__(self, other: object) -> bool: + return True + + def __le__(self, other: object) -> bool: + return True + + def __eq__(self, other: object) -> bool: + return isinstance(other, self.__class__) + + def __gt__(self, other: object) -> bool: + return False + + def __ge__(self, other: object) -> bool: + return False + + def __neg__(self: object) -> InfinityType: + return Infinity + + +NegativeInfinity = NegativeInfinityType() diff --git a/vllm/lib/python3.10/site-packages/packaging/_tokenizer.py b/vllm/lib/python3.10/site-packages/packaging/_tokenizer.py new file mode 100644 index 
0000000000000000000000000000000000000000..89d041605c006e326a67f399a58a1fec8eb24acf --- /dev/null +++ b/vllm/lib/python3.10/site-packages/packaging/_tokenizer.py @@ -0,0 +1,194 @@ +from __future__ import annotations + +import contextlib +import re +from dataclasses import dataclass +from typing import Iterator, NoReturn + +from .specifiers import Specifier + + +@dataclass +class Token: + name: str + text: str + position: int + + +class ParserSyntaxError(Exception): + """The provided source text could not be parsed correctly.""" + + def __init__( + self, + message: str, + *, + source: str, + span: tuple[int, int], + ) -> None: + self.span = span + self.message = message + self.source = source + + super().__init__() + + def __str__(self) -> str: + marker = " " * self.span[0] + "~" * (self.span[1] - self.span[0]) + "^" + return "\n ".join([self.message, self.source, marker]) + + +DEFAULT_RULES: dict[str, str | re.Pattern[str]] = { + "LEFT_PARENTHESIS": r"\(", + "RIGHT_PARENTHESIS": r"\)", + "LEFT_BRACKET": r"\[", + "RIGHT_BRACKET": r"\]", + "SEMICOLON": r";", + "COMMA": r",", + "QUOTED_STRING": re.compile( + r""" + ( + ('[^']*') + | + ("[^"]*") + ) + """, + re.VERBOSE, + ), + "OP": r"(===|==|~=|!=|<=|>=|<|>)", + "BOOLOP": r"\b(or|and)\b", + "IN": r"\bin\b", + "NOT": r"\bnot\b", + "VARIABLE": re.compile( + r""" + \b( + python_version + |python_full_version + |os[._]name + |sys[._]platform + |platform_(release|system) + |platform[._](version|machine|python_implementation) + |python_implementation + |implementation_(name|version) + |extra + )\b + """, + re.VERBOSE, + ), + "SPECIFIER": re.compile( + Specifier._operator_regex_str + Specifier._version_regex_str, + re.VERBOSE | re.IGNORECASE, + ), + "AT": r"\@", + "URL": r"[^ \t]+", + "IDENTIFIER": r"\b[a-zA-Z0-9][a-zA-Z0-9._-]*\b", + "VERSION_PREFIX_TRAIL": r"\.\*", + "VERSION_LOCAL_LABEL_TRAIL": r"\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*", + "WS": r"[ \t]+", + "END": r"$", +} + + +class Tokenizer: + """Context-sensitive token 
parsing. + + Provides methods to examine the input stream to check whether the next token + matches. + """ + + def __init__( + self, + source: str, + *, + rules: dict[str, str | re.Pattern[str]], + ) -> None: + self.source = source + self.rules: dict[str, re.Pattern[str]] = { + name: re.compile(pattern) for name, pattern in rules.items() + } + self.next_token: Token | None = None + self.position = 0 + + def consume(self, name: str) -> None: + """Move beyond provided token name, if at current position.""" + if self.check(name): + self.read() + + def check(self, name: str, *, peek: bool = False) -> bool: + """Check whether the next token has the provided name. + + By default, if the check succeeds, the token *must* be read before + another check. If `peek` is set to `True`, the token is not loaded and + would need to be checked again. + """ + assert ( + self.next_token is None + ), f"Cannot check for {name!r}, already have {self.next_token!r}" + assert name in self.rules, f"Unknown token name: {name!r}" + + expression = self.rules[name] + + match = expression.match(self.source, self.position) + if match is None: + return False + if not peek: + self.next_token = Token(name, match[0], self.position) + return True + + def expect(self, name: str, *, expected: str) -> Token: + """Expect a certain token name next, failing with a syntax error otherwise. + + The token is *not* read. 
+ """ + if not self.check(name): + raise self.raise_syntax_error(f"Expected {expected}") + return self.read() + + def read(self) -> Token: + """Consume the next token and return it.""" + token = self.next_token + assert token is not None + + self.position += len(token.text) + self.next_token = None + + return token + + def raise_syntax_error( + self, + message: str, + *, + span_start: int | None = None, + span_end: int | None = None, + ) -> NoReturn: + """Raise ParserSyntaxError at the given position.""" + span = ( + self.position if span_start is None else span_start, + self.position if span_end is None else span_end, + ) + raise ParserSyntaxError( + message, + source=self.source, + span=span, + ) + + @contextlib.contextmanager + def enclosing_tokens( + self, open_token: str, close_token: str, *, around: str + ) -> Iterator[None]: + if self.check(open_token): + open_position = self.position + self.read() + else: + open_position = None + + yield + + if open_position is None: + return + + if not self.check(close_token): + self.raise_syntax_error( + f"Expected matching {close_token} for {open_token}, after {around}", + span_start=open_position, + ) + + self.read() diff --git a/vllm/lib/python3.10/site-packages/packaging/licenses/__init__.py b/vllm/lib/python3.10/site-packages/packaging/licenses/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..569156d6ca47719f49b753a4781a86a924de173b --- /dev/null +++ b/vllm/lib/python3.10/site-packages/packaging/licenses/__init__.py @@ -0,0 +1,145 @@ +####################################################################################### +# +# Adapted from: +# https://github.com/pypa/hatch/blob/5352e44/backend/src/hatchling/licenses/parse.py +# +# MIT License +# +# Copyright (c) 2017-present Ofek Lev +# +# Permission is hereby granted, free of charge, to any person obtaining a copy of this +# software and associated documentation files (the "Software"), to deal in the Software +# without restriction, 
including without limitation the rights to use, copy, modify, +# merge, publish, distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to the following +# conditions: +# +# The above copyright notice and this permission notice shall be included in all copies +# or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, +# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +# PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT +# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF +# CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE +# OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +# +# +# With additional allowance of arbitrary `LicenseRef-` identifiers, not just +# `LicenseRef-Public-Domain` and `LicenseRef-Proprietary`. +# +####################################################################################### +from __future__ import annotations + +import re +from typing import NewType, cast + +from packaging.licenses._spdx import EXCEPTIONS, LICENSES + +__all__ = [ + "NormalizedLicenseExpression", + "InvalidLicenseExpression", + "canonicalize_license_expression", +] + +license_ref_allowed = re.compile("^[A-Za-z0-9.-]*$") + +NormalizedLicenseExpression = NewType("NormalizedLicenseExpression", str) + + +class InvalidLicenseExpression(ValueError): + """Raised when a license-expression string is invalid + + >>> canonicalize_license_expression("invalid") + Traceback (most recent call last): + ... 
+ packaging.licenses.InvalidLicenseExpression: Invalid license expression: 'invalid' + """ + + +def canonicalize_license_expression( + raw_license_expression: str, +) -> NormalizedLicenseExpression: + if not raw_license_expression: + message = f"Invalid license expression: {raw_license_expression!r}" + raise InvalidLicenseExpression(message) + + # Pad any parentheses so tokenization can be achieved by merely splitting on + # whitespace. + license_expression = raw_license_expression.replace("(", " ( ").replace(")", " ) ") + licenseref_prefix = "LicenseRef-" + license_refs = { + ref.lower(): "LicenseRef-" + ref[len(licenseref_prefix) :] + for ref in license_expression.split() + if ref.lower().startswith(licenseref_prefix.lower()) + } + + # Normalize to lower case so we can look up licenses/exceptions + # and so boolean operators are Python-compatible. + license_expression = license_expression.lower() + + tokens = license_expression.split() + + # Rather than implementing boolean logic, we create an expression that Python can + # parse. Everything that is not involved with the grammar itself is treated as + # `False` and the expression should evaluate as such. + python_tokens = [] + for token in tokens: + if token not in {"or", "and", "with", "(", ")"}: + python_tokens.append("False") + elif token == "with": + python_tokens.append("or") + elif token == "(" and python_tokens and python_tokens[-1] not in {"or", "and"}: + message = f"Invalid license expression: {raw_license_expression!r}" + raise InvalidLicenseExpression(message) + else: + python_tokens.append(token) + + python_expression = " ".join(python_tokens) + try: + invalid = eval(python_expression, globals(), locals()) + except Exception: + invalid = True + + if invalid is not False: + message = f"Invalid license expression: {raw_license_expression!r}" + raise InvalidLicenseExpression(message) from None + + # Take a final pass to check for unknown licenses/exceptions. 
+ normalized_tokens = [] + for token in tokens: + if token in {"or", "and", "with", "(", ")"}: + normalized_tokens.append(token.upper()) + continue + + if normalized_tokens and normalized_tokens[-1] == "WITH": + if token not in EXCEPTIONS: + message = f"Unknown license exception: {token!r}" + raise InvalidLicenseExpression(message) + + normalized_tokens.append(EXCEPTIONS[token]["id"]) + else: + if token.endswith("+"): + final_token = token[:-1] + suffix = "+" + else: + final_token = token + suffix = "" + + if final_token.startswith("licenseref-"): + if not license_ref_allowed.match(final_token): + message = f"Invalid licenseref: {final_token!r}" + raise InvalidLicenseExpression(message) + normalized_tokens.append(license_refs[final_token] + suffix) + else: + if final_token not in LICENSES: + message = f"Unknown license: {final_token!r}" + raise InvalidLicenseExpression(message) + normalized_tokens.append(LICENSES[final_token]["id"] + suffix) + + normalized_expression = " ".join(normalized_tokens) + + return cast( + NormalizedLicenseExpression, + normalized_expression.replace("( ", "(").replace(" )", ")"), + ) diff --git a/vllm/lib/python3.10/site-packages/packaging/licenses/__pycache__/__init__.cpython-310.pyc b/vllm/lib/python3.10/site-packages/packaging/licenses/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..281222bc5fe1444f3c7a5c68dad6675b69432209 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/packaging/licenses/__pycache__/__init__.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/packaging/licenses/_spdx.py b/vllm/lib/python3.10/site-packages/packaging/licenses/_spdx.py new file mode 100644 index 0000000000000000000000000000000000000000..eac22276a34ccd73fc9d70c67ca318a49eb11e77 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/packaging/licenses/_spdx.py @@ -0,0 +1,759 @@ + +from __future__ import annotations + +from typing import TypedDict + +class 
SPDXLicense(TypedDict): + id: str + deprecated: bool + +class SPDXException(TypedDict): + id: str + deprecated: bool + + +VERSION = '3.25.0' + +LICENSES: dict[str, SPDXLicense] = { + '0bsd': {'id': '0BSD', 'deprecated': False}, + '3d-slicer-1.0': {'id': '3D-Slicer-1.0', 'deprecated': False}, + 'aal': {'id': 'AAL', 'deprecated': False}, + 'abstyles': {'id': 'Abstyles', 'deprecated': False}, + 'adacore-doc': {'id': 'AdaCore-doc', 'deprecated': False}, + 'adobe-2006': {'id': 'Adobe-2006', 'deprecated': False}, + 'adobe-display-postscript': {'id': 'Adobe-Display-PostScript', 'deprecated': False}, + 'adobe-glyph': {'id': 'Adobe-Glyph', 'deprecated': False}, + 'adobe-utopia': {'id': 'Adobe-Utopia', 'deprecated': False}, + 'adsl': {'id': 'ADSL', 'deprecated': False}, + 'afl-1.1': {'id': 'AFL-1.1', 'deprecated': False}, + 'afl-1.2': {'id': 'AFL-1.2', 'deprecated': False}, + 'afl-2.0': {'id': 'AFL-2.0', 'deprecated': False}, + 'afl-2.1': {'id': 'AFL-2.1', 'deprecated': False}, + 'afl-3.0': {'id': 'AFL-3.0', 'deprecated': False}, + 'afmparse': {'id': 'Afmparse', 'deprecated': False}, + 'agpl-1.0': {'id': 'AGPL-1.0', 'deprecated': True}, + 'agpl-1.0-only': {'id': 'AGPL-1.0-only', 'deprecated': False}, + 'agpl-1.0-or-later': {'id': 'AGPL-1.0-or-later', 'deprecated': False}, + 'agpl-3.0': {'id': 'AGPL-3.0', 'deprecated': True}, + 'agpl-3.0-only': {'id': 'AGPL-3.0-only', 'deprecated': False}, + 'agpl-3.0-or-later': {'id': 'AGPL-3.0-or-later', 'deprecated': False}, + 'aladdin': {'id': 'Aladdin', 'deprecated': False}, + 'amd-newlib': {'id': 'AMD-newlib', 'deprecated': False}, + 'amdplpa': {'id': 'AMDPLPA', 'deprecated': False}, + 'aml': {'id': 'AML', 'deprecated': False}, + 'aml-glslang': {'id': 'AML-glslang', 'deprecated': False}, + 'ampas': {'id': 'AMPAS', 'deprecated': False}, + 'antlr-pd': {'id': 'ANTLR-PD', 'deprecated': False}, + 'antlr-pd-fallback': {'id': 'ANTLR-PD-fallback', 'deprecated': False}, + 'any-osi': {'id': 'any-OSI', 'deprecated': False}, + 'apache-1.0': {'id': 
'Apache-1.0', 'deprecated': False}, + 'apache-1.1': {'id': 'Apache-1.1', 'deprecated': False}, + 'apache-2.0': {'id': 'Apache-2.0', 'deprecated': False}, + 'apafml': {'id': 'APAFML', 'deprecated': False}, + 'apl-1.0': {'id': 'APL-1.0', 'deprecated': False}, + 'app-s2p': {'id': 'App-s2p', 'deprecated': False}, + 'apsl-1.0': {'id': 'APSL-1.0', 'deprecated': False}, + 'apsl-1.1': {'id': 'APSL-1.1', 'deprecated': False}, + 'apsl-1.2': {'id': 'APSL-1.2', 'deprecated': False}, + 'apsl-2.0': {'id': 'APSL-2.0', 'deprecated': False}, + 'arphic-1999': {'id': 'Arphic-1999', 'deprecated': False}, + 'artistic-1.0': {'id': 'Artistic-1.0', 'deprecated': False}, + 'artistic-1.0-cl8': {'id': 'Artistic-1.0-cl8', 'deprecated': False}, + 'artistic-1.0-perl': {'id': 'Artistic-1.0-Perl', 'deprecated': False}, + 'artistic-2.0': {'id': 'Artistic-2.0', 'deprecated': False}, + 'aswf-digital-assets-1.0': {'id': 'ASWF-Digital-Assets-1.0', 'deprecated': False}, + 'aswf-digital-assets-1.1': {'id': 'ASWF-Digital-Assets-1.1', 'deprecated': False}, + 'baekmuk': {'id': 'Baekmuk', 'deprecated': False}, + 'bahyph': {'id': 'Bahyph', 'deprecated': False}, + 'barr': {'id': 'Barr', 'deprecated': False}, + 'bcrypt-solar-designer': {'id': 'bcrypt-Solar-Designer', 'deprecated': False}, + 'beerware': {'id': 'Beerware', 'deprecated': False}, + 'bitstream-charter': {'id': 'Bitstream-Charter', 'deprecated': False}, + 'bitstream-vera': {'id': 'Bitstream-Vera', 'deprecated': False}, + 'bittorrent-1.0': {'id': 'BitTorrent-1.0', 'deprecated': False}, + 'bittorrent-1.1': {'id': 'BitTorrent-1.1', 'deprecated': False}, + 'blessing': {'id': 'blessing', 'deprecated': False}, + 'blueoak-1.0.0': {'id': 'BlueOak-1.0.0', 'deprecated': False}, + 'boehm-gc': {'id': 'Boehm-GC', 'deprecated': False}, + 'borceux': {'id': 'Borceux', 'deprecated': False}, + 'brian-gladman-2-clause': {'id': 'Brian-Gladman-2-Clause', 'deprecated': False}, + 'brian-gladman-3-clause': {'id': 'Brian-Gladman-3-Clause', 'deprecated': False}, + 
'bsd-1-clause': {'id': 'BSD-1-Clause', 'deprecated': False}, + 'bsd-2-clause': {'id': 'BSD-2-Clause', 'deprecated': False}, + 'bsd-2-clause-darwin': {'id': 'BSD-2-Clause-Darwin', 'deprecated': False}, + 'bsd-2-clause-first-lines': {'id': 'BSD-2-Clause-first-lines', 'deprecated': False}, + 'bsd-2-clause-freebsd': {'id': 'BSD-2-Clause-FreeBSD', 'deprecated': True}, + 'bsd-2-clause-netbsd': {'id': 'BSD-2-Clause-NetBSD', 'deprecated': True}, + 'bsd-2-clause-patent': {'id': 'BSD-2-Clause-Patent', 'deprecated': False}, + 'bsd-2-clause-views': {'id': 'BSD-2-Clause-Views', 'deprecated': False}, + 'bsd-3-clause': {'id': 'BSD-3-Clause', 'deprecated': False}, + 'bsd-3-clause-acpica': {'id': 'BSD-3-Clause-acpica', 'deprecated': False}, + 'bsd-3-clause-attribution': {'id': 'BSD-3-Clause-Attribution', 'deprecated': False}, + 'bsd-3-clause-clear': {'id': 'BSD-3-Clause-Clear', 'deprecated': False}, + 'bsd-3-clause-flex': {'id': 'BSD-3-Clause-flex', 'deprecated': False}, + 'bsd-3-clause-hp': {'id': 'BSD-3-Clause-HP', 'deprecated': False}, + 'bsd-3-clause-lbnl': {'id': 'BSD-3-Clause-LBNL', 'deprecated': False}, + 'bsd-3-clause-modification': {'id': 'BSD-3-Clause-Modification', 'deprecated': False}, + 'bsd-3-clause-no-military-license': {'id': 'BSD-3-Clause-No-Military-License', 'deprecated': False}, + 'bsd-3-clause-no-nuclear-license': {'id': 'BSD-3-Clause-No-Nuclear-License', 'deprecated': False}, + 'bsd-3-clause-no-nuclear-license-2014': {'id': 'BSD-3-Clause-No-Nuclear-License-2014', 'deprecated': False}, + 'bsd-3-clause-no-nuclear-warranty': {'id': 'BSD-3-Clause-No-Nuclear-Warranty', 'deprecated': False}, + 'bsd-3-clause-open-mpi': {'id': 'BSD-3-Clause-Open-MPI', 'deprecated': False}, + 'bsd-3-clause-sun': {'id': 'BSD-3-Clause-Sun', 'deprecated': False}, + 'bsd-4-clause': {'id': 'BSD-4-Clause', 'deprecated': False}, + 'bsd-4-clause-shortened': {'id': 'BSD-4-Clause-Shortened', 'deprecated': False}, + 'bsd-4-clause-uc': {'id': 'BSD-4-Clause-UC', 'deprecated': False}, + 
'bsd-4.3reno': {'id': 'BSD-4.3RENO', 'deprecated': False}, + 'bsd-4.3tahoe': {'id': 'BSD-4.3TAHOE', 'deprecated': False}, + 'bsd-advertising-acknowledgement': {'id': 'BSD-Advertising-Acknowledgement', 'deprecated': False}, + 'bsd-attribution-hpnd-disclaimer': {'id': 'BSD-Attribution-HPND-disclaimer', 'deprecated': False}, + 'bsd-inferno-nettverk': {'id': 'BSD-Inferno-Nettverk', 'deprecated': False}, + 'bsd-protection': {'id': 'BSD-Protection', 'deprecated': False}, + 'bsd-source-beginning-file': {'id': 'BSD-Source-beginning-file', 'deprecated': False}, + 'bsd-source-code': {'id': 'BSD-Source-Code', 'deprecated': False}, + 'bsd-systemics': {'id': 'BSD-Systemics', 'deprecated': False}, + 'bsd-systemics-w3works': {'id': 'BSD-Systemics-W3Works', 'deprecated': False}, + 'bsl-1.0': {'id': 'BSL-1.0', 'deprecated': False}, + 'busl-1.1': {'id': 'BUSL-1.1', 'deprecated': False}, + 'bzip2-1.0.5': {'id': 'bzip2-1.0.5', 'deprecated': True}, + 'bzip2-1.0.6': {'id': 'bzip2-1.0.6', 'deprecated': False}, + 'c-uda-1.0': {'id': 'C-UDA-1.0', 'deprecated': False}, + 'cal-1.0': {'id': 'CAL-1.0', 'deprecated': False}, + 'cal-1.0-combined-work-exception': {'id': 'CAL-1.0-Combined-Work-Exception', 'deprecated': False}, + 'caldera': {'id': 'Caldera', 'deprecated': False}, + 'caldera-no-preamble': {'id': 'Caldera-no-preamble', 'deprecated': False}, + 'catharon': {'id': 'Catharon', 'deprecated': False}, + 'catosl-1.1': {'id': 'CATOSL-1.1', 'deprecated': False}, + 'cc-by-1.0': {'id': 'CC-BY-1.0', 'deprecated': False}, + 'cc-by-2.0': {'id': 'CC-BY-2.0', 'deprecated': False}, + 'cc-by-2.5': {'id': 'CC-BY-2.5', 'deprecated': False}, + 'cc-by-2.5-au': {'id': 'CC-BY-2.5-AU', 'deprecated': False}, + 'cc-by-3.0': {'id': 'CC-BY-3.0', 'deprecated': False}, + 'cc-by-3.0-at': {'id': 'CC-BY-3.0-AT', 'deprecated': False}, + 'cc-by-3.0-au': {'id': 'CC-BY-3.0-AU', 'deprecated': False}, + 'cc-by-3.0-de': {'id': 'CC-BY-3.0-DE', 'deprecated': False}, + 'cc-by-3.0-igo': {'id': 'CC-BY-3.0-IGO', 'deprecated': 
False}, + 'cc-by-3.0-nl': {'id': 'CC-BY-3.0-NL', 'deprecated': False}, + 'cc-by-3.0-us': {'id': 'CC-BY-3.0-US', 'deprecated': False}, + 'cc-by-4.0': {'id': 'CC-BY-4.0', 'deprecated': False}, + 'cc-by-nc-1.0': {'id': 'CC-BY-NC-1.0', 'deprecated': False}, + 'cc-by-nc-2.0': {'id': 'CC-BY-NC-2.0', 'deprecated': False}, + 'cc-by-nc-2.5': {'id': 'CC-BY-NC-2.5', 'deprecated': False}, + 'cc-by-nc-3.0': {'id': 'CC-BY-NC-3.0', 'deprecated': False}, + 'cc-by-nc-3.0-de': {'id': 'CC-BY-NC-3.0-DE', 'deprecated': False}, + 'cc-by-nc-4.0': {'id': 'CC-BY-NC-4.0', 'deprecated': False}, + 'cc-by-nc-nd-1.0': {'id': 'CC-BY-NC-ND-1.0', 'deprecated': False}, + 'cc-by-nc-nd-2.0': {'id': 'CC-BY-NC-ND-2.0', 'deprecated': False}, + 'cc-by-nc-nd-2.5': {'id': 'CC-BY-NC-ND-2.5', 'deprecated': False}, + 'cc-by-nc-nd-3.0': {'id': 'CC-BY-NC-ND-3.0', 'deprecated': False}, + 'cc-by-nc-nd-3.0-de': {'id': 'CC-BY-NC-ND-3.0-DE', 'deprecated': False}, + 'cc-by-nc-nd-3.0-igo': {'id': 'CC-BY-NC-ND-3.0-IGO', 'deprecated': False}, + 'cc-by-nc-nd-4.0': {'id': 'CC-BY-NC-ND-4.0', 'deprecated': False}, + 'cc-by-nc-sa-1.0': {'id': 'CC-BY-NC-SA-1.0', 'deprecated': False}, + 'cc-by-nc-sa-2.0': {'id': 'CC-BY-NC-SA-2.0', 'deprecated': False}, + 'cc-by-nc-sa-2.0-de': {'id': 'CC-BY-NC-SA-2.0-DE', 'deprecated': False}, + 'cc-by-nc-sa-2.0-fr': {'id': 'CC-BY-NC-SA-2.0-FR', 'deprecated': False}, + 'cc-by-nc-sa-2.0-uk': {'id': 'CC-BY-NC-SA-2.0-UK', 'deprecated': False}, + 'cc-by-nc-sa-2.5': {'id': 'CC-BY-NC-SA-2.5', 'deprecated': False}, + 'cc-by-nc-sa-3.0': {'id': 'CC-BY-NC-SA-3.0', 'deprecated': False}, + 'cc-by-nc-sa-3.0-de': {'id': 'CC-BY-NC-SA-3.0-DE', 'deprecated': False}, + 'cc-by-nc-sa-3.0-igo': {'id': 'CC-BY-NC-SA-3.0-IGO', 'deprecated': False}, + 'cc-by-nc-sa-4.0': {'id': 'CC-BY-NC-SA-4.0', 'deprecated': False}, + 'cc-by-nd-1.0': {'id': 'CC-BY-ND-1.0', 'deprecated': False}, + 'cc-by-nd-2.0': {'id': 'CC-BY-ND-2.0', 'deprecated': False}, + 'cc-by-nd-2.5': {'id': 'CC-BY-ND-2.5', 'deprecated': False}, + 
'cc-by-nd-3.0': {'id': 'CC-BY-ND-3.0', 'deprecated': False}, + 'cc-by-nd-3.0-de': {'id': 'CC-BY-ND-3.0-DE', 'deprecated': False}, + 'cc-by-nd-4.0': {'id': 'CC-BY-ND-4.0', 'deprecated': False}, + 'cc-by-sa-1.0': {'id': 'CC-BY-SA-1.0', 'deprecated': False}, + 'cc-by-sa-2.0': {'id': 'CC-BY-SA-2.0', 'deprecated': False}, + 'cc-by-sa-2.0-uk': {'id': 'CC-BY-SA-2.0-UK', 'deprecated': False}, + 'cc-by-sa-2.1-jp': {'id': 'CC-BY-SA-2.1-JP', 'deprecated': False}, + 'cc-by-sa-2.5': {'id': 'CC-BY-SA-2.5', 'deprecated': False}, + 'cc-by-sa-3.0': {'id': 'CC-BY-SA-3.0', 'deprecated': False}, + 'cc-by-sa-3.0-at': {'id': 'CC-BY-SA-3.0-AT', 'deprecated': False}, + 'cc-by-sa-3.0-de': {'id': 'CC-BY-SA-3.0-DE', 'deprecated': False}, + 'cc-by-sa-3.0-igo': {'id': 'CC-BY-SA-3.0-IGO', 'deprecated': False}, + 'cc-by-sa-4.0': {'id': 'CC-BY-SA-4.0', 'deprecated': False}, + 'cc-pddc': {'id': 'CC-PDDC', 'deprecated': False}, + 'cc0-1.0': {'id': 'CC0-1.0', 'deprecated': False}, + 'cddl-1.0': {'id': 'CDDL-1.0', 'deprecated': False}, + 'cddl-1.1': {'id': 'CDDL-1.1', 'deprecated': False}, + 'cdl-1.0': {'id': 'CDL-1.0', 'deprecated': False}, + 'cdla-permissive-1.0': {'id': 'CDLA-Permissive-1.0', 'deprecated': False}, + 'cdla-permissive-2.0': {'id': 'CDLA-Permissive-2.0', 'deprecated': False}, + 'cdla-sharing-1.0': {'id': 'CDLA-Sharing-1.0', 'deprecated': False}, + 'cecill-1.0': {'id': 'CECILL-1.0', 'deprecated': False}, + 'cecill-1.1': {'id': 'CECILL-1.1', 'deprecated': False}, + 'cecill-2.0': {'id': 'CECILL-2.0', 'deprecated': False}, + 'cecill-2.1': {'id': 'CECILL-2.1', 'deprecated': False}, + 'cecill-b': {'id': 'CECILL-B', 'deprecated': False}, + 'cecill-c': {'id': 'CECILL-C', 'deprecated': False}, + 'cern-ohl-1.1': {'id': 'CERN-OHL-1.1', 'deprecated': False}, + 'cern-ohl-1.2': {'id': 'CERN-OHL-1.2', 'deprecated': False}, + 'cern-ohl-p-2.0': {'id': 'CERN-OHL-P-2.0', 'deprecated': False}, + 'cern-ohl-s-2.0': {'id': 'CERN-OHL-S-2.0', 'deprecated': False}, + 'cern-ohl-w-2.0': {'id': 'CERN-OHL-W-2.0', 
'deprecated': False}, + 'cfitsio': {'id': 'CFITSIO', 'deprecated': False}, + 'check-cvs': {'id': 'check-cvs', 'deprecated': False}, + 'checkmk': {'id': 'checkmk', 'deprecated': False}, + 'clartistic': {'id': 'ClArtistic', 'deprecated': False}, + 'clips': {'id': 'Clips', 'deprecated': False}, + 'cmu-mach': {'id': 'CMU-Mach', 'deprecated': False}, + 'cmu-mach-nodoc': {'id': 'CMU-Mach-nodoc', 'deprecated': False}, + 'cnri-jython': {'id': 'CNRI-Jython', 'deprecated': False}, + 'cnri-python': {'id': 'CNRI-Python', 'deprecated': False}, + 'cnri-python-gpl-compatible': {'id': 'CNRI-Python-GPL-Compatible', 'deprecated': False}, + 'coil-1.0': {'id': 'COIL-1.0', 'deprecated': False}, + 'community-spec-1.0': {'id': 'Community-Spec-1.0', 'deprecated': False}, + 'condor-1.1': {'id': 'Condor-1.1', 'deprecated': False}, + 'copyleft-next-0.3.0': {'id': 'copyleft-next-0.3.0', 'deprecated': False}, + 'copyleft-next-0.3.1': {'id': 'copyleft-next-0.3.1', 'deprecated': False}, + 'cornell-lossless-jpeg': {'id': 'Cornell-Lossless-JPEG', 'deprecated': False}, + 'cpal-1.0': {'id': 'CPAL-1.0', 'deprecated': False}, + 'cpl-1.0': {'id': 'CPL-1.0', 'deprecated': False}, + 'cpol-1.02': {'id': 'CPOL-1.02', 'deprecated': False}, + 'cronyx': {'id': 'Cronyx', 'deprecated': False}, + 'crossword': {'id': 'Crossword', 'deprecated': False}, + 'crystalstacker': {'id': 'CrystalStacker', 'deprecated': False}, + 'cua-opl-1.0': {'id': 'CUA-OPL-1.0', 'deprecated': False}, + 'cube': {'id': 'Cube', 'deprecated': False}, + 'curl': {'id': 'curl', 'deprecated': False}, + 'cve-tou': {'id': 'cve-tou', 'deprecated': False}, + 'd-fsl-1.0': {'id': 'D-FSL-1.0', 'deprecated': False}, + 'dec-3-clause': {'id': 'DEC-3-Clause', 'deprecated': False}, + 'diffmark': {'id': 'diffmark', 'deprecated': False}, + 'dl-de-by-2.0': {'id': 'DL-DE-BY-2.0', 'deprecated': False}, + 'dl-de-zero-2.0': {'id': 'DL-DE-ZERO-2.0', 'deprecated': False}, + 'doc': {'id': 'DOC', 'deprecated': False}, + 'docbook-schema': {'id': 'DocBook-Schema', 
'deprecated': False}, + 'docbook-xml': {'id': 'DocBook-XML', 'deprecated': False}, + 'dotseqn': {'id': 'Dotseqn', 'deprecated': False}, + 'drl-1.0': {'id': 'DRL-1.0', 'deprecated': False}, + 'drl-1.1': {'id': 'DRL-1.1', 'deprecated': False}, + 'dsdp': {'id': 'DSDP', 'deprecated': False}, + 'dtoa': {'id': 'dtoa', 'deprecated': False}, + 'dvipdfm': {'id': 'dvipdfm', 'deprecated': False}, + 'ecl-1.0': {'id': 'ECL-1.0', 'deprecated': False}, + 'ecl-2.0': {'id': 'ECL-2.0', 'deprecated': False}, + 'ecos-2.0': {'id': 'eCos-2.0', 'deprecated': True}, + 'efl-1.0': {'id': 'EFL-1.0', 'deprecated': False}, + 'efl-2.0': {'id': 'EFL-2.0', 'deprecated': False}, + 'egenix': {'id': 'eGenix', 'deprecated': False}, + 'elastic-2.0': {'id': 'Elastic-2.0', 'deprecated': False}, + 'entessa': {'id': 'Entessa', 'deprecated': False}, + 'epics': {'id': 'EPICS', 'deprecated': False}, + 'epl-1.0': {'id': 'EPL-1.0', 'deprecated': False}, + 'epl-2.0': {'id': 'EPL-2.0', 'deprecated': False}, + 'erlpl-1.1': {'id': 'ErlPL-1.1', 'deprecated': False}, + 'etalab-2.0': {'id': 'etalab-2.0', 'deprecated': False}, + 'eudatagrid': {'id': 'EUDatagrid', 'deprecated': False}, + 'eupl-1.0': {'id': 'EUPL-1.0', 'deprecated': False}, + 'eupl-1.1': {'id': 'EUPL-1.1', 'deprecated': False}, + 'eupl-1.2': {'id': 'EUPL-1.2', 'deprecated': False}, + 'eurosym': {'id': 'Eurosym', 'deprecated': False}, + 'fair': {'id': 'Fair', 'deprecated': False}, + 'fbm': {'id': 'FBM', 'deprecated': False}, + 'fdk-aac': {'id': 'FDK-AAC', 'deprecated': False}, + 'ferguson-twofish': {'id': 'Ferguson-Twofish', 'deprecated': False}, + 'frameworx-1.0': {'id': 'Frameworx-1.0', 'deprecated': False}, + 'freebsd-doc': {'id': 'FreeBSD-DOC', 'deprecated': False}, + 'freeimage': {'id': 'FreeImage', 'deprecated': False}, + 'fsfap': {'id': 'FSFAP', 'deprecated': False}, + 'fsfap-no-warranty-disclaimer': {'id': 'FSFAP-no-warranty-disclaimer', 'deprecated': False}, + 'fsful': {'id': 'FSFUL', 'deprecated': False}, + 'fsfullr': {'id': 'FSFULLR', 
'deprecated': False}, + 'fsfullrwd': {'id': 'FSFULLRWD', 'deprecated': False}, + 'ftl': {'id': 'FTL', 'deprecated': False}, + 'furuseth': {'id': 'Furuseth', 'deprecated': False}, + 'fwlw': {'id': 'fwlw', 'deprecated': False}, + 'gcr-docs': {'id': 'GCR-docs', 'deprecated': False}, + 'gd': {'id': 'GD', 'deprecated': False}, + 'gfdl-1.1': {'id': 'GFDL-1.1', 'deprecated': True}, + 'gfdl-1.1-invariants-only': {'id': 'GFDL-1.1-invariants-only', 'deprecated': False}, + 'gfdl-1.1-invariants-or-later': {'id': 'GFDL-1.1-invariants-or-later', 'deprecated': False}, + 'gfdl-1.1-no-invariants-only': {'id': 'GFDL-1.1-no-invariants-only', 'deprecated': False}, + 'gfdl-1.1-no-invariants-or-later': {'id': 'GFDL-1.1-no-invariants-or-later', 'deprecated': False}, + 'gfdl-1.1-only': {'id': 'GFDL-1.1-only', 'deprecated': False}, + 'gfdl-1.1-or-later': {'id': 'GFDL-1.1-or-later', 'deprecated': False}, + 'gfdl-1.2': {'id': 'GFDL-1.2', 'deprecated': True}, + 'gfdl-1.2-invariants-only': {'id': 'GFDL-1.2-invariants-only', 'deprecated': False}, + 'gfdl-1.2-invariants-or-later': {'id': 'GFDL-1.2-invariants-or-later', 'deprecated': False}, + 'gfdl-1.2-no-invariants-only': {'id': 'GFDL-1.2-no-invariants-only', 'deprecated': False}, + 'gfdl-1.2-no-invariants-or-later': {'id': 'GFDL-1.2-no-invariants-or-later', 'deprecated': False}, + 'gfdl-1.2-only': {'id': 'GFDL-1.2-only', 'deprecated': False}, + 'gfdl-1.2-or-later': {'id': 'GFDL-1.2-or-later', 'deprecated': False}, + 'gfdl-1.3': {'id': 'GFDL-1.3', 'deprecated': True}, + 'gfdl-1.3-invariants-only': {'id': 'GFDL-1.3-invariants-only', 'deprecated': False}, + 'gfdl-1.3-invariants-or-later': {'id': 'GFDL-1.3-invariants-or-later', 'deprecated': False}, + 'gfdl-1.3-no-invariants-only': {'id': 'GFDL-1.3-no-invariants-only', 'deprecated': False}, + 'gfdl-1.3-no-invariants-or-later': {'id': 'GFDL-1.3-no-invariants-or-later', 'deprecated': False}, + 'gfdl-1.3-only': {'id': 'GFDL-1.3-only', 'deprecated': False}, + 'gfdl-1.3-or-later': {'id': 
'GFDL-1.3-or-later', 'deprecated': False}, + 'giftware': {'id': 'Giftware', 'deprecated': False}, + 'gl2ps': {'id': 'GL2PS', 'deprecated': False}, + 'glide': {'id': 'Glide', 'deprecated': False}, + 'glulxe': {'id': 'Glulxe', 'deprecated': False}, + 'glwtpl': {'id': 'GLWTPL', 'deprecated': False}, + 'gnuplot': {'id': 'gnuplot', 'deprecated': False}, + 'gpl-1.0': {'id': 'GPL-1.0', 'deprecated': True}, + 'gpl-1.0+': {'id': 'GPL-1.0+', 'deprecated': True}, + 'gpl-1.0-only': {'id': 'GPL-1.0-only', 'deprecated': False}, + 'gpl-1.0-or-later': {'id': 'GPL-1.0-or-later', 'deprecated': False}, + 'gpl-2.0': {'id': 'GPL-2.0', 'deprecated': True}, + 'gpl-2.0+': {'id': 'GPL-2.0+', 'deprecated': True}, + 'gpl-2.0-only': {'id': 'GPL-2.0-only', 'deprecated': False}, + 'gpl-2.0-or-later': {'id': 'GPL-2.0-or-later', 'deprecated': False}, + 'gpl-2.0-with-autoconf-exception': {'id': 'GPL-2.0-with-autoconf-exception', 'deprecated': True}, + 'gpl-2.0-with-bison-exception': {'id': 'GPL-2.0-with-bison-exception', 'deprecated': True}, + 'gpl-2.0-with-classpath-exception': {'id': 'GPL-2.0-with-classpath-exception', 'deprecated': True}, + 'gpl-2.0-with-font-exception': {'id': 'GPL-2.0-with-font-exception', 'deprecated': True}, + 'gpl-2.0-with-gcc-exception': {'id': 'GPL-2.0-with-GCC-exception', 'deprecated': True}, + 'gpl-3.0': {'id': 'GPL-3.0', 'deprecated': True}, + 'gpl-3.0+': {'id': 'GPL-3.0+', 'deprecated': True}, + 'gpl-3.0-only': {'id': 'GPL-3.0-only', 'deprecated': False}, + 'gpl-3.0-or-later': {'id': 'GPL-3.0-or-later', 'deprecated': False}, + 'gpl-3.0-with-autoconf-exception': {'id': 'GPL-3.0-with-autoconf-exception', 'deprecated': True}, + 'gpl-3.0-with-gcc-exception': {'id': 'GPL-3.0-with-GCC-exception', 'deprecated': True}, + 'graphics-gems': {'id': 'Graphics-Gems', 'deprecated': False}, + 'gsoap-1.3b': {'id': 'gSOAP-1.3b', 'deprecated': False}, + 'gtkbook': {'id': 'gtkbook', 'deprecated': False}, + 'gutmann': {'id': 'Gutmann', 'deprecated': False}, + 'haskellreport': {'id': 
'HaskellReport', 'deprecated': False}, + 'hdparm': {'id': 'hdparm', 'deprecated': False}, + 'hidapi': {'id': 'HIDAPI', 'deprecated': False}, + 'hippocratic-2.1': {'id': 'Hippocratic-2.1', 'deprecated': False}, + 'hp-1986': {'id': 'HP-1986', 'deprecated': False}, + 'hp-1989': {'id': 'HP-1989', 'deprecated': False}, + 'hpnd': {'id': 'HPND', 'deprecated': False}, + 'hpnd-dec': {'id': 'HPND-DEC', 'deprecated': False}, + 'hpnd-doc': {'id': 'HPND-doc', 'deprecated': False}, + 'hpnd-doc-sell': {'id': 'HPND-doc-sell', 'deprecated': False}, + 'hpnd-export-us': {'id': 'HPND-export-US', 'deprecated': False}, + 'hpnd-export-us-acknowledgement': {'id': 'HPND-export-US-acknowledgement', 'deprecated': False}, + 'hpnd-export-us-modify': {'id': 'HPND-export-US-modify', 'deprecated': False}, + 'hpnd-export2-us': {'id': 'HPND-export2-US', 'deprecated': False}, + 'hpnd-fenneberg-livingston': {'id': 'HPND-Fenneberg-Livingston', 'deprecated': False}, + 'hpnd-inria-imag': {'id': 'HPND-INRIA-IMAG', 'deprecated': False}, + 'hpnd-intel': {'id': 'HPND-Intel', 'deprecated': False}, + 'hpnd-kevlin-henney': {'id': 'HPND-Kevlin-Henney', 'deprecated': False}, + 'hpnd-markus-kuhn': {'id': 'HPND-Markus-Kuhn', 'deprecated': False}, + 'hpnd-merchantability-variant': {'id': 'HPND-merchantability-variant', 'deprecated': False}, + 'hpnd-mit-disclaimer': {'id': 'HPND-MIT-disclaimer', 'deprecated': False}, + 'hpnd-netrek': {'id': 'HPND-Netrek', 'deprecated': False}, + 'hpnd-pbmplus': {'id': 'HPND-Pbmplus', 'deprecated': False}, + 'hpnd-sell-mit-disclaimer-xserver': {'id': 'HPND-sell-MIT-disclaimer-xserver', 'deprecated': False}, + 'hpnd-sell-regexpr': {'id': 'HPND-sell-regexpr', 'deprecated': False}, + 'hpnd-sell-variant': {'id': 'HPND-sell-variant', 'deprecated': False}, + 'hpnd-sell-variant-mit-disclaimer': {'id': 'HPND-sell-variant-MIT-disclaimer', 'deprecated': False}, + 'hpnd-sell-variant-mit-disclaimer-rev': {'id': 'HPND-sell-variant-MIT-disclaimer-rev', 'deprecated': False}, + 'hpnd-uc': {'id': 
'HPND-UC', 'deprecated': False}, + 'hpnd-uc-export-us': {'id': 'HPND-UC-export-US', 'deprecated': False}, + 'htmltidy': {'id': 'HTMLTIDY', 'deprecated': False}, + 'ibm-pibs': {'id': 'IBM-pibs', 'deprecated': False}, + 'icu': {'id': 'ICU', 'deprecated': False}, + 'iec-code-components-eula': {'id': 'IEC-Code-Components-EULA', 'deprecated': False}, + 'ijg': {'id': 'IJG', 'deprecated': False}, + 'ijg-short': {'id': 'IJG-short', 'deprecated': False}, + 'imagemagick': {'id': 'ImageMagick', 'deprecated': False}, + 'imatix': {'id': 'iMatix', 'deprecated': False}, + 'imlib2': {'id': 'Imlib2', 'deprecated': False}, + 'info-zip': {'id': 'Info-ZIP', 'deprecated': False}, + 'inner-net-2.0': {'id': 'Inner-Net-2.0', 'deprecated': False}, + 'intel': {'id': 'Intel', 'deprecated': False}, + 'intel-acpi': {'id': 'Intel-ACPI', 'deprecated': False}, + 'interbase-1.0': {'id': 'Interbase-1.0', 'deprecated': False}, + 'ipa': {'id': 'IPA', 'deprecated': False}, + 'ipl-1.0': {'id': 'IPL-1.0', 'deprecated': False}, + 'isc': {'id': 'ISC', 'deprecated': False}, + 'isc-veillard': {'id': 'ISC-Veillard', 'deprecated': False}, + 'jam': {'id': 'Jam', 'deprecated': False}, + 'jasper-2.0': {'id': 'JasPer-2.0', 'deprecated': False}, + 'jpl-image': {'id': 'JPL-image', 'deprecated': False}, + 'jpnic': {'id': 'JPNIC', 'deprecated': False}, + 'json': {'id': 'JSON', 'deprecated': False}, + 'kastrup': {'id': 'Kastrup', 'deprecated': False}, + 'kazlib': {'id': 'Kazlib', 'deprecated': False}, + 'knuth-ctan': {'id': 'Knuth-CTAN', 'deprecated': False}, + 'lal-1.2': {'id': 'LAL-1.2', 'deprecated': False}, + 'lal-1.3': {'id': 'LAL-1.3', 'deprecated': False}, + 'latex2e': {'id': 'Latex2e', 'deprecated': False}, + 'latex2e-translated-notice': {'id': 'Latex2e-translated-notice', 'deprecated': False}, + 'leptonica': {'id': 'Leptonica', 'deprecated': False}, + 'lgpl-2.0': {'id': 'LGPL-2.0', 'deprecated': True}, + 'lgpl-2.0+': {'id': 'LGPL-2.0+', 'deprecated': True}, + 'lgpl-2.0-only': {'id': 'LGPL-2.0-only', 
'deprecated': False}, + 'lgpl-2.0-or-later': {'id': 'LGPL-2.0-or-later', 'deprecated': False}, + 'lgpl-2.1': {'id': 'LGPL-2.1', 'deprecated': True}, + 'lgpl-2.1+': {'id': 'LGPL-2.1+', 'deprecated': True}, + 'lgpl-2.1-only': {'id': 'LGPL-2.1-only', 'deprecated': False}, + 'lgpl-2.1-or-later': {'id': 'LGPL-2.1-or-later', 'deprecated': False}, + 'lgpl-3.0': {'id': 'LGPL-3.0', 'deprecated': True}, + 'lgpl-3.0+': {'id': 'LGPL-3.0+', 'deprecated': True}, + 'lgpl-3.0-only': {'id': 'LGPL-3.0-only', 'deprecated': False}, + 'lgpl-3.0-or-later': {'id': 'LGPL-3.0-or-later', 'deprecated': False}, + 'lgpllr': {'id': 'LGPLLR', 'deprecated': False}, + 'libpng': {'id': 'Libpng', 'deprecated': False}, + 'libpng-2.0': {'id': 'libpng-2.0', 'deprecated': False}, + 'libselinux-1.0': {'id': 'libselinux-1.0', 'deprecated': False}, + 'libtiff': {'id': 'libtiff', 'deprecated': False}, + 'libutil-david-nugent': {'id': 'libutil-David-Nugent', 'deprecated': False}, + 'liliq-p-1.1': {'id': 'LiLiQ-P-1.1', 'deprecated': False}, + 'liliq-r-1.1': {'id': 'LiLiQ-R-1.1', 'deprecated': False}, + 'liliq-rplus-1.1': {'id': 'LiLiQ-Rplus-1.1', 'deprecated': False}, + 'linux-man-pages-1-para': {'id': 'Linux-man-pages-1-para', 'deprecated': False}, + 'linux-man-pages-copyleft': {'id': 'Linux-man-pages-copyleft', 'deprecated': False}, + 'linux-man-pages-copyleft-2-para': {'id': 'Linux-man-pages-copyleft-2-para', 'deprecated': False}, + 'linux-man-pages-copyleft-var': {'id': 'Linux-man-pages-copyleft-var', 'deprecated': False}, + 'linux-openib': {'id': 'Linux-OpenIB', 'deprecated': False}, + 'loop': {'id': 'LOOP', 'deprecated': False}, + 'lpd-document': {'id': 'LPD-document', 'deprecated': False}, + 'lpl-1.0': {'id': 'LPL-1.0', 'deprecated': False}, + 'lpl-1.02': {'id': 'LPL-1.02', 'deprecated': False}, + 'lppl-1.0': {'id': 'LPPL-1.0', 'deprecated': False}, + 'lppl-1.1': {'id': 'LPPL-1.1', 'deprecated': False}, + 'lppl-1.2': {'id': 'LPPL-1.2', 'deprecated': False}, + 'lppl-1.3a': {'id': 'LPPL-1.3a', 
'deprecated': False}, + 'lppl-1.3c': {'id': 'LPPL-1.3c', 'deprecated': False}, + 'lsof': {'id': 'lsof', 'deprecated': False}, + 'lucida-bitmap-fonts': {'id': 'Lucida-Bitmap-Fonts', 'deprecated': False}, + 'lzma-sdk-9.11-to-9.20': {'id': 'LZMA-SDK-9.11-to-9.20', 'deprecated': False}, + 'lzma-sdk-9.22': {'id': 'LZMA-SDK-9.22', 'deprecated': False}, + 'mackerras-3-clause': {'id': 'Mackerras-3-Clause', 'deprecated': False}, + 'mackerras-3-clause-acknowledgment': {'id': 'Mackerras-3-Clause-acknowledgment', 'deprecated': False}, + 'magaz': {'id': 'magaz', 'deprecated': False}, + 'mailprio': {'id': 'mailprio', 'deprecated': False}, + 'makeindex': {'id': 'MakeIndex', 'deprecated': False}, + 'martin-birgmeier': {'id': 'Martin-Birgmeier', 'deprecated': False}, + 'mcphee-slideshow': {'id': 'McPhee-slideshow', 'deprecated': False}, + 'metamail': {'id': 'metamail', 'deprecated': False}, + 'minpack': {'id': 'Minpack', 'deprecated': False}, + 'miros': {'id': 'MirOS', 'deprecated': False}, + 'mit': {'id': 'MIT', 'deprecated': False}, + 'mit-0': {'id': 'MIT-0', 'deprecated': False}, + 'mit-advertising': {'id': 'MIT-advertising', 'deprecated': False}, + 'mit-cmu': {'id': 'MIT-CMU', 'deprecated': False}, + 'mit-enna': {'id': 'MIT-enna', 'deprecated': False}, + 'mit-feh': {'id': 'MIT-feh', 'deprecated': False}, + 'mit-festival': {'id': 'MIT-Festival', 'deprecated': False}, + 'mit-khronos-old': {'id': 'MIT-Khronos-old', 'deprecated': False}, + 'mit-modern-variant': {'id': 'MIT-Modern-Variant', 'deprecated': False}, + 'mit-open-group': {'id': 'MIT-open-group', 'deprecated': False}, + 'mit-testregex': {'id': 'MIT-testregex', 'deprecated': False}, + 'mit-wu': {'id': 'MIT-Wu', 'deprecated': False}, + 'mitnfa': {'id': 'MITNFA', 'deprecated': False}, + 'mmixware': {'id': 'MMIXware', 'deprecated': False}, + 'motosoto': {'id': 'Motosoto', 'deprecated': False}, + 'mpeg-ssg': {'id': 'MPEG-SSG', 'deprecated': False}, + 'mpi-permissive': {'id': 'mpi-permissive', 'deprecated': False}, + 'mpich2': 
{'id': 'mpich2', 'deprecated': False}, + 'mpl-1.0': {'id': 'MPL-1.0', 'deprecated': False}, + 'mpl-1.1': {'id': 'MPL-1.1', 'deprecated': False}, + 'mpl-2.0': {'id': 'MPL-2.0', 'deprecated': False}, + 'mpl-2.0-no-copyleft-exception': {'id': 'MPL-2.0-no-copyleft-exception', 'deprecated': False}, + 'mplus': {'id': 'mplus', 'deprecated': False}, + 'ms-lpl': {'id': 'MS-LPL', 'deprecated': False}, + 'ms-pl': {'id': 'MS-PL', 'deprecated': False}, + 'ms-rl': {'id': 'MS-RL', 'deprecated': False}, + 'mtll': {'id': 'MTLL', 'deprecated': False}, + 'mulanpsl-1.0': {'id': 'MulanPSL-1.0', 'deprecated': False}, + 'mulanpsl-2.0': {'id': 'MulanPSL-2.0', 'deprecated': False}, + 'multics': {'id': 'Multics', 'deprecated': False}, + 'mup': {'id': 'Mup', 'deprecated': False}, + 'naist-2003': {'id': 'NAIST-2003', 'deprecated': False}, + 'nasa-1.3': {'id': 'NASA-1.3', 'deprecated': False}, + 'naumen': {'id': 'Naumen', 'deprecated': False}, + 'nbpl-1.0': {'id': 'NBPL-1.0', 'deprecated': False}, + 'ncbi-pd': {'id': 'NCBI-PD', 'deprecated': False}, + 'ncgl-uk-2.0': {'id': 'NCGL-UK-2.0', 'deprecated': False}, + 'ncl': {'id': 'NCL', 'deprecated': False}, + 'ncsa': {'id': 'NCSA', 'deprecated': False}, + 'net-snmp': {'id': 'Net-SNMP', 'deprecated': True}, + 'netcdf': {'id': 'NetCDF', 'deprecated': False}, + 'newsletr': {'id': 'Newsletr', 'deprecated': False}, + 'ngpl': {'id': 'NGPL', 'deprecated': False}, + 'nicta-1.0': {'id': 'NICTA-1.0', 'deprecated': False}, + 'nist-pd': {'id': 'NIST-PD', 'deprecated': False}, + 'nist-pd-fallback': {'id': 'NIST-PD-fallback', 'deprecated': False}, + 'nist-software': {'id': 'NIST-Software', 'deprecated': False}, + 'nlod-1.0': {'id': 'NLOD-1.0', 'deprecated': False}, + 'nlod-2.0': {'id': 'NLOD-2.0', 'deprecated': False}, + 'nlpl': {'id': 'NLPL', 'deprecated': False}, + 'nokia': {'id': 'Nokia', 'deprecated': False}, + 'nosl': {'id': 'NOSL', 'deprecated': False}, + 'noweb': {'id': 'Noweb', 'deprecated': False}, + 'npl-1.0': {'id': 'NPL-1.0', 'deprecated': False}, + 
'npl-1.1': {'id': 'NPL-1.1', 'deprecated': False}, + 'nposl-3.0': {'id': 'NPOSL-3.0', 'deprecated': False}, + 'nrl': {'id': 'NRL', 'deprecated': False}, + 'ntp': {'id': 'NTP', 'deprecated': False}, + 'ntp-0': {'id': 'NTP-0', 'deprecated': False}, + 'nunit': {'id': 'Nunit', 'deprecated': True}, + 'o-uda-1.0': {'id': 'O-UDA-1.0', 'deprecated': False}, + 'oar': {'id': 'OAR', 'deprecated': False}, + 'occt-pl': {'id': 'OCCT-PL', 'deprecated': False}, + 'oclc-2.0': {'id': 'OCLC-2.0', 'deprecated': False}, + 'odbl-1.0': {'id': 'ODbL-1.0', 'deprecated': False}, + 'odc-by-1.0': {'id': 'ODC-By-1.0', 'deprecated': False}, + 'offis': {'id': 'OFFIS', 'deprecated': False}, + 'ofl-1.0': {'id': 'OFL-1.0', 'deprecated': False}, + 'ofl-1.0-no-rfn': {'id': 'OFL-1.0-no-RFN', 'deprecated': False}, + 'ofl-1.0-rfn': {'id': 'OFL-1.0-RFN', 'deprecated': False}, + 'ofl-1.1': {'id': 'OFL-1.1', 'deprecated': False}, + 'ofl-1.1-no-rfn': {'id': 'OFL-1.1-no-RFN', 'deprecated': False}, + 'ofl-1.1-rfn': {'id': 'OFL-1.1-RFN', 'deprecated': False}, + 'ogc-1.0': {'id': 'OGC-1.0', 'deprecated': False}, + 'ogdl-taiwan-1.0': {'id': 'OGDL-Taiwan-1.0', 'deprecated': False}, + 'ogl-canada-2.0': {'id': 'OGL-Canada-2.0', 'deprecated': False}, + 'ogl-uk-1.0': {'id': 'OGL-UK-1.0', 'deprecated': False}, + 'ogl-uk-2.0': {'id': 'OGL-UK-2.0', 'deprecated': False}, + 'ogl-uk-3.0': {'id': 'OGL-UK-3.0', 'deprecated': False}, + 'ogtsl': {'id': 'OGTSL', 'deprecated': False}, + 'oldap-1.1': {'id': 'OLDAP-1.1', 'deprecated': False}, + 'oldap-1.2': {'id': 'OLDAP-1.2', 'deprecated': False}, + 'oldap-1.3': {'id': 'OLDAP-1.3', 'deprecated': False}, + 'oldap-1.4': {'id': 'OLDAP-1.4', 'deprecated': False}, + 'oldap-2.0': {'id': 'OLDAP-2.0', 'deprecated': False}, + 'oldap-2.0.1': {'id': 'OLDAP-2.0.1', 'deprecated': False}, + 'oldap-2.1': {'id': 'OLDAP-2.1', 'deprecated': False}, + 'oldap-2.2': {'id': 'OLDAP-2.2', 'deprecated': False}, + 'oldap-2.2.1': {'id': 'OLDAP-2.2.1', 'deprecated': False}, + 'oldap-2.2.2': {'id': 
'OLDAP-2.2.2', 'deprecated': False}, + 'oldap-2.3': {'id': 'OLDAP-2.3', 'deprecated': False}, + 'oldap-2.4': {'id': 'OLDAP-2.4', 'deprecated': False}, + 'oldap-2.5': {'id': 'OLDAP-2.5', 'deprecated': False}, + 'oldap-2.6': {'id': 'OLDAP-2.6', 'deprecated': False}, + 'oldap-2.7': {'id': 'OLDAP-2.7', 'deprecated': False}, + 'oldap-2.8': {'id': 'OLDAP-2.8', 'deprecated': False}, + 'olfl-1.3': {'id': 'OLFL-1.3', 'deprecated': False}, + 'oml': {'id': 'OML', 'deprecated': False}, + 'openpbs-2.3': {'id': 'OpenPBS-2.3', 'deprecated': False}, + 'openssl': {'id': 'OpenSSL', 'deprecated': False}, + 'openssl-standalone': {'id': 'OpenSSL-standalone', 'deprecated': False}, + 'openvision': {'id': 'OpenVision', 'deprecated': False}, + 'opl-1.0': {'id': 'OPL-1.0', 'deprecated': False}, + 'opl-uk-3.0': {'id': 'OPL-UK-3.0', 'deprecated': False}, + 'opubl-1.0': {'id': 'OPUBL-1.0', 'deprecated': False}, + 'oset-pl-2.1': {'id': 'OSET-PL-2.1', 'deprecated': False}, + 'osl-1.0': {'id': 'OSL-1.0', 'deprecated': False}, + 'osl-1.1': {'id': 'OSL-1.1', 'deprecated': False}, + 'osl-2.0': {'id': 'OSL-2.0', 'deprecated': False}, + 'osl-2.1': {'id': 'OSL-2.1', 'deprecated': False}, + 'osl-3.0': {'id': 'OSL-3.0', 'deprecated': False}, + 'padl': {'id': 'PADL', 'deprecated': False}, + 'parity-6.0.0': {'id': 'Parity-6.0.0', 'deprecated': False}, + 'parity-7.0.0': {'id': 'Parity-7.0.0', 'deprecated': False}, + 'pddl-1.0': {'id': 'PDDL-1.0', 'deprecated': False}, + 'php-3.0': {'id': 'PHP-3.0', 'deprecated': False}, + 'php-3.01': {'id': 'PHP-3.01', 'deprecated': False}, + 'pixar': {'id': 'Pixar', 'deprecated': False}, + 'pkgconf': {'id': 'pkgconf', 'deprecated': False}, + 'plexus': {'id': 'Plexus', 'deprecated': False}, + 'pnmstitch': {'id': 'pnmstitch', 'deprecated': False}, + 'polyform-noncommercial-1.0.0': {'id': 'PolyForm-Noncommercial-1.0.0', 'deprecated': False}, + 'polyform-small-business-1.0.0': {'id': 'PolyForm-Small-Business-1.0.0', 'deprecated': False}, + 'postgresql': {'id': 'PostgreSQL', 
'deprecated': False}, + 'ppl': {'id': 'PPL', 'deprecated': False}, + 'psf-2.0': {'id': 'PSF-2.0', 'deprecated': False}, + 'psfrag': {'id': 'psfrag', 'deprecated': False}, + 'psutils': {'id': 'psutils', 'deprecated': False}, + 'python-2.0': {'id': 'Python-2.0', 'deprecated': False}, + 'python-2.0.1': {'id': 'Python-2.0.1', 'deprecated': False}, + 'python-ldap': {'id': 'python-ldap', 'deprecated': False}, + 'qhull': {'id': 'Qhull', 'deprecated': False}, + 'qpl-1.0': {'id': 'QPL-1.0', 'deprecated': False}, + 'qpl-1.0-inria-2004': {'id': 'QPL-1.0-INRIA-2004', 'deprecated': False}, + 'radvd': {'id': 'radvd', 'deprecated': False}, + 'rdisc': {'id': 'Rdisc', 'deprecated': False}, + 'rhecos-1.1': {'id': 'RHeCos-1.1', 'deprecated': False}, + 'rpl-1.1': {'id': 'RPL-1.1', 'deprecated': False}, + 'rpl-1.5': {'id': 'RPL-1.5', 'deprecated': False}, + 'rpsl-1.0': {'id': 'RPSL-1.0', 'deprecated': False}, + 'rsa-md': {'id': 'RSA-MD', 'deprecated': False}, + 'rscpl': {'id': 'RSCPL', 'deprecated': False}, + 'ruby': {'id': 'Ruby', 'deprecated': False}, + 'ruby-pty': {'id': 'Ruby-pty', 'deprecated': False}, + 'sax-pd': {'id': 'SAX-PD', 'deprecated': False}, + 'sax-pd-2.0': {'id': 'SAX-PD-2.0', 'deprecated': False}, + 'saxpath': {'id': 'Saxpath', 'deprecated': False}, + 'scea': {'id': 'SCEA', 'deprecated': False}, + 'schemereport': {'id': 'SchemeReport', 'deprecated': False}, + 'sendmail': {'id': 'Sendmail', 'deprecated': False}, + 'sendmail-8.23': {'id': 'Sendmail-8.23', 'deprecated': False}, + 'sgi-b-1.0': {'id': 'SGI-B-1.0', 'deprecated': False}, + 'sgi-b-1.1': {'id': 'SGI-B-1.1', 'deprecated': False}, + 'sgi-b-2.0': {'id': 'SGI-B-2.0', 'deprecated': False}, + 'sgi-opengl': {'id': 'SGI-OpenGL', 'deprecated': False}, + 'sgp4': {'id': 'SGP4', 'deprecated': False}, + 'shl-0.5': {'id': 'SHL-0.5', 'deprecated': False}, + 'shl-0.51': {'id': 'SHL-0.51', 'deprecated': False}, + 'simpl-2.0': {'id': 'SimPL-2.0', 'deprecated': False}, + 'sissl': {'id': 'SISSL', 'deprecated': False}, + 
'sissl-1.2': {'id': 'SISSL-1.2', 'deprecated': False}, + 'sl': {'id': 'SL', 'deprecated': False}, + 'sleepycat': {'id': 'Sleepycat', 'deprecated': False}, + 'smlnj': {'id': 'SMLNJ', 'deprecated': False}, + 'smppl': {'id': 'SMPPL', 'deprecated': False}, + 'snia': {'id': 'SNIA', 'deprecated': False}, + 'snprintf': {'id': 'snprintf', 'deprecated': False}, + 'softsurfer': {'id': 'softSurfer', 'deprecated': False}, + 'soundex': {'id': 'Soundex', 'deprecated': False}, + 'spencer-86': {'id': 'Spencer-86', 'deprecated': False}, + 'spencer-94': {'id': 'Spencer-94', 'deprecated': False}, + 'spencer-99': {'id': 'Spencer-99', 'deprecated': False}, + 'spl-1.0': {'id': 'SPL-1.0', 'deprecated': False}, + 'ssh-keyscan': {'id': 'ssh-keyscan', 'deprecated': False}, + 'ssh-openssh': {'id': 'SSH-OpenSSH', 'deprecated': False}, + 'ssh-short': {'id': 'SSH-short', 'deprecated': False}, + 'ssleay-standalone': {'id': 'SSLeay-standalone', 'deprecated': False}, + 'sspl-1.0': {'id': 'SSPL-1.0', 'deprecated': False}, + 'standardml-nj': {'id': 'StandardML-NJ', 'deprecated': True}, + 'sugarcrm-1.1.3': {'id': 'SugarCRM-1.1.3', 'deprecated': False}, + 'sun-ppp': {'id': 'Sun-PPP', 'deprecated': False}, + 'sun-ppp-2000': {'id': 'Sun-PPP-2000', 'deprecated': False}, + 'sunpro': {'id': 'SunPro', 'deprecated': False}, + 'swl': {'id': 'SWL', 'deprecated': False}, + 'swrule': {'id': 'swrule', 'deprecated': False}, + 'symlinks': {'id': 'Symlinks', 'deprecated': False}, + 'tapr-ohl-1.0': {'id': 'TAPR-OHL-1.0', 'deprecated': False}, + 'tcl': {'id': 'TCL', 'deprecated': False}, + 'tcp-wrappers': {'id': 'TCP-wrappers', 'deprecated': False}, + 'termreadkey': {'id': 'TermReadKey', 'deprecated': False}, + 'tgppl-1.0': {'id': 'TGPPL-1.0', 'deprecated': False}, + 'threeparttable': {'id': 'threeparttable', 'deprecated': False}, + 'tmate': {'id': 'TMate', 'deprecated': False}, + 'torque-1.1': {'id': 'TORQUE-1.1', 'deprecated': False}, + 'tosl': {'id': 'TOSL', 'deprecated': False}, + 'tpdl': {'id': 'TPDL', 
'deprecated': False}, + 'tpl-1.0': {'id': 'TPL-1.0', 'deprecated': False}, + 'ttwl': {'id': 'TTWL', 'deprecated': False}, + 'ttyp0': {'id': 'TTYP0', 'deprecated': False}, + 'tu-berlin-1.0': {'id': 'TU-Berlin-1.0', 'deprecated': False}, + 'tu-berlin-2.0': {'id': 'TU-Berlin-2.0', 'deprecated': False}, + 'ubuntu-font-1.0': {'id': 'Ubuntu-font-1.0', 'deprecated': False}, + 'ucar': {'id': 'UCAR', 'deprecated': False}, + 'ucl-1.0': {'id': 'UCL-1.0', 'deprecated': False}, + 'ulem': {'id': 'ulem', 'deprecated': False}, + 'umich-merit': {'id': 'UMich-Merit', 'deprecated': False}, + 'unicode-3.0': {'id': 'Unicode-3.0', 'deprecated': False}, + 'unicode-dfs-2015': {'id': 'Unicode-DFS-2015', 'deprecated': False}, + 'unicode-dfs-2016': {'id': 'Unicode-DFS-2016', 'deprecated': False}, + 'unicode-tou': {'id': 'Unicode-TOU', 'deprecated': False}, + 'unixcrypt': {'id': 'UnixCrypt', 'deprecated': False}, + 'unlicense': {'id': 'Unlicense', 'deprecated': False}, + 'upl-1.0': {'id': 'UPL-1.0', 'deprecated': False}, + 'urt-rle': {'id': 'URT-RLE', 'deprecated': False}, + 'vim': {'id': 'Vim', 'deprecated': False}, + 'vostrom': {'id': 'VOSTROM', 'deprecated': False}, + 'vsl-1.0': {'id': 'VSL-1.0', 'deprecated': False}, + 'w3c': {'id': 'W3C', 'deprecated': False}, + 'w3c-19980720': {'id': 'W3C-19980720', 'deprecated': False}, + 'w3c-20150513': {'id': 'W3C-20150513', 'deprecated': False}, + 'w3m': {'id': 'w3m', 'deprecated': False}, + 'watcom-1.0': {'id': 'Watcom-1.0', 'deprecated': False}, + 'widget-workshop': {'id': 'Widget-Workshop', 'deprecated': False}, + 'wsuipa': {'id': 'Wsuipa', 'deprecated': False}, + 'wtfpl': {'id': 'WTFPL', 'deprecated': False}, + 'wxwindows': {'id': 'wxWindows', 'deprecated': True}, + 'x11': {'id': 'X11', 'deprecated': False}, + 'x11-distribute-modifications-variant': {'id': 'X11-distribute-modifications-variant', 'deprecated': False}, + 'x11-swapped': {'id': 'X11-swapped', 'deprecated': False}, + 'xdebug-1.03': {'id': 'Xdebug-1.03', 'deprecated': False}, + 
'xerox': {'id': 'Xerox', 'deprecated': False}, + 'xfig': {'id': 'Xfig', 'deprecated': False}, + 'xfree86-1.1': {'id': 'XFree86-1.1', 'deprecated': False}, + 'xinetd': {'id': 'xinetd', 'deprecated': False}, + 'xkeyboard-config-zinoviev': {'id': 'xkeyboard-config-Zinoviev', 'deprecated': False}, + 'xlock': {'id': 'xlock', 'deprecated': False}, + 'xnet': {'id': 'Xnet', 'deprecated': False}, + 'xpp': {'id': 'xpp', 'deprecated': False}, + 'xskat': {'id': 'XSkat', 'deprecated': False}, + 'xzoom': {'id': 'xzoom', 'deprecated': False}, + 'ypl-1.0': {'id': 'YPL-1.0', 'deprecated': False}, + 'ypl-1.1': {'id': 'YPL-1.1', 'deprecated': False}, + 'zed': {'id': 'Zed', 'deprecated': False}, + 'zeeff': {'id': 'Zeeff', 'deprecated': False}, + 'zend-2.0': {'id': 'Zend-2.0', 'deprecated': False}, + 'zimbra-1.3': {'id': 'Zimbra-1.3', 'deprecated': False}, + 'zimbra-1.4': {'id': 'Zimbra-1.4', 'deprecated': False}, + 'zlib': {'id': 'Zlib', 'deprecated': False}, + 'zlib-acknowledgement': {'id': 'zlib-acknowledgement', 'deprecated': False}, + 'zpl-1.1': {'id': 'ZPL-1.1', 'deprecated': False}, + 'zpl-2.0': {'id': 'ZPL-2.0', 'deprecated': False}, + 'zpl-2.1': {'id': 'ZPL-2.1', 'deprecated': False}, +} + +EXCEPTIONS: dict[str, SPDXException] = { + '389-exception': {'id': '389-exception', 'deprecated': False}, + 'asterisk-exception': {'id': 'Asterisk-exception', 'deprecated': False}, + 'asterisk-linking-protocols-exception': {'id': 'Asterisk-linking-protocols-exception', 'deprecated': False}, + 'autoconf-exception-2.0': {'id': 'Autoconf-exception-2.0', 'deprecated': False}, + 'autoconf-exception-3.0': {'id': 'Autoconf-exception-3.0', 'deprecated': False}, + 'autoconf-exception-generic': {'id': 'Autoconf-exception-generic', 'deprecated': False}, + 'autoconf-exception-generic-3.0': {'id': 'Autoconf-exception-generic-3.0', 'deprecated': False}, + 'autoconf-exception-macro': {'id': 'Autoconf-exception-macro', 'deprecated': False}, + 'bison-exception-1.24': {'id': 'Bison-exception-1.24', 
'deprecated': False}, + 'bison-exception-2.2': {'id': 'Bison-exception-2.2', 'deprecated': False}, + 'bootloader-exception': {'id': 'Bootloader-exception', 'deprecated': False}, + 'classpath-exception-2.0': {'id': 'Classpath-exception-2.0', 'deprecated': False}, + 'clisp-exception-2.0': {'id': 'CLISP-exception-2.0', 'deprecated': False}, + 'cryptsetup-openssl-exception': {'id': 'cryptsetup-OpenSSL-exception', 'deprecated': False}, + 'digirule-foss-exception': {'id': 'DigiRule-FOSS-exception', 'deprecated': False}, + 'ecos-exception-2.0': {'id': 'eCos-exception-2.0', 'deprecated': False}, + 'erlang-otp-linking-exception': {'id': 'erlang-otp-linking-exception', 'deprecated': False}, + 'fawkes-runtime-exception': {'id': 'Fawkes-Runtime-exception', 'deprecated': False}, + 'fltk-exception': {'id': 'FLTK-exception', 'deprecated': False}, + 'fmt-exception': {'id': 'fmt-exception', 'deprecated': False}, + 'font-exception-2.0': {'id': 'Font-exception-2.0', 'deprecated': False}, + 'freertos-exception-2.0': {'id': 'freertos-exception-2.0', 'deprecated': False}, + 'gcc-exception-2.0': {'id': 'GCC-exception-2.0', 'deprecated': False}, + 'gcc-exception-2.0-note': {'id': 'GCC-exception-2.0-note', 'deprecated': False}, + 'gcc-exception-3.1': {'id': 'GCC-exception-3.1', 'deprecated': False}, + 'gmsh-exception': {'id': 'Gmsh-exception', 'deprecated': False}, + 'gnat-exception': {'id': 'GNAT-exception', 'deprecated': False}, + 'gnome-examples-exception': {'id': 'GNOME-examples-exception', 'deprecated': False}, + 'gnu-compiler-exception': {'id': 'GNU-compiler-exception', 'deprecated': False}, + 'gnu-javamail-exception': {'id': 'gnu-javamail-exception', 'deprecated': False}, + 'gpl-3.0-interface-exception': {'id': 'GPL-3.0-interface-exception', 'deprecated': False}, + 'gpl-3.0-linking-exception': {'id': 'GPL-3.0-linking-exception', 'deprecated': False}, + 'gpl-3.0-linking-source-exception': {'id': 'GPL-3.0-linking-source-exception', 'deprecated': False}, + 'gpl-cc-1.0': {'id': 
'GPL-CC-1.0', 'deprecated': False}, + 'gstreamer-exception-2005': {'id': 'GStreamer-exception-2005', 'deprecated': False}, + 'gstreamer-exception-2008': {'id': 'GStreamer-exception-2008', 'deprecated': False}, + 'i2p-gpl-java-exception': {'id': 'i2p-gpl-java-exception', 'deprecated': False}, + 'kicad-libraries-exception': {'id': 'KiCad-libraries-exception', 'deprecated': False}, + 'lgpl-3.0-linking-exception': {'id': 'LGPL-3.0-linking-exception', 'deprecated': False}, + 'libpri-openh323-exception': {'id': 'libpri-OpenH323-exception', 'deprecated': False}, + 'libtool-exception': {'id': 'Libtool-exception', 'deprecated': False}, + 'linux-syscall-note': {'id': 'Linux-syscall-note', 'deprecated': False}, + 'llgpl': {'id': 'LLGPL', 'deprecated': False}, + 'llvm-exception': {'id': 'LLVM-exception', 'deprecated': False}, + 'lzma-exception': {'id': 'LZMA-exception', 'deprecated': False}, + 'mif-exception': {'id': 'mif-exception', 'deprecated': False}, + 'nokia-qt-exception-1.1': {'id': 'Nokia-Qt-exception-1.1', 'deprecated': True}, + 'ocaml-lgpl-linking-exception': {'id': 'OCaml-LGPL-linking-exception', 'deprecated': False}, + 'occt-exception-1.0': {'id': 'OCCT-exception-1.0', 'deprecated': False}, + 'openjdk-assembly-exception-1.0': {'id': 'OpenJDK-assembly-exception-1.0', 'deprecated': False}, + 'openvpn-openssl-exception': {'id': 'openvpn-openssl-exception', 'deprecated': False}, + 'pcre2-exception': {'id': 'PCRE2-exception', 'deprecated': False}, + 'ps-or-pdf-font-exception-20170817': {'id': 'PS-or-PDF-font-exception-20170817', 'deprecated': False}, + 'qpl-1.0-inria-2004-exception': {'id': 'QPL-1.0-INRIA-2004-exception', 'deprecated': False}, + 'qt-gpl-exception-1.0': {'id': 'Qt-GPL-exception-1.0', 'deprecated': False}, + 'qt-lgpl-exception-1.1': {'id': 'Qt-LGPL-exception-1.1', 'deprecated': False}, + 'qwt-exception-1.0': {'id': 'Qwt-exception-1.0', 'deprecated': False}, + 'romic-exception': {'id': 'romic-exception', 'deprecated': False}, + 
'rrdtool-floss-exception-2.0': {'id': 'RRDtool-FLOSS-exception-2.0', 'deprecated': False}, + 'sane-exception': {'id': 'SANE-exception', 'deprecated': False}, + 'shl-2.0': {'id': 'SHL-2.0', 'deprecated': False}, + 'shl-2.1': {'id': 'SHL-2.1', 'deprecated': False}, + 'stunnel-exception': {'id': 'stunnel-exception', 'deprecated': False}, + 'swi-exception': {'id': 'SWI-exception', 'deprecated': False}, + 'swift-exception': {'id': 'Swift-exception', 'deprecated': False}, + 'texinfo-exception': {'id': 'Texinfo-exception', 'deprecated': False}, + 'u-boot-exception-2.0': {'id': 'u-boot-exception-2.0', 'deprecated': False}, + 'ubdl-exception': {'id': 'UBDL-exception', 'deprecated': False}, + 'universal-foss-exception-1.0': {'id': 'Universal-FOSS-exception-1.0', 'deprecated': False}, + 'vsftpd-openssl-exception': {'id': 'vsftpd-openssl-exception', 'deprecated': False}, + 'wxwindows-exception-3.1': {'id': 'WxWindows-exception-3.1', 'deprecated': False}, + 'x11vnc-openssl-exception': {'id': 'x11vnc-openssl-exception', 'deprecated': False}, +} diff --git a/vllm/lib/python3.10/site-packages/packaging/markers.py b/vllm/lib/python3.10/site-packages/packaging/markers.py new file mode 100644 index 0000000000000000000000000000000000000000..fb7f49cf8cd43ffae71e3e8d15174d7536f9da02 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/packaging/markers.py @@ -0,0 +1,331 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. 
+ +from __future__ import annotations + +import operator +import os +import platform +import sys +from typing import Any, Callable, TypedDict, cast + +from ._parser import MarkerAtom, MarkerList, Op, Value, Variable +from ._parser import parse_marker as _parse_marker +from ._tokenizer import ParserSyntaxError +from .specifiers import InvalidSpecifier, Specifier +from .utils import canonicalize_name + +__all__ = [ + "InvalidMarker", + "Marker", + "UndefinedComparison", + "UndefinedEnvironmentName", + "default_environment", +] + +Operator = Callable[[str, str], bool] + + +class InvalidMarker(ValueError): + """ + An invalid marker was found, users should refer to PEP 508. + """ + + +class UndefinedComparison(ValueError): + """ + An invalid operation was attempted on a value that doesn't support it. + """ + + +class UndefinedEnvironmentName(ValueError): + """ + A name was attempted to be used that does not exist inside of the + environment. + """ + + +class Environment(TypedDict): + implementation_name: str + """The implementation's identifier, e.g. ``'cpython'``.""" + + implementation_version: str + """ + The implementation's version, e.g. ``'3.13.0a2'`` for CPython 3.13.0a2, or + ``'7.3.13'`` for PyPy3.10 v7.3.13. + """ + + os_name: str + """ + The value of :py:data:`os.name`. The name of the operating system dependent module + imported, e.g. ``'posix'``. + """ + + platform_machine: str + """ + Returns the machine type, e.g. ``'i386'``. + + An empty string if the value cannot be determined. + """ + + platform_release: str + """ + The system's release, e.g. ``'2.2.0'`` or ``'NT'``. + + An empty string if the value cannot be determined. + """ + + platform_system: str + """ + The system/OS name, e.g. ``'Linux'``, ``'Windows'`` or ``'Java'``. + + An empty string if the value cannot be determined. + """ + + platform_version: str + """ + The system's release version, e.g. ``'#3 on degas'``. + + An empty string if the value cannot be determined. 
+ """ + + python_full_version: str + """ + The Python version as string ``'major.minor.patchlevel'``. + + Note that unlike the Python :py:data:`sys.version`, this value will always include + the patchlevel (it defaults to 0). + """ + + platform_python_implementation: str + """ + A string identifying the Python implementation, e.g. ``'CPython'``. + """ + + python_version: str + """The Python version as string ``'major.minor'``.""" + + sys_platform: str + """ + This string contains a platform identifier that can be used to append + platform-specific components to :py:data:`sys.path`, for instance. + + For Unix systems, except on Linux and AIX, this is the lowercased OS name as + returned by ``uname -s`` with the first part of the version as returned by + ``uname -r`` appended, e.g. ``'sunos5'`` or ``'freebsd8'``, at the time when Python + was built. + """ + + +def _normalize_extra_values(results: Any) -> Any: + """ + Normalize extra values. + """ + if isinstance(results[0], tuple): + lhs, op, rhs = results[0] + if isinstance(lhs, Variable) and lhs.value == "extra": + normalized_extra = canonicalize_name(rhs.value) + rhs = Value(normalized_extra) + elif isinstance(rhs, Variable) and rhs.value == "extra": + normalized_extra = canonicalize_name(lhs.value) + lhs = Value(normalized_extra) + results[0] = lhs, op, rhs + return results + + +def _format_marker( + marker: list[str] | MarkerAtom | str, first: bool | None = True +) -> str: + assert isinstance(marker, (list, tuple, str)) + + # Sometimes we have a structure like [[...]] which is a single item list + # where the single item is itself it's own list. In that case we want skip + # the rest of this function so that we don't get extraneous () on the + # outside. 
+ if ( + isinstance(marker, list) + and len(marker) == 1 + and isinstance(marker[0], (list, tuple)) + ): + return _format_marker(marker[0]) + + if isinstance(marker, list): + inner = (_format_marker(m, first=False) for m in marker) + if first: + return " ".join(inner) + else: + return "(" + " ".join(inner) + ")" + elif isinstance(marker, tuple): + return " ".join([m.serialize() for m in marker]) + else: + return marker + + +_operators: dict[str, Operator] = { + "in": lambda lhs, rhs: lhs in rhs, + "not in": lambda lhs, rhs: lhs not in rhs, + "<": operator.lt, + "<=": operator.le, + "==": operator.eq, + "!=": operator.ne, + ">=": operator.ge, + ">": operator.gt, +} + + +def _eval_op(lhs: str, op: Op, rhs: str) -> bool: + try: + spec = Specifier("".join([op.serialize(), rhs])) + except InvalidSpecifier: + pass + else: + return spec.contains(lhs, prereleases=True) + + oper: Operator | None = _operators.get(op.serialize()) + if oper is None: + raise UndefinedComparison(f"Undefined {op!r} on {lhs!r} and {rhs!r}.") + + return oper(lhs, rhs) + + +def _normalize(*values: str, key: str) -> tuple[str, ...]: + # PEP 685 – Comparison of extra names for optional distribution dependencies + # https://peps.python.org/pep-0685/ + # > When comparing extra names, tools MUST normalize the names being + # > compared using the semantics outlined in PEP 503 for names + if key == "extra": + return tuple(canonicalize_name(v) for v in values) + + # other environment markers don't have such standards + return values + + +def _evaluate_markers(markers: MarkerList, environment: dict[str, str]) -> bool: + groups: list[list[bool]] = [[]] + + for marker in markers: + assert isinstance(marker, (list, tuple, str)) + + if isinstance(marker, list): + groups[-1].append(_evaluate_markers(marker, environment)) + elif isinstance(marker, tuple): + lhs, op, rhs = marker + + if isinstance(lhs, Variable): + environment_key = lhs.value + lhs_value = environment[environment_key] + rhs_value = rhs.value + 
else: + lhs_value = lhs.value + environment_key = rhs.value + rhs_value = environment[environment_key] + + lhs_value, rhs_value = _normalize(lhs_value, rhs_value, key=environment_key) + groups[-1].append(_eval_op(lhs_value, op, rhs_value)) + else: + assert marker in ["and", "or"] + if marker == "or": + groups.append([]) + + return any(all(item) for item in groups) + + +def format_full_version(info: sys._version_info) -> str: + version = f"{info.major}.{info.minor}.{info.micro}" + kind = info.releaselevel + if kind != "final": + version += kind[0] + str(info.serial) + return version + + +def default_environment() -> Environment: + iver = format_full_version(sys.implementation.version) + implementation_name = sys.implementation.name + return { + "implementation_name": implementation_name, + "implementation_version": iver, + "os_name": os.name, + "platform_machine": platform.machine(), + "platform_release": platform.release(), + "platform_system": platform.system(), + "platform_version": platform.version(), + "python_full_version": platform.python_version(), + "platform_python_implementation": platform.python_implementation(), + "python_version": ".".join(platform.python_version_tuple()[:2]), + "sys_platform": sys.platform, + } + + +class Marker: + def __init__(self, marker: str) -> None: + # Note: We create a Marker object without calling this constructor in + # packaging.requirements.Requirement. If any additional logic is + # added here, make sure to mirror/adapt Requirement. 
+ try: + self._markers = _normalize_extra_values(_parse_marker(marker)) + # The attribute `_markers` can be described in terms of a recursive type: + # MarkerList = List[Union[Tuple[Node, ...], str, MarkerList]] + # + # For example, the following expression: + # python_version > "3.6" or (python_version == "3.6" and os_name == "unix") + # + # is parsed into: + # [ + # (, ')>, ), + # 'and', + # [ + # (, , ), + # 'or', + # (, , ) + # ] + # ] + except ParserSyntaxError as e: + raise InvalidMarker(str(e)) from e + + def __str__(self) -> str: + return _format_marker(self._markers) + + def __repr__(self) -> str: + return f"" + + def __hash__(self) -> int: + return hash((self.__class__.__name__, str(self))) + + def __eq__(self, other: Any) -> bool: + if not isinstance(other, Marker): + return NotImplemented + + return str(self) == str(other) + + def evaluate(self, environment: dict[str, str] | None = None) -> bool: + """Evaluate a marker. + + Return the boolean from evaluating the given marker against the + environment. environment is an optional argument to override all or + part of the determined environment. + + The environment is determined from the current Python process. + """ + current_environment = cast("dict[str, str]", default_environment()) + current_environment["extra"] = "" + if environment is not None: + current_environment.update(environment) + # The API used to allow setting extra to None. We need to handle this + # case for backwards compatibility. + if current_environment["extra"] is None: + current_environment["extra"] = "" + + return _evaluate_markers( + self._markers, _repair_python_full_version(current_environment) + ) + + +def _repair_python_full_version(env: dict[str, str]) -> dict[str, str]: + """ + Work around platform.python_version() returning something that is not PEP 440 + compliant for non-tagged Python builds. 
+ """ + if env["python_full_version"].endswith("+"): + env["python_full_version"] += "local" + return env diff --git a/vllm/lib/python3.10/site-packages/packaging/metadata.py b/vllm/lib/python3.10/site-packages/packaging/metadata.py new file mode 100644 index 0000000000000000000000000000000000000000..721f411cfc44f6d24c13112e4246b5ad776a5e0b --- /dev/null +++ b/vllm/lib/python3.10/site-packages/packaging/metadata.py @@ -0,0 +1,863 @@ +from __future__ import annotations + +import email.feedparser +import email.header +import email.message +import email.parser +import email.policy +import pathlib +import sys +import typing +from typing import ( + Any, + Callable, + Generic, + Literal, + TypedDict, + cast, +) + +from . import licenses, requirements, specifiers, utils +from . import version as version_module +from .licenses import NormalizedLicenseExpression + +T = typing.TypeVar("T") + + +if sys.version_info >= (3, 11): # pragma: no cover + ExceptionGroup = ExceptionGroup +else: # pragma: no cover + + class ExceptionGroup(Exception): + """A minimal implementation of :external:exc:`ExceptionGroup` from Python 3.11. + + If :external:exc:`ExceptionGroup` is already defined by Python itself, + that version is used instead. + """ + + message: str + exceptions: list[Exception] + + def __init__(self, message: str, exceptions: list[Exception]) -> None: + self.message = message + self.exceptions = exceptions + + def __repr__(self) -> str: + return f"{self.__class__.__name__}({self.message!r}, {self.exceptions!r})" + + +class InvalidMetadata(ValueError): + """A metadata field contains invalid data.""" + + field: str + """The name of the field that contains invalid data.""" + + def __init__(self, field: str, message: str) -> None: + self.field = field + super().__init__(message) + + +# The RawMetadata class attempts to make as few assumptions about the underlying +# serialization formats as possible. 
# RawMetadata makes as few assumptions about the underlying serialization
# format as possible: any format offering some basic primitives can be
# serialized to and from it.
class RawMetadata(TypedDict, total=False):
    """A dictionary of raw core metadata.

    Each core metadata field maps to a key of this dictionary (when data is
    provided): the key is the lower-cased field name with underscores in
    place of dashes.  Fields that may be specified multiple times, or that
    hold multiple values in a single field, use a plural key and are stored
    as a list or dict, whichever is appropriate.  :class:`Metadata` exposes
    attributes matching these keys.
    """

    # Metadata 1.0 - PEP 241
    metadata_version: str
    name: str
    version: str
    platforms: list[str]
    summary: str
    description: str
    keywords: list[str]
    home_page: str
    author: str
    author_email: str
    license: str

    # Metadata 1.1 - PEP 314
    supported_platforms: list[str]
    download_url: str
    classifiers: list[str]
    requires: list[str]
    provides: list[str]
    obsoletes: list[str]

    # Metadata 1.2 - PEP 345
    maintainer: str
    maintainer_email: str
    requires_dist: list[str]
    provides_dist: list[str]
    obsoletes_dist: list[str]
    requires_python: str
    requires_external: list[str]
    project_urls: dict[str, str]

    # Metadata 2.0: PEP 426 was ultimately withdrawn, but tools had already
    # emitted Metadata-Version `2.0`, so for historical reasons that version
    # number is skipped.

    # Metadata 2.1 - PEP 566
    description_content_type: str
    provides_extra: list[str]

    # Metadata 2.2 - PEP 643
    dynamic: list[str]

    # Metadata 2.3 - PEP 685: no new fields, only tightened edge cases for
    # better interoperability.

    # Metadata 2.4 - PEP 639
    license_expression: str
    license_files: list[str]


# Fields stored as a single string in RawMetadata.
_STRING_FIELDS = {
    "author",
    "author_email",
    "description",
    "description_content_type",
    "download_url",
    "home_page",
    "license",
    "license_expression",
    "maintainer",
    "maintainer_email",
    "metadata_version",
    "name",
    "requires_python",
    "summary",
    "version",
}

# Fields stored as a list of strings in RawMetadata.
_LIST_FIELDS = {
    "classifiers",
    "dynamic",
    "license_files",
    "obsoletes",
    "obsoletes_dist",
    "platforms",
    "provides",
    "provides_dist",
    "provides_extra",
    "requires",
    "requires_dist",
    "requires_external",
    "supported_platforms",
}

# Fields stored as a mapping in RawMetadata.
_DICT_FIELDS = {
    "project_urls",
}


def _parse_keywords(data: str) -> list[str]:
    """Split a string of comma-separated keywords into a list of keywords."""
    return [k.strip() for k in data.split(",")]


def _parse_project_urls(data: list[str]) -> dict[str, str]:
    """Parse a list of 'label, URL' string pairings into a mapping.

    Raises KeyError when the same label appears twice; the caller treats
    that as the whole field being unparseable.
    """
    urls = {}
    for pair in data:
        # Be tolerant of malformed data.  When there is no ',' separating
        # the label from the URL there is no single right answer, and
        # validation will reject the field later anyway; the missing half
        # becomes the empty string.  The label must stay the key, otherwise
        # several malformed entries would all collapse onto the '' key and
        # overwrite each other.
        parts = [p.strip() for p in pair.split(",", 1)]
        parts.extend([""] * (max(0, 2 - len(parts))))  # Ensure 2 items

        # TODO: The spec doesn't say whether labels are case sensitive;
        #       logically they should be case-preserving and
        #       case-insensitive, but that would open up more duplicate
        #       cases, so they are treated case-sensitively here.
        label, url = parts
        if label in urls:
            # A repeated label makes the field unparseable; the caller adds
            # the raw values to its "unparsed" collection.
            raise KeyError("duplicate labels in project urls")
        urls[label] = url

    return urls


def _get_payload(msg: email.message.Message, source: bytes | str) -> str:
    """Get the body of the message.

    A str *source* means the caller has already managed encodings for us;
    a bytes *source* means we decode (strict UTF-8) ourselves, raising
    ValueError on failure.
    """
    if isinstance(source, str):
        payload = msg.get_payload()
        assert isinstance(payload, str)
        return payload
    bpayload = msg.get_payload(decode=True)
    assert isinstance(bpayload, bytes)
    try:
        return bpayload.decode("utf8", "strict")
    except UnicodeDecodeError as exc:
        raise ValueError("payload in an invalid encoding") from exc


# The parse_FORMAT functions below are intended to be as lenient as possible
# while still returning a correctly typed RawMetadata.
+# +# To aid in this, we also generally want to do as little touching of the +# data as possible, except where there are possibly some historic holdovers +# that make valid data awkward to work with. +# +# While this is a lower level, intermediate format than our ``Metadata`` +# class, some light touch ups can make a massive difference in usability. + +# Map METADATA fields to RawMetadata. +_EMAIL_TO_RAW_MAPPING = { + "author": "author", + "author-email": "author_email", + "classifier": "classifiers", + "description": "description", + "description-content-type": "description_content_type", + "download-url": "download_url", + "dynamic": "dynamic", + "home-page": "home_page", + "keywords": "keywords", + "license": "license", + "license-expression": "license_expression", + "license-file": "license_files", + "maintainer": "maintainer", + "maintainer-email": "maintainer_email", + "metadata-version": "metadata_version", + "name": "name", + "obsoletes": "obsoletes", + "obsoletes-dist": "obsoletes_dist", + "platform": "platforms", + "project-url": "project_urls", + "provides": "provides", + "provides-dist": "provides_dist", + "provides-extra": "provides_extra", + "requires": "requires", + "requires-dist": "requires_dist", + "requires-external": "requires_external", + "requires-python": "requires_python", + "summary": "summary", + "supported-platform": "supported_platforms", + "version": "version", +} +_RAW_TO_EMAIL_MAPPING = {raw: email for email, raw in _EMAIL_TO_RAW_MAPPING.items()} + + +def parse_email(data: bytes | str) -> tuple[RawMetadata, dict[str, list[str]]]: + """Parse a distribution's metadata stored as email headers (e.g. from ``METADATA``). + + This function returns a two-item tuple of dicts. The first dict is of + recognized fields from the core metadata specification. Fields that can be + parsed and translated into Python's built-in types are converted + appropriately. All other fields are left as-is. 
Fields that are allowed to + appear multiple times are stored as lists. + + The second dict contains all other fields from the metadata. This includes + any unrecognized fields. It also includes any fields which are expected to + be parsed into a built-in type but were not formatted appropriately. Finally, + any fields that are expected to appear only once but are repeated are + included in this dict. + + """ + raw: dict[str, str | list[str] | dict[str, str]] = {} + unparsed: dict[str, list[str]] = {} + + if isinstance(data, str): + parsed = email.parser.Parser(policy=email.policy.compat32).parsestr(data) + else: + parsed = email.parser.BytesParser(policy=email.policy.compat32).parsebytes(data) + + # We have to wrap parsed.keys() in a set, because in the case of multiple + # values for a key (a list), the key will appear multiple times in the + # list of keys, but we're avoiding that by using get_all(). + for name in frozenset(parsed.keys()): + # Header names in RFC are case insensitive, so we'll normalize to all + # lower case to make comparisons easier. + name = name.lower() + + # We use get_all() here, even for fields that aren't multiple use, + # because otherwise someone could have e.g. two Name fields, and we + # would just silently ignore it rather than doing something about it. + headers = parsed.get_all(name) or [] + + # The way the email module works when parsing bytes is that it + # unconditionally decodes the bytes as ascii using the surrogateescape + # handler. When you pull that data back out (such as with get_all() ), + # it looks to see if the str has any surrogate escapes, and if it does + # it wraps it in a Header object instead of returning the string. + # + # As such, we'll look for those Header objects, and fix up the encoding. + value = [] + # Flag if we have run into any issues processing the headers, thus + # signalling that the data belongs in 'unparsed'. 
+ valid_encoding = True + for h in headers: + # It's unclear if this can return more types than just a Header or + # a str, so we'll just assert here to make sure. + assert isinstance(h, (email.header.Header, str)) + + # If it's a header object, we need to do our little dance to get + # the real data out of it. In cases where there is invalid data + # we're going to end up with mojibake, but there's no obvious, good + # way around that without reimplementing parts of the Header object + # ourselves. + # + # That should be fine since, if mojibacked happens, this key is + # going into the unparsed dict anyways. + if isinstance(h, email.header.Header): + # The Header object stores it's data as chunks, and each chunk + # can be independently encoded, so we'll need to check each + # of them. + chunks: list[tuple[bytes, str | None]] = [] + for bin, encoding in email.header.decode_header(h): + try: + bin.decode("utf8", "strict") + except UnicodeDecodeError: + # Enable mojibake. + encoding = "latin1" + valid_encoding = False + else: + encoding = "utf8" + chunks.append((bin, encoding)) + + # Turn our chunks back into a Header object, then let that + # Header object do the right thing to turn them into a + # string for us. + value.append(str(email.header.make_header(chunks))) + # This is already a string, so just add it. + else: + value.append(h) + + # We've processed all of our values to get them into a list of str, + # but we may have mojibake data, in which case this is an unparsed + # field. + if not valid_encoding: + unparsed[name] = value + continue + + raw_name = _EMAIL_TO_RAW_MAPPING.get(name) + if raw_name is None: + # This is a bit of a weird situation, we've encountered a key that + # we don't know what it means, so we don't know whether it's meant + # to be a list or not. 
+ # + # Since we can't really tell one way or another, we'll just leave it + # as a list, even though it may be a single item list, because that's + # what makes the most sense for email headers. + unparsed[name] = value + continue + + # If this is one of our string fields, then we'll check to see if our + # value is a list of a single item. If it is then we'll assume that + # it was emitted as a single string, and unwrap the str from inside + # the list. + # + # If it's any other kind of data, then we haven't the faintest clue + # what we should parse it as, and we have to just add it to our list + # of unparsed stuff. + if raw_name in _STRING_FIELDS and len(value) == 1: + raw[raw_name] = value[0] + # If this is one of our list of string fields, then we can just assign + # the value, since email *only* has strings, and our get_all() call + # above ensures that this is a list. + elif raw_name in _LIST_FIELDS: + raw[raw_name] = value + # Special Case: Keywords + # The keywords field is implemented in the metadata spec as a str, + # but it conceptually is a list of strings, and is serialized using + # ", ".join(keywords), so we'll do some light data massaging to turn + # this into what it logically is. + elif raw_name == "keywords" and len(value) == 1: + raw[raw_name] = _parse_keywords(value[0]) + # Special Case: Project-URL + # The project urls is implemented in the metadata spec as a list of + # specially-formatted strings that represent a key and a value, which + # is fundamentally a mapping, however the email format doesn't support + # mappings in a sane way, so it was crammed into a list of strings + # instead. + # + # We will do a little light data massaging to turn this into a map as + # it logically should be. + elif raw_name == "project_urls": + try: + raw[raw_name] = _parse_project_urls(value) + except KeyError: + unparsed[name] = value + # Nothing that we've done has managed to parse this, so it'll just + # throw it in our unparseable data and move on. 
+ else: + unparsed[name] = value + + # We need to support getting the Description from the message payload in + # addition to getting it from the the headers. This does mean, though, there + # is the possibility of it being set both ways, in which case we put both + # in 'unparsed' since we don't know which is right. + try: + payload = _get_payload(parsed, data) + except ValueError: + unparsed.setdefault("description", []).append( + parsed.get_payload(decode=isinstance(data, bytes)) # type: ignore[call-overload] + ) + else: + if payload: + # Check to see if we've already got a description, if so then both + # it, and this body move to unparseable. + if "description" in raw: + description_header = cast(str, raw.pop("description")) + unparsed.setdefault("description", []).extend( + [description_header, payload] + ) + elif "description" in unparsed: + unparsed["description"].append(payload) + else: + raw["description"] = payload + + # We need to cast our `raw` to a metadata, because a TypedDict only support + # literal key names, but we're computing our key names on purpose, but the + # way this function is implemented, our `TypedDict` can only have valid key + # names. + return cast(RawMetadata, raw), unparsed + + +_NOT_FOUND = object() + + +# Keep the two values in sync. +_VALID_METADATA_VERSIONS = ["1.0", "1.1", "1.2", "2.1", "2.2", "2.3", "2.4"] +_MetadataVersion = Literal["1.0", "1.1", "1.2", "2.1", "2.2", "2.3", "2.4"] + +_REQUIRED_ATTRS = frozenset(["metadata_version", "name", "version"]) + + +class _Validator(Generic[T]): + """Validate a metadata field. + + All _process_*() methods correspond to a core metadata field. The method is + called with the field's raw value. If the raw value is valid it is returned + in its "enriched" form (e.g. ``version.Version`` for the ``Version`` field). + If the raw value is invalid, :exc:`InvalidMetadata` is raised (with a cause + as appropriate). 
+ """ + + name: str + raw_name: str + added: _MetadataVersion + + def __init__( + self, + *, + added: _MetadataVersion = "1.0", + ) -> None: + self.added = added + + def __set_name__(self, _owner: Metadata, name: str) -> None: + self.name = name + self.raw_name = _RAW_TO_EMAIL_MAPPING[name] + + def __get__(self, instance: Metadata, _owner: type[Metadata]) -> T: + # With Python 3.8, the caching can be replaced with functools.cached_property(). + # No need to check the cache as attribute lookup will resolve into the + # instance's __dict__ before __get__ is called. + cache = instance.__dict__ + value = instance._raw.get(self.name) + + # To make the _process_* methods easier, we'll check if the value is None + # and if this field is NOT a required attribute, and if both of those + # things are true, we'll skip the the converter. This will mean that the + # converters never have to deal with the None union. + if self.name in _REQUIRED_ATTRS or value is not None: + try: + converter: Callable[[Any], T] = getattr(self, f"_process_{self.name}") + except AttributeError: + pass + else: + value = converter(value) + + cache[self.name] = value + try: + del instance._raw[self.name] # type: ignore[misc] + except KeyError: + pass + + return cast(T, value) + + def _invalid_metadata( + self, msg: str, cause: Exception | None = None + ) -> InvalidMetadata: + exc = InvalidMetadata( + self.raw_name, msg.format_map({"field": repr(self.raw_name)}) + ) + exc.__cause__ = cause + return exc + + def _process_metadata_version(self, value: str) -> _MetadataVersion: + # Implicitly makes Metadata-Version required. + if value not in _VALID_METADATA_VERSIONS: + raise self._invalid_metadata(f"{value!r} is not a valid metadata version") + return cast(_MetadataVersion, value) + + def _process_name(self, value: str) -> str: + if not value: + raise self._invalid_metadata("{field} is a required field") + # Validate the name as a side-effect. 
+ try: + utils.canonicalize_name(value, validate=True) + except utils.InvalidName as exc: + raise self._invalid_metadata( + f"{value!r} is invalid for {{field}}", cause=exc + ) from exc + else: + return value + + def _process_version(self, value: str) -> version_module.Version: + if not value: + raise self._invalid_metadata("{field} is a required field") + try: + return version_module.parse(value) + except version_module.InvalidVersion as exc: + raise self._invalid_metadata( + f"{value!r} is invalid for {{field}}", cause=exc + ) from exc + + def _process_summary(self, value: str) -> str: + """Check the field contains no newlines.""" + if "\n" in value: + raise self._invalid_metadata("{field} must be a single line") + return value + + def _process_description_content_type(self, value: str) -> str: + content_types = {"text/plain", "text/x-rst", "text/markdown"} + message = email.message.EmailMessage() + message["content-type"] = value + + content_type, parameters = ( + # Defaults to `text/plain` if parsing failed. + message.get_content_type().lower(), + message["content-type"].params, + ) + # Check if content-type is valid or defaulted to `text/plain` and thus was + # not parseable. + if content_type not in content_types or content_type not in value.lower(): + raise self._invalid_metadata( + f"{{field}} must be one of {list(content_types)}, not {value!r}" + ) + + charset = parameters.get("charset", "UTF-8") + if charset != "UTF-8": + raise self._invalid_metadata( + f"{{field}} can only specify the UTF-8 charset, not {list(charset)}" + ) + + markdown_variants = {"GFM", "CommonMark"} + variant = parameters.get("variant", "GFM") # Use an acceptable default. 
+ if content_type == "text/markdown" and variant not in markdown_variants: + raise self._invalid_metadata( + f"valid Markdown variants for {{field}} are {list(markdown_variants)}, " + f"not {variant!r}", + ) + return value + + def _process_dynamic(self, value: list[str]) -> list[str]: + for dynamic_field in map(str.lower, value): + if dynamic_field in {"name", "version", "metadata-version"}: + raise self._invalid_metadata( + f"{dynamic_field!r} is not allowed as a dynamic field" + ) + elif dynamic_field not in _EMAIL_TO_RAW_MAPPING: + raise self._invalid_metadata( + f"{dynamic_field!r} is not a valid dynamic field" + ) + return list(map(str.lower, value)) + + def _process_provides_extra( + self, + value: list[str], + ) -> list[utils.NormalizedName]: + normalized_names = [] + try: + for name in value: + normalized_names.append(utils.canonicalize_name(name, validate=True)) + except utils.InvalidName as exc: + raise self._invalid_metadata( + f"{name!r} is invalid for {{field}}", cause=exc + ) from exc + else: + return normalized_names + + def _process_requires_python(self, value: str) -> specifiers.SpecifierSet: + try: + return specifiers.SpecifierSet(value) + except specifiers.InvalidSpecifier as exc: + raise self._invalid_metadata( + f"{value!r} is invalid for {{field}}", cause=exc + ) from exc + + def _process_requires_dist( + self, + value: list[str], + ) -> list[requirements.Requirement]: + reqs = [] + try: + for req in value: + reqs.append(requirements.Requirement(req)) + except requirements.InvalidRequirement as exc: + raise self._invalid_metadata( + f"{req!r} is invalid for {{field}}", cause=exc + ) from exc + else: + return reqs + + def _process_license_expression( + self, value: str + ) -> NormalizedLicenseExpression | None: + try: + return licenses.canonicalize_license_expression(value) + except ValueError as exc: + raise self._invalid_metadata( + f"{value!r} is invalid for {{field}}", cause=exc + ) from exc + + def _process_license_files(self, value: 
list[str]) -> list[str]: + paths = [] + for path in value: + if ".." in path: + raise self._invalid_metadata( + f"{path!r} is invalid for {{field}}, " + "parent directory indicators are not allowed" + ) + if "*" in path: + raise self._invalid_metadata( + f"{path!r} is invalid for {{field}}, paths must be resolved" + ) + if ( + pathlib.PurePosixPath(path).is_absolute() + or pathlib.PureWindowsPath(path).is_absolute() + ): + raise self._invalid_metadata( + f"{path!r} is invalid for {{field}}, paths must be relative" + ) + if pathlib.PureWindowsPath(path).as_posix() != path: + raise self._invalid_metadata( + f"{path!r} is invalid for {{field}}, " + "paths must use '/' delimiter" + ) + paths.append(path) + return paths + + +class Metadata: + """Representation of distribution metadata. + + Compared to :class:`RawMetadata`, this class provides objects representing + metadata fields instead of only using built-in types. Any invalid metadata + will cause :exc:`InvalidMetadata` to be raised (with a + :py:attr:`~BaseException.__cause__` attribute as appropriate). + """ + + _raw: RawMetadata + + @classmethod + def from_raw(cls, data: RawMetadata, *, validate: bool = True) -> Metadata: + """Create an instance from :class:`RawMetadata`. + + If *validate* is true, all metadata will be validated. All exceptions + related to validation will be gathered and raised as an :class:`ExceptionGroup`. + """ + ins = cls() + ins._raw = data.copy() # Mutations occur due to caching enriched values. + + if validate: + exceptions: list[Exception] = [] + try: + metadata_version = ins.metadata_version + metadata_age = _VALID_METADATA_VERSIONS.index(metadata_version) + except InvalidMetadata as metadata_version_exc: + exceptions.append(metadata_version_exc) + metadata_version = None + + # Make sure to check for the fields that are present, the required + # fields (so their absence can be reported). 
+ fields_to_check = frozenset(ins._raw) | _REQUIRED_ATTRS + # Remove fields that have already been checked. + fields_to_check -= {"metadata_version"} + + for key in fields_to_check: + try: + if metadata_version: + # Can't use getattr() as that triggers descriptor protocol which + # will fail due to no value for the instance argument. + try: + field_metadata_version = cls.__dict__[key].added + except KeyError: + exc = InvalidMetadata(key, f"unrecognized field: {key!r}") + exceptions.append(exc) + continue + field_age = _VALID_METADATA_VERSIONS.index( + field_metadata_version + ) + if field_age > metadata_age: + field = _RAW_TO_EMAIL_MAPPING[key] + exc = InvalidMetadata( + field, + f"{field} introduced in metadata version " + f"{field_metadata_version}, not {metadata_version}", + ) + exceptions.append(exc) + continue + getattr(ins, key) + except InvalidMetadata as exc: + exceptions.append(exc) + + if exceptions: + raise ExceptionGroup("invalid metadata", exceptions) + + return ins + + @classmethod + def from_email(cls, data: bytes | str, *, validate: bool = True) -> Metadata: + """Parse metadata from email headers. + + If *validate* is true, the metadata will be validated. All exceptions + related to validation will be gathered and raised as an :class:`ExceptionGroup`. 
+ """ + raw, unparsed = parse_email(data) + + if validate: + exceptions: list[Exception] = [] + for unparsed_key in unparsed: + if unparsed_key in _EMAIL_TO_RAW_MAPPING: + message = f"{unparsed_key!r} has invalid data" + else: + message = f"unrecognized field: {unparsed_key!r}" + exceptions.append(InvalidMetadata(unparsed_key, message)) + + if exceptions: + raise ExceptionGroup("unparsed", exceptions) + + try: + return cls.from_raw(raw, validate=validate) + except ExceptionGroup as exc_group: + raise ExceptionGroup( + "invalid or unparsed metadata", exc_group.exceptions + ) from None + + metadata_version: _Validator[_MetadataVersion] = _Validator() + """:external:ref:`core-metadata-metadata-version` + (required; validated to be a valid metadata version)""" + # `name` is not normalized/typed to NormalizedName so as to provide access to + # the original/raw name. + name: _Validator[str] = _Validator() + """:external:ref:`core-metadata-name` + (required; validated using :func:`~packaging.utils.canonicalize_name` and its + *validate* parameter)""" + version: _Validator[version_module.Version] = _Validator() + """:external:ref:`core-metadata-version` (required)""" + dynamic: _Validator[list[str] | None] = _Validator( + added="2.2", + ) + """:external:ref:`core-metadata-dynamic` + (validated against core metadata field names and lowercased)""" + platforms: _Validator[list[str] | None] = _Validator() + """:external:ref:`core-metadata-platform`""" + supported_platforms: _Validator[list[str] | None] = _Validator(added="1.1") + """:external:ref:`core-metadata-supported-platform`""" + summary: _Validator[str | None] = _Validator() + """:external:ref:`core-metadata-summary` (validated to contain no newlines)""" + description: _Validator[str | None] = _Validator() # TODO 2.1: can be in body + """:external:ref:`core-metadata-description`""" + description_content_type: _Validator[str | None] = _Validator(added="2.1") + """:external:ref:`core-metadata-description-content-type` 
(validated)""" + keywords: _Validator[list[str] | None] = _Validator() + """:external:ref:`core-metadata-keywords`""" + home_page: _Validator[str | None] = _Validator() + """:external:ref:`core-metadata-home-page`""" + download_url: _Validator[str | None] = _Validator(added="1.1") + """:external:ref:`core-metadata-download-url`""" + author: _Validator[str | None] = _Validator() + """:external:ref:`core-metadata-author`""" + author_email: _Validator[str | None] = _Validator() + """:external:ref:`core-metadata-author-email`""" + maintainer: _Validator[str | None] = _Validator(added="1.2") + """:external:ref:`core-metadata-maintainer`""" + maintainer_email: _Validator[str | None] = _Validator(added="1.2") + """:external:ref:`core-metadata-maintainer-email`""" + license: _Validator[str | None] = _Validator() + """:external:ref:`core-metadata-license`""" + license_expression: _Validator[NormalizedLicenseExpression | None] = _Validator( + added="2.4" + ) + """:external:ref:`core-metadata-license-expression`""" + license_files: _Validator[list[str] | None] = _Validator(added="2.4") + """:external:ref:`core-metadata-license-file`""" + classifiers: _Validator[list[str] | None] = _Validator(added="1.1") + """:external:ref:`core-metadata-classifier`""" + requires_dist: _Validator[list[requirements.Requirement] | None] = _Validator( + added="1.2" + ) + """:external:ref:`core-metadata-requires-dist`""" + requires_python: _Validator[specifiers.SpecifierSet | None] = _Validator( + added="1.2" + ) + """:external:ref:`core-metadata-requires-python`""" + # Because `Requires-External` allows for non-PEP 440 version specifiers, we + # don't do any processing on the values. 
+ requires_external: _Validator[list[str] | None] = _Validator(added="1.2") + """:external:ref:`core-metadata-requires-external`""" + project_urls: _Validator[dict[str, str] | None] = _Validator(added="1.2") + """:external:ref:`core-metadata-project-url`""" + # PEP 685 lets us raise an error if an extra doesn't pass `Name` validation + # regardless of metadata version. + provides_extra: _Validator[list[utils.NormalizedName] | None] = _Validator( + added="2.1", + ) + """:external:ref:`core-metadata-provides-extra`""" + provides_dist: _Validator[list[str] | None] = _Validator(added="1.2") + """:external:ref:`core-metadata-provides-dist`""" + obsoletes_dist: _Validator[list[str] | None] = _Validator(added="1.2") + """:external:ref:`core-metadata-obsoletes-dist`""" + requires: _Validator[list[str] | None] = _Validator(added="1.1") + """``Requires`` (deprecated)""" + provides: _Validator[list[str] | None] = _Validator(added="1.1") + """``Provides`` (deprecated)""" + obsoletes: _Validator[list[str] | None] = _Validator(added="1.1") + """``Obsoletes`` (deprecated)""" diff --git a/vllm/lib/python3.10/site-packages/packaging/py.typed b/vllm/lib/python3.10/site-packages/packaging/py.typed new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/vllm/lib/python3.10/site-packages/packaging/requirements.py b/vllm/lib/python3.10/site-packages/packaging/requirements.py new file mode 100644 index 0000000000000000000000000000000000000000..4e068c9567def3564f238a76fe7ab46b569f33e5 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/packaging/requirements.py @@ -0,0 +1,91 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. 
+from __future__ import annotations + +from typing import Any, Iterator + +from ._parser import parse_requirement as _parse_requirement +from ._tokenizer import ParserSyntaxError +from .markers import Marker, _normalize_extra_values +from .specifiers import SpecifierSet +from .utils import canonicalize_name + + +class InvalidRequirement(ValueError): + """ + An invalid requirement was found, users should refer to PEP 508. + """ + + +class Requirement: + """Parse a requirement. + + Parse a given requirement string into its parts, such as name, specifier, + URL, and extras. Raises InvalidRequirement on a badly-formed requirement + string. + """ + + # TODO: Can we test whether something is contained within a requirement? + # If so how do we do that? Do we need to test against the _name_ of + # the thing as well as the version? What about the markers? + # TODO: Can we normalize the name and extra name? + + def __init__(self, requirement_string: str) -> None: + try: + parsed = _parse_requirement(requirement_string) + except ParserSyntaxError as e: + raise InvalidRequirement(str(e)) from e + + self.name: str = parsed.name + self.url: str | None = parsed.url or None + self.extras: set[str] = set(parsed.extras or []) + self.specifier: SpecifierSet = SpecifierSet(parsed.specifier) + self.marker: Marker | None = None + if parsed.marker is not None: + self.marker = Marker.__new__(Marker) + self.marker._markers = _normalize_extra_values(parsed.marker) + + def _iter_parts(self, name: str) -> Iterator[str]: + yield name + + if self.extras: + formatted_extras = ",".join(sorted(self.extras)) + yield f"[{formatted_extras}]" + + if self.specifier: + yield str(self.specifier) + + if self.url: + yield f"@ {self.url}" + if self.marker: + yield " " + + if self.marker: + yield f"; {self.marker}" + + def __str__(self) -> str: + return "".join(self._iter_parts(self.name)) + + def __repr__(self) -> str: + return f"" + + def __hash__(self) -> int: + return hash( + ( + self.__class__.__name__, 
+ *self._iter_parts(canonicalize_name(self.name)), + ) + ) + + def __eq__(self, other: Any) -> bool: + if not isinstance(other, Requirement): + return NotImplemented + + return ( + canonicalize_name(self.name) == canonicalize_name(other.name) + and self.extras == other.extras + and self.specifier == other.specifier + and self.url == other.url + and self.marker == other.marker + ) diff --git a/vllm/lib/python3.10/site-packages/packaging/specifiers.py b/vllm/lib/python3.10/site-packages/packaging/specifiers.py new file mode 100644 index 0000000000000000000000000000000000000000..b30926af8bf4f47efe98eea44d5ded4cb6f7e07d --- /dev/null +++ b/vllm/lib/python3.10/site-packages/packaging/specifiers.py @@ -0,0 +1,1020 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. +""" +.. testsetup:: + + from packaging.specifiers import Specifier, SpecifierSet, InvalidSpecifier + from packaging.version import Version +""" + +from __future__ import annotations + +import abc +import itertools +import re +from typing import Callable, Iterable, Iterator, TypeVar, Union + +from .utils import canonicalize_version +from .version import Version + +UnparsedVersion = Union[Version, str] +UnparsedVersionVar = TypeVar("UnparsedVersionVar", bound=UnparsedVersion) +CallableOperator = Callable[[Version, str], bool] + + +def _coerce_version(version: UnparsedVersion) -> Version: + if not isinstance(version, Version): + version = Version(version) + return version + + +class InvalidSpecifier(ValueError): + """ + Raised when attempting to create a :class:`Specifier` with a specifier + string that is invalid. + + >>> Specifier("lolwat") + Traceback (most recent call last): + ... 
+ packaging.specifiers.InvalidSpecifier: Invalid specifier: 'lolwat' + """ + + +class BaseSpecifier(metaclass=abc.ABCMeta): + @abc.abstractmethod + def __str__(self) -> str: + """ + Returns the str representation of this Specifier-like object. This + should be representative of the Specifier itself. + """ + + @abc.abstractmethod + def __hash__(self) -> int: + """ + Returns a hash value for this Specifier-like object. + """ + + @abc.abstractmethod + def __eq__(self, other: object) -> bool: + """ + Returns a boolean representing whether or not the two Specifier-like + objects are equal. + + :param other: The other object to check against. + """ + + @property + @abc.abstractmethod + def prereleases(self) -> bool | None: + """Whether or not pre-releases as a whole are allowed. + + This can be set to either ``True`` or ``False`` to explicitly enable or disable + prereleases or it can be set to ``None`` (the default) to use default semantics. + """ + + @prereleases.setter + def prereleases(self, value: bool) -> None: + """Setter for :attr:`prereleases`. + + :param value: The value to set. + """ + + @abc.abstractmethod + def contains(self, item: str, prereleases: bool | None = None) -> bool: + """ + Determines if the given item is contained within this specifier. + """ + + @abc.abstractmethod + def filter( + self, iterable: Iterable[UnparsedVersionVar], prereleases: bool | None = None + ) -> Iterator[UnparsedVersionVar]: + """ + Takes an iterable of items and filters them so that only items which + are contained within this specifier are allowed in it. + """ + + +class Specifier(BaseSpecifier): + """This class abstracts handling of version specifiers. + + .. tip:: + + It is generally not required to instantiate this manually. You should instead + prefer to work with :class:`SpecifierSet` instead, which can parse + comma-separated version specifiers (which is what package metadata contains). 
+ """ + + _operator_regex_str = r""" + (?P(~=|==|!=|<=|>=|<|>|===)) + """ + _version_regex_str = r""" + (?P + (?: + # The identity operators allow for an escape hatch that will + # do an exact string match of the version you wish to install. + # This will not be parsed by PEP 440 and we cannot determine + # any semantic meaning from it. This operator is discouraged + # but included entirely as an escape hatch. + (?<====) # Only match for the identity operator + \s* + [^\s;)]* # The arbitrary version can be just about anything, + # we match everything except for whitespace, a + # semi-colon for marker support, and a closing paren + # since versions can be enclosed in them. + ) + | + (?: + # The (non)equality operators allow for wild card and local + # versions to be specified so we have to define these two + # operators separately to enable that. + (?<===|!=) # Only match for equals and not equals + + \s* + v? + (?:[0-9]+!)? # epoch + [0-9]+(?:\.[0-9]+)* # release + + # You cannot use a wild card and a pre-release, post-release, a dev or + # local version together so group them with a | and make them optional. + (?: + \.\* # Wild card syntax of .* + | + (?: # pre release + [-_\.]? + (alpha|beta|preview|pre|a|b|c|rc) + [-_\.]? + [0-9]* + )? + (?: # post release + (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*) + )? + (?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release + (?:\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*)? # local + )? + ) + | + (?: + # The compatible operator requires at least two digits in the + # release segment. + (?<=~=) # Only match for the compatible operator + + \s* + v? + (?:[0-9]+!)? # epoch + [0-9]+(?:\.[0-9]+)+ # release (We have a + instead of a *) + (?: # pre release + [-_\.]? + (alpha|beta|preview|pre|a|b|c|rc) + [-_\.]? + [0-9]* + )? + (?: # post release + (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*) + )? + (?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release + ) + | + (?: + # All other operators only allow a sub set of what the + # (non)equality operators do. 
Specifically they do not allow + # local versions to be specified nor do they allow the prefix + # matching wild cards. + (?=": "greater_than_equal", + "<": "less_than", + ">": "greater_than", + "===": "arbitrary", + } + + def __init__(self, spec: str = "", prereleases: bool | None = None) -> None: + """Initialize a Specifier instance. + + :param spec: + The string representation of a specifier which will be parsed and + normalized before use. + :param prereleases: + This tells the specifier if it should accept prerelease versions if + applicable or not. The default of ``None`` will autodetect it from the + given specifiers. + :raises InvalidSpecifier: + If the given specifier is invalid (i.e. bad syntax). + """ + match = self._regex.search(spec) + if not match: + raise InvalidSpecifier(f"Invalid specifier: {spec!r}") + + self._spec: tuple[str, str] = ( + match.group("operator").strip(), + match.group("version").strip(), + ) + + # Store whether or not this Specifier should accept prereleases + self._prereleases = prereleases + + # https://github.com/python/mypy/pull/13475#pullrequestreview-1079784515 + @property # type: ignore[override] + def prereleases(self) -> bool: + # If there is an explicit prereleases set for this, then we'll just + # blindly use that. + if self._prereleases is not None: + return self._prereleases + + # Look at all of our specifiers and determine if they are inclusive + # operators, and if they are if they are including an explicit + # prerelease. + operator, version = self._spec + if operator in ["==", ">=", "<=", "~=", "===", ">", "<"]: + # The == specifier can include a trailing .*, if it does we + # want to remove before parsing. + if operator == "==" and version.endswith(".*"): + version = version[:-2] + + # Parse the version, and if it is a pre-release than this + # specifier allows pre-releases. 
+ if Version(version).is_prerelease: + return True + + return False + + @prereleases.setter + def prereleases(self, value: bool) -> None: + self._prereleases = value + + @property + def operator(self) -> str: + """The operator of this specifier. + + >>> Specifier("==1.2.3").operator + '==' + """ + return self._spec[0] + + @property + def version(self) -> str: + """The version of this specifier. + + >>> Specifier("==1.2.3").version + '1.2.3' + """ + return self._spec[1] + + def __repr__(self) -> str: + """A representation of the Specifier that shows all internal state. + + >>> Specifier('>=1.0.0') + =1.0.0')> + >>> Specifier('>=1.0.0', prereleases=False) + =1.0.0', prereleases=False)> + >>> Specifier('>=1.0.0', prereleases=True) + =1.0.0', prereleases=True)> + """ + pre = ( + f", prereleases={self.prereleases!r}" + if self._prereleases is not None + else "" + ) + + return f"<{self.__class__.__name__}({str(self)!r}{pre})>" + + def __str__(self) -> str: + """A string representation of the Specifier that can be round-tripped. + + >>> str(Specifier('>=1.0.0')) + '>=1.0.0' + >>> str(Specifier('>=1.0.0', prereleases=False)) + '>=1.0.0' + """ + return "{}{}".format(*self._spec) + + @property + def _canonical_spec(self) -> tuple[str, str]: + canonical_version = canonicalize_version( + self._spec[1], + strip_trailing_zero=(self._spec[0] != "~="), + ) + return self._spec[0], canonical_version + + def __hash__(self) -> int: + return hash(self._canonical_spec) + + def __eq__(self, other: object) -> bool: + """Whether or not the two Specifier-like objects are equal. + + :param other: The other object to check against. + + The value of :attr:`prereleases` is ignored. + + >>> Specifier("==1.2.3") == Specifier("== 1.2.3.0") + True + >>> (Specifier("==1.2.3", prereleases=False) == + ... 
Specifier("==1.2.3", prereleases=True)) + True + >>> Specifier("==1.2.3") == "==1.2.3" + True + >>> Specifier("==1.2.3") == Specifier("==1.2.4") + False + >>> Specifier("==1.2.3") == Specifier("~=1.2.3") + False + """ + if isinstance(other, str): + try: + other = self.__class__(str(other)) + except InvalidSpecifier: + return NotImplemented + elif not isinstance(other, self.__class__): + return NotImplemented + + return self._canonical_spec == other._canonical_spec + + def _get_operator(self, op: str) -> CallableOperator: + operator_callable: CallableOperator = getattr( + self, f"_compare_{self._operators[op]}" + ) + return operator_callable + + def _compare_compatible(self, prospective: Version, spec: str) -> bool: + # Compatible releases have an equivalent combination of >= and ==. That + # is that ~=2.2 is equivalent to >=2.2,==2.*. This allows us to + # implement this in terms of the other specifiers instead of + # implementing it ourselves. The only thing we need to do is construct + # the other specifiers. + + # We want everything but the last item in the version, but we want to + # ignore suffix segments. + prefix = _version_join( + list(itertools.takewhile(_is_not_suffix, _version_split(spec)))[:-1] + ) + + # Add the prefix notation to the end of our string + prefix += ".*" + + return self._get_operator(">=")(prospective, spec) and self._get_operator("==")( + prospective, prefix + ) + + def _compare_equal(self, prospective: Version, spec: str) -> bool: + # We need special logic to handle prefix matching + if spec.endswith(".*"): + # In the case of prefix matching we want to ignore local segment. 
+ normalized_prospective = canonicalize_version( + prospective.public, strip_trailing_zero=False + ) + # Get the normalized version string ignoring the trailing .* + normalized_spec = canonicalize_version(spec[:-2], strip_trailing_zero=False) + # Split the spec out by bangs and dots, and pretend that there is + # an implicit dot in between a release segment and a pre-release segment. + split_spec = _version_split(normalized_spec) + + # Split the prospective version out by bangs and dots, and pretend + # that there is an implicit dot in between a release segment and + # a pre-release segment. + split_prospective = _version_split(normalized_prospective) + + # 0-pad the prospective version before shortening it to get the correct + # shortened version. + padded_prospective, _ = _pad_version(split_prospective, split_spec) + + # Shorten the prospective version to be the same length as the spec + # so that we can determine if the specifier is a prefix of the + # prospective version or not. + shortened_prospective = padded_prospective[: len(split_spec)] + + return shortened_prospective == split_spec + else: + # Convert our spec string into a Version + spec_version = Version(spec) + + # If the specifier does not have a local segment, then we want to + # act as if the prospective version also does not have a local + # segment. + if not spec_version.local: + prospective = Version(prospective.public) + + return prospective == spec_version + + def _compare_not_equal(self, prospective: Version, spec: str) -> bool: + return not self._compare_equal(prospective, spec) + + def _compare_less_than_equal(self, prospective: Version, spec: str) -> bool: + # NB: Local version identifiers are NOT permitted in the version + # specifier, so local version labels can be universally removed from + # the prospective version. 
+ return Version(prospective.public) <= Version(spec) + + def _compare_greater_than_equal(self, prospective: Version, spec: str) -> bool: + # NB: Local version identifiers are NOT permitted in the version + # specifier, so local version labels can be universally removed from + # the prospective version. + return Version(prospective.public) >= Version(spec) + + def _compare_less_than(self, prospective: Version, spec_str: str) -> bool: + # Convert our spec to a Version instance, since we'll want to work with + # it as a version. + spec = Version(spec_str) + + # Check to see if the prospective version is less than the spec + # version. If it's not we can short circuit and just return False now + # instead of doing extra unneeded work. + if not prospective < spec: + return False + + # This special case is here so that, unless the specifier itself + # includes is a pre-release version, that we do not accept pre-release + # versions for the version mentioned in the specifier (e.g. <3.1 should + # not match 3.1.dev0, but should match 3.0.dev0). + if not spec.is_prerelease and prospective.is_prerelease: + if Version(prospective.base_version) == Version(spec.base_version): + return False + + # If we've gotten to here, it means that prospective version is both + # less than the spec version *and* it's not a pre-release of the same + # version in the spec. + return True + + def _compare_greater_than(self, prospective: Version, spec_str: str) -> bool: + # Convert our spec to a Version instance, since we'll want to work with + # it as a version. + spec = Version(spec_str) + + # Check to see if the prospective version is greater than the spec + # version. If it's not we can short circuit and just return False now + # instead of doing extra unneeded work. 
+ if not prospective > spec: + return False + + # This special case is here so that, unless the specifier itself + # includes is a post-release version, that we do not accept + # post-release versions for the version mentioned in the specifier + # (e.g. >3.1 should not match 3.0.post0, but should match 3.2.post0). + if not spec.is_postrelease and prospective.is_postrelease: + if Version(prospective.base_version) == Version(spec.base_version): + return False + + # Ensure that we do not allow a local version of the version mentioned + # in the specifier, which is technically greater than, to match. + if prospective.local is not None: + if Version(prospective.base_version) == Version(spec.base_version): + return False + + # If we've gotten to here, it means that prospective version is both + # greater than the spec version *and* it's not a pre-release of the + # same version in the spec. + return True + + def _compare_arbitrary(self, prospective: Version, spec: str) -> bool: + return str(prospective).lower() == str(spec).lower() + + def __contains__(self, item: str | Version) -> bool: + """Return whether or not the item is contained in this specifier. + + :param item: The item to check for. + + This is used for the ``in`` operator and behaves the same as + :meth:`contains` with no ``prereleases`` argument passed. + + >>> "1.2.3" in Specifier(">=1.2.3") + True + >>> Version("1.2.3") in Specifier(">=1.2.3") + True + >>> "1.0.0" in Specifier(">=1.2.3") + False + >>> "1.3.0a1" in Specifier(">=1.2.3") + False + >>> "1.3.0a1" in Specifier(">=1.2.3", prereleases=True) + True + """ + return self.contains(item) + + def contains(self, item: UnparsedVersion, prereleases: bool | None = None) -> bool: + """Return whether or not the item is contained in this specifier. + + :param item: + The item to check for, which can be a version string or a + :class:`Version` instance. + :param prereleases: + Whether or not to match prereleases with this Specifier. 
If set to + ``None`` (the default), it uses :attr:`prereleases` to determine + whether or not prereleases are allowed. + + >>> Specifier(">=1.2.3").contains("1.2.3") + True + >>> Specifier(">=1.2.3").contains(Version("1.2.3")) + True + >>> Specifier(">=1.2.3").contains("1.0.0") + False + >>> Specifier(">=1.2.3").contains("1.3.0a1") + False + >>> Specifier(">=1.2.3", prereleases=True).contains("1.3.0a1") + True + >>> Specifier(">=1.2.3").contains("1.3.0a1", prereleases=True) + True + """ + + # Determine if prereleases are to be allowed or not. + if prereleases is None: + prereleases = self.prereleases + + # Normalize item to a Version, this allows us to have a shortcut for + # "2.0" in Specifier(">=2") + normalized_item = _coerce_version(item) + + # Determine if we should be supporting prereleases in this specifier + # or not, if we do not support prereleases than we can short circuit + # logic if this version is a prereleases. + if normalized_item.is_prerelease and not prereleases: + return False + + # Actually do the comparison to determine if this item is contained + # within this Specifier or not. + operator_callable: CallableOperator = self._get_operator(self.operator) + return operator_callable(normalized_item, self.version) + + def filter( + self, iterable: Iterable[UnparsedVersionVar], prereleases: bool | None = None + ) -> Iterator[UnparsedVersionVar]: + """Filter items in the given iterable, that match the specifier. + + :param iterable: + An iterable that can contain version strings and :class:`Version` instances. + The items in the iterable will be filtered according to the specifier. + :param prereleases: + Whether or not to allow prereleases in the returned iterator. If set to + ``None`` (the default), it will be intelligently decide whether to allow + prereleases or not (based on the :attr:`prereleases` attribute, and + whether the only versions matching are prereleases). 
+ + This method is smarter than just ``filter(Specifier().contains, [...])`` + because it implements the rule from :pep:`440` that a prerelease item + SHOULD be accepted if no other versions match the given specifier. + + >>> list(Specifier(">=1.2.3").filter(["1.2", "1.3", "1.5a1"])) + ['1.3'] + >>> list(Specifier(">=1.2.3").filter(["1.2", "1.2.3", "1.3", Version("1.4")])) + ['1.2.3', '1.3', ] + >>> list(Specifier(">=1.2.3").filter(["1.2", "1.5a1"])) + ['1.5a1'] + >>> list(Specifier(">=1.2.3").filter(["1.3", "1.5a1"], prereleases=True)) + ['1.3', '1.5a1'] + >>> list(Specifier(">=1.2.3", prereleases=True).filter(["1.3", "1.5a1"])) + ['1.3', '1.5a1'] + """ + + yielded = False + found_prereleases = [] + + kw = {"prereleases": prereleases if prereleases is not None else True} + + # Attempt to iterate over all the values in the iterable and if any of + # them match, yield them. + for version in iterable: + parsed_version = _coerce_version(version) + + if self.contains(parsed_version, **kw): + # If our version is a prerelease, and we were not set to allow + # prereleases, then we'll store it for later in case nothing + # else matches this specifier. + if parsed_version.is_prerelease and not ( + prereleases or self.prereleases + ): + found_prereleases.append(version) + # Either this is not a prerelease, or we should have been + # accepting prereleases from the beginning. + else: + yielded = True + yield version + + # Now that we've iterated over everything, determine if we've yielded + # any values, and if we have not and we have any prereleases stored up + # then we will go ahead and yield the prereleases. + if not yielded and found_prereleases: + for version in found_prereleases: + yield version + + +_prefix_regex = re.compile(r"^([0-9]+)((?:a|b|c|rc)[0-9]+)$") + + +def _version_split(version: str) -> list[str]: + """Split version into components. + + The split components are intended for version comparison. 
The logic does + not attempt to retain the original version string, so joining the + components back with :func:`_version_join` may not produce the original + version string. + """ + result: list[str] = [] + + epoch, _, rest = version.rpartition("!") + result.append(epoch or "0") + + for item in rest.split("."): + match = _prefix_regex.search(item) + if match: + result.extend(match.groups()) + else: + result.append(item) + return result + + +def _version_join(components: list[str]) -> str: + """Join split version components into a version string. + + This function assumes the input came from :func:`_version_split`, where the + first component must be the epoch (either empty or numeric), and all other + components numeric. + """ + epoch, *rest = components + return f"{epoch}!{'.'.join(rest)}" + + +def _is_not_suffix(segment: str) -> bool: + return not any( + segment.startswith(prefix) for prefix in ("dev", "a", "b", "rc", "post") + ) + + +def _pad_version(left: list[str], right: list[str]) -> tuple[list[str], list[str]]: + left_split, right_split = [], [] + + # Get the release segment of our versions + left_split.append(list(itertools.takewhile(lambda x: x.isdigit(), left))) + right_split.append(list(itertools.takewhile(lambda x: x.isdigit(), right))) + + # Get the rest of our versions + left_split.append(left[len(left_split[0]) :]) + right_split.append(right[len(right_split[0]) :]) + + # Insert our padding + left_split.insert(1, ["0"] * max(0, len(right_split[0]) - len(left_split[0]))) + right_split.insert(1, ["0"] * max(0, len(left_split[0]) - len(right_split[0]))) + + return ( + list(itertools.chain.from_iterable(left_split)), + list(itertools.chain.from_iterable(right_split)), + ) + + +class SpecifierSet(BaseSpecifier): + """This class abstracts handling of a set of version specifiers. + + It can be passed a single specifier (``>=3.0``), a comma-separated list of + specifiers (``>=3.0,!=3.1``), or no specifier at all. 
+ """ + + def __init__( + self, + specifiers: str | Iterable[Specifier] = "", + prereleases: bool | None = None, + ) -> None: + """Initialize a SpecifierSet instance. + + :param specifiers: + The string representation of a specifier or a comma-separated list of + specifiers which will be parsed and normalized before use. + May also be an iterable of ``Specifier`` instances, which will be used + as is. + :param prereleases: + This tells the SpecifierSet if it should accept prerelease versions if + applicable or not. The default of ``None`` will autodetect it from the + given specifiers. + + :raises InvalidSpecifier: + If the given ``specifiers`` are not parseable than this exception will be + raised. + """ + + if isinstance(specifiers, str): + # Split on `,` to break each individual specifier into its own item, and + # strip each item to remove leading/trailing whitespace. + split_specifiers = [s.strip() for s in specifiers.split(",") if s.strip()] + + # Make each individual specifier a Specifier and save in a frozen set + # for later. + self._specs = frozenset(map(Specifier, split_specifiers)) + else: + # Save the supplied specifiers in a frozen set. + self._specs = frozenset(specifiers) + + # Store our prereleases value so we can use it later to determine if + # we accept prereleases or not. + self._prereleases = prereleases + + @property + def prereleases(self) -> bool | None: + # If we have been given an explicit prerelease modifier, then we'll + # pass that through here. + if self._prereleases is not None: + return self._prereleases + + # If we don't have any specifiers, and we don't have a forced value, + # then we'll just return None since we don't know if this should have + # pre-releases or not. + if not self._specs: + return None + + # Otherwise we'll see if any of the given specifiers accept + # prereleases, if any of them do we'll return True, otherwise False. 
+ return any(s.prereleases for s in self._specs) + + @prereleases.setter + def prereleases(self, value: bool) -> None: + self._prereleases = value + + def __repr__(self) -> str: + """A representation of the specifier set that shows all internal state. + + Note that the ordering of the individual specifiers within the set may not + match the input string. + + >>> SpecifierSet('>=1.0.0,!=2.0.0') + =1.0.0')> + >>> SpecifierSet('>=1.0.0,!=2.0.0', prereleases=False) + =1.0.0', prereleases=False)> + >>> SpecifierSet('>=1.0.0,!=2.0.0', prereleases=True) + =1.0.0', prereleases=True)> + """ + pre = ( + f", prereleases={self.prereleases!r}" + if self._prereleases is not None + else "" + ) + + return f"" + + def __str__(self) -> str: + """A string representation of the specifier set that can be round-tripped. + + Note that the ordering of the individual specifiers within the set may not + match the input string. + + >>> str(SpecifierSet(">=1.0.0,!=1.0.1")) + '!=1.0.1,>=1.0.0' + >>> str(SpecifierSet(">=1.0.0,!=1.0.1", prereleases=False)) + '!=1.0.1,>=1.0.0' + """ + return ",".join(sorted(str(s) for s in self._specs)) + + def __hash__(self) -> int: + return hash(self._specs) + + def __and__(self, other: SpecifierSet | str) -> SpecifierSet: + """Return a SpecifierSet which is a combination of the two sets. + + :param other: The other object to combine with. 
+ + >>> SpecifierSet(">=1.0.0,!=1.0.1") & '<=2.0.0,!=2.0.1' + =1.0.0')> + >>> SpecifierSet(">=1.0.0,!=1.0.1") & SpecifierSet('<=2.0.0,!=2.0.1') + =1.0.0')> + """ + if isinstance(other, str): + other = SpecifierSet(other) + elif not isinstance(other, SpecifierSet): + return NotImplemented + + specifier = SpecifierSet() + specifier._specs = frozenset(self._specs | other._specs) + + if self._prereleases is None and other._prereleases is not None: + specifier._prereleases = other._prereleases + elif self._prereleases is not None and other._prereleases is None: + specifier._prereleases = self._prereleases + elif self._prereleases == other._prereleases: + specifier._prereleases = self._prereleases + else: + raise ValueError( + "Cannot combine SpecifierSets with True and False prerelease " + "overrides." + ) + + return specifier + + def __eq__(self, other: object) -> bool: + """Whether or not the two SpecifierSet-like objects are equal. + + :param other: The other object to check against. + + The value of :attr:`prereleases` is ignored. + + >>> SpecifierSet(">=1.0.0,!=1.0.1") == SpecifierSet(">=1.0.0,!=1.0.1") + True + >>> (SpecifierSet(">=1.0.0,!=1.0.1", prereleases=False) == + ... SpecifierSet(">=1.0.0,!=1.0.1", prereleases=True)) + True + >>> SpecifierSet(">=1.0.0,!=1.0.1") == ">=1.0.0,!=1.0.1" + True + >>> SpecifierSet(">=1.0.0,!=1.0.1") == SpecifierSet(">=1.0.0") + False + >>> SpecifierSet(">=1.0.0,!=1.0.1") == SpecifierSet(">=1.0.0,!=1.0.2") + False + """ + if isinstance(other, (str, Specifier)): + other = SpecifierSet(str(other)) + elif not isinstance(other, SpecifierSet): + return NotImplemented + + return self._specs == other._specs + + def __len__(self) -> int: + """Returns the number of specifiers in this specifier set.""" + return len(self._specs) + + def __iter__(self) -> Iterator[Specifier]: + """ + Returns an iterator over all the underlying :class:`Specifier` instances + in this specifier set. 
+ + >>> sorted(SpecifierSet(">=1.0.0,!=1.0.1"), key=str) + [, =1.0.0')>] + """ + return iter(self._specs) + + def __contains__(self, item: UnparsedVersion) -> bool: + """Return whether or not the item is contained in this specifier. + + :param item: The item to check for. + + This is used for the ``in`` operator and behaves the same as + :meth:`contains` with no ``prereleases`` argument passed. + + >>> "1.2.3" in SpecifierSet(">=1.0.0,!=1.0.1") + True + >>> Version("1.2.3") in SpecifierSet(">=1.0.0,!=1.0.1") + True + >>> "1.0.1" in SpecifierSet(">=1.0.0,!=1.0.1") + False + >>> "1.3.0a1" in SpecifierSet(">=1.0.0,!=1.0.1") + False + >>> "1.3.0a1" in SpecifierSet(">=1.0.0,!=1.0.1", prereleases=True) + True + """ + return self.contains(item) + + def contains( + self, + item: UnparsedVersion, + prereleases: bool | None = None, + installed: bool | None = None, + ) -> bool: + """Return whether or not the item is contained in this SpecifierSet. + + :param item: + The item to check for, which can be a version string or a + :class:`Version` instance. + :param prereleases: + Whether or not to match prereleases with this SpecifierSet. If set to + ``None`` (the default), it uses :attr:`prereleases` to determine + whether or not prereleases are allowed. + + >>> SpecifierSet(">=1.0.0,!=1.0.1").contains("1.2.3") + True + >>> SpecifierSet(">=1.0.0,!=1.0.1").contains(Version("1.2.3")) + True + >>> SpecifierSet(">=1.0.0,!=1.0.1").contains("1.0.1") + False + >>> SpecifierSet(">=1.0.0,!=1.0.1").contains("1.3.0a1") + False + >>> SpecifierSet(">=1.0.0,!=1.0.1", prereleases=True).contains("1.3.0a1") + True + >>> SpecifierSet(">=1.0.0,!=1.0.1").contains("1.3.0a1", prereleases=True) + True + """ + # Ensure that our item is a Version instance. 
+ if not isinstance(item, Version): + item = Version(item) + + # Determine if we're forcing a prerelease or not, if we're not forcing + # one for this particular filter call, then we'll use whatever the + # SpecifierSet thinks for whether or not we should support prereleases. + if prereleases is None: + prereleases = self.prereleases + + # We can determine if we're going to allow pre-releases by looking to + # see if any of the underlying items supports them. If none of them do + # and this item is a pre-release then we do not allow it and we can + # short circuit that here. + # Note: This means that 1.0.dev1 would not be contained in something + # like >=1.0.devabc however it would be in >=1.0.debabc,>0.0.dev0 + if not prereleases and item.is_prerelease: + return False + + if installed and item.is_prerelease: + item = Version(item.base_version) + + # We simply dispatch to the underlying specs here to make sure that the + # given version is contained within all of them. + # Note: This use of all() here means that an empty set of specifiers + # will always return True, this is an explicit design decision. + return all(s.contains(item, prereleases=prereleases) for s in self._specs) + + def filter( + self, iterable: Iterable[UnparsedVersionVar], prereleases: bool | None = None + ) -> Iterator[UnparsedVersionVar]: + """Filter items in the given iterable, that match the specifiers in this set. + + :param iterable: + An iterable that can contain version strings and :class:`Version` instances. + The items in the iterable will be filtered according to the specifier. + :param prereleases: + Whether or not to allow prereleases in the returned iterator. If set to + ``None`` (the default), it will be intelligently decide whether to allow + prereleases or not (based on the :attr:`prereleases` attribute, and + whether the only versions matching are prereleases). 
+ + This method is smarter than just ``filter(SpecifierSet(...).contains, [...])`` + because it implements the rule from :pep:`440` that a prerelease item + SHOULD be accepted if no other versions match the given specifier. + + >>> list(SpecifierSet(">=1.2.3").filter(["1.2", "1.3", "1.5a1"])) + ['1.3'] + >>> list(SpecifierSet(">=1.2.3").filter(["1.2", "1.3", Version("1.4")])) + ['1.3', ] + >>> list(SpecifierSet(">=1.2.3").filter(["1.2", "1.5a1"])) + [] + >>> list(SpecifierSet(">=1.2.3").filter(["1.3", "1.5a1"], prereleases=True)) + ['1.3', '1.5a1'] + >>> list(SpecifierSet(">=1.2.3", prereleases=True).filter(["1.3", "1.5a1"])) + ['1.3', '1.5a1'] + + An "empty" SpecifierSet will filter items based on the presence of prerelease + versions in the set. + + >>> list(SpecifierSet("").filter(["1.3", "1.5a1"])) + ['1.3'] + >>> list(SpecifierSet("").filter(["1.5a1"])) + ['1.5a1'] + >>> list(SpecifierSet("", prereleases=True).filter(["1.3", "1.5a1"])) + ['1.3', '1.5a1'] + >>> list(SpecifierSet("").filter(["1.3", "1.5a1"], prereleases=True)) + ['1.3', '1.5a1'] + """ + # Determine if we're forcing a prerelease or not, if we're not forcing + # one for this particular filter call, then we'll use whatever the + # SpecifierSet thinks for whether or not we should support prereleases. + if prereleases is None: + prereleases = self.prereleases + + # If we have any specifiers, then we want to wrap our iterable in the + # filter method for each one, this will act as a logical AND amongst + # each specifier. + if self._specs: + for spec in self._specs: + iterable = spec.filter(iterable, prereleases=bool(prereleases)) + return iter(iterable) + # If we do not have any specifiers, then we need to have a rough filter + # which will filter out any pre-releases, unless there are no final + # releases. 
+ else: + filtered: list[UnparsedVersionVar] = [] + found_prereleases: list[UnparsedVersionVar] = [] + + for item in iterable: + parsed_version = _coerce_version(item) + + # Store any item which is a pre-release for later unless we've + # already found a final version or we are accepting prereleases + if parsed_version.is_prerelease and not prereleases: + if not filtered: + found_prereleases.append(item) + else: + filtered.append(item) + + # If we've found no items except for pre-releases, then we'll go + # ahead and use the pre-releases + if not filtered and found_prereleases and prereleases is None: + return iter(found_prereleases) + + return iter(filtered) diff --git a/vllm/lib/python3.10/site-packages/packaging/tags.py b/vllm/lib/python3.10/site-packages/packaging/tags.py new file mode 100644 index 0000000000000000000000000000000000000000..f5903402abb5a0aed37bb23914f678ef7e34a554 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/packaging/tags.py @@ -0,0 +1,617 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +from __future__ import annotations + +import logging +import platform +import re +import struct +import subprocess +import sys +import sysconfig +from importlib.machinery import EXTENSION_SUFFIXES +from typing import ( + Iterable, + Iterator, + Sequence, + Tuple, + cast, +) + +from . import _manylinux, _musllinux + +logger = logging.getLogger(__name__) + +PythonVersion = Sequence[int] +AppleVersion = Tuple[int, int] + +INTERPRETER_SHORT_NAMES: dict[str, str] = { + "python": "py", # Generic. + "cpython": "cp", + "pypy": "pp", + "ironpython": "ip", + "jython": "jy", +} + + +_32_BIT_INTERPRETER = struct.calcsize("P") == 4 + + +class Tag: + """ + A representation of the tag triple for a wheel. + + Instances are considered immutable and thus are hashable. Equality checking + is also supported. 
+ """ + + __slots__ = ["_abi", "_hash", "_interpreter", "_platform"] + + def __init__(self, interpreter: str, abi: str, platform: str) -> None: + self._interpreter = interpreter.lower() + self._abi = abi.lower() + self._platform = platform.lower() + # The __hash__ of every single element in a Set[Tag] will be evaluated each time + # that a set calls its `.disjoint()` method, which may be called hundreds of + # times when scanning a page of links for packages with tags matching that + # Set[Tag]. Pre-computing the value here produces significant speedups for + # downstream consumers. + self._hash = hash((self._interpreter, self._abi, self._platform)) + + @property + def interpreter(self) -> str: + return self._interpreter + + @property + def abi(self) -> str: + return self._abi + + @property + def platform(self) -> str: + return self._platform + + def __eq__(self, other: object) -> bool: + if not isinstance(other, Tag): + return NotImplemented + + return ( + (self._hash == other._hash) # Short-circuit ASAP for perf reasons. + and (self._platform == other._platform) + and (self._abi == other._abi) + and (self._interpreter == other._interpreter) + ) + + def __hash__(self) -> int: + return self._hash + + def __str__(self) -> str: + return f"{self._interpreter}-{self._abi}-{self._platform}" + + def __repr__(self) -> str: + return f"<{self} @ {id(self)}>" + + +def parse_tag(tag: str) -> frozenset[Tag]: + """ + Parses the provided tag (e.g. `py3-none-any`) into a frozenset of Tag instances. + + Returning a set is required due to the possibility that the tag is a + compressed tag set. 
+ """ + tags = set() + interpreters, abis, platforms = tag.split("-") + for interpreter in interpreters.split("."): + for abi in abis.split("."): + for platform_ in platforms.split("."): + tags.add(Tag(interpreter, abi, platform_)) + return frozenset(tags) + + +def _get_config_var(name: str, warn: bool = False) -> int | str | None: + value: int | str | None = sysconfig.get_config_var(name) + if value is None and warn: + logger.debug( + "Config variable '%s' is unset, Python ABI tag may be incorrect", name + ) + return value + + +def _normalize_string(string: str) -> str: + return string.replace(".", "_").replace("-", "_").replace(" ", "_") + + +def _is_threaded_cpython(abis: list[str]) -> bool: + """ + Determine if the ABI corresponds to a threaded (`--disable-gil`) build. + + The threaded builds are indicated by a "t" in the abiflags. + """ + if len(abis) == 0: + return False + # expect e.g., cp313 + m = re.match(r"cp\d+(.*)", abis[0]) + if not m: + return False + abiflags = m.group(1) + return "t" in abiflags + + +def _abi3_applies(python_version: PythonVersion, threading: bool) -> bool: + """ + Determine if the Python version supports abi3. + + PEP 384 was first implemented in Python 3.2. The threaded (`--disable-gil`) + builds do not support abi3. + """ + return len(python_version) > 1 and tuple(python_version) >= (3, 2) and not threading + + +def _cpython_abis(py_version: PythonVersion, warn: bool = False) -> list[str]: + py_version = tuple(py_version) # To allow for version comparison. + abis = [] + version = _version_nodot(py_version[:2]) + threading = debug = pymalloc = ucs4 = "" + with_debug = _get_config_var("Py_DEBUG", warn) + has_refcount = hasattr(sys, "gettotalrefcount") + # Windows doesn't set Py_DEBUG, so checking for support of debug-compiled + # extension modules is the best option. 
+ # https://github.com/pypa/pip/issues/3383#issuecomment-173267692 + has_ext = "_d.pyd" in EXTENSION_SUFFIXES + if with_debug or (with_debug is None and (has_refcount or has_ext)): + debug = "d" + if py_version >= (3, 13) and _get_config_var("Py_GIL_DISABLED", warn): + threading = "t" + if py_version < (3, 8): + with_pymalloc = _get_config_var("WITH_PYMALLOC", warn) + if with_pymalloc or with_pymalloc is None: + pymalloc = "m" + if py_version < (3, 3): + unicode_size = _get_config_var("Py_UNICODE_SIZE", warn) + if unicode_size == 4 or ( + unicode_size is None and sys.maxunicode == 0x10FFFF + ): + ucs4 = "u" + elif debug: + # Debug builds can also load "normal" extension modules. + # We can also assume no UCS-4 or pymalloc requirement. + abis.append(f"cp{version}{threading}") + abis.insert(0, f"cp{version}{threading}{debug}{pymalloc}{ucs4}") + return abis + + +def cpython_tags( + python_version: PythonVersion | None = None, + abis: Iterable[str] | None = None, + platforms: Iterable[str] | None = None, + *, + warn: bool = False, +) -> Iterator[Tag]: + """ + Yields the tags for a CPython interpreter. + + The tags consist of: + - cp-- + - cp-abi3- + - cp-none- + - cp-abi3- # Older Python versions down to 3.2. + + If python_version only specifies a major version then user-provided ABIs and + the 'none' ABItag will be used. + + If 'abi3' or 'none' are specified in 'abis' then they will be yielded at + their normal position and not at the beginning. + """ + if not python_version: + python_version = sys.version_info[:2] + + interpreter = f"cp{_version_nodot(python_version[:2])}" + + if abis is None: + if len(python_version) > 1: + abis = _cpython_abis(python_version, warn) + else: + abis = [] + abis = list(abis) + # 'abi3' and 'none' are explicitly handled later. 
+ for explicit_abi in ("abi3", "none"): + try: + abis.remove(explicit_abi) + except ValueError: + pass + + platforms = list(platforms or platform_tags()) + for abi in abis: + for platform_ in platforms: + yield Tag(interpreter, abi, platform_) + + threading = _is_threaded_cpython(abis) + use_abi3 = _abi3_applies(python_version, threading) + if use_abi3: + yield from (Tag(interpreter, "abi3", platform_) for platform_ in platforms) + yield from (Tag(interpreter, "none", platform_) for platform_ in platforms) + + if use_abi3: + for minor_version in range(python_version[1] - 1, 1, -1): + for platform_ in platforms: + version = _version_nodot((python_version[0], minor_version)) + interpreter = f"cp{version}" + yield Tag(interpreter, "abi3", platform_) + + +def _generic_abi() -> list[str]: + """ + Return the ABI tag based on EXT_SUFFIX. + """ + # The following are examples of `EXT_SUFFIX`. + # We want to keep the parts which are related to the ABI and remove the + # parts which are related to the platform: + # - linux: '.cpython-310-x86_64-linux-gnu.so' => cp310 + # - mac: '.cpython-310-darwin.so' => cp310 + # - win: '.cp310-win_amd64.pyd' => cp310 + # - win: '.pyd' => cp37 (uses _cpython_abis()) + # - pypy: '.pypy38-pp73-x86_64-linux-gnu.so' => pypy38_pp73 + # - graalpy: '.graalpy-38-native-x86_64-darwin.dylib' + # => graalpy_38_native + + ext_suffix = _get_config_var("EXT_SUFFIX", warn=True) + if not isinstance(ext_suffix, str) or ext_suffix[0] != ".": + raise SystemError("invalid sysconfig.get_config_var('EXT_SUFFIX')") + parts = ext_suffix.split(".") + if len(parts) < 3: + # CPython3.7 and earlier uses ".pyd" on Windows. 
+ return _cpython_abis(sys.version_info[:2]) + soabi = parts[1] + if soabi.startswith("cpython"): + # non-windows + abi = "cp" + soabi.split("-")[1] + elif soabi.startswith("cp"): + # windows + abi = soabi.split("-")[0] + elif soabi.startswith("pypy"): + abi = "-".join(soabi.split("-")[:2]) + elif soabi.startswith("graalpy"): + abi = "-".join(soabi.split("-")[:3]) + elif soabi: + # pyston, ironpython, others? + abi = soabi + else: + return [] + return [_normalize_string(abi)] + + +def generic_tags( + interpreter: str | None = None, + abis: Iterable[str] | None = None, + platforms: Iterable[str] | None = None, + *, + warn: bool = False, +) -> Iterator[Tag]: + """ + Yields the tags for a generic interpreter. + + The tags consist of: + - -- + + The "none" ABI will be added if it was not explicitly provided. + """ + if not interpreter: + interp_name = interpreter_name() + interp_version = interpreter_version(warn=warn) + interpreter = "".join([interp_name, interp_version]) + if abis is None: + abis = _generic_abi() + else: + abis = list(abis) + platforms = list(platforms or platform_tags()) + if "none" not in abis: + abis.append("none") + for abi in abis: + for platform_ in platforms: + yield Tag(interpreter, abi, platform_) + + +def _py_interpreter_range(py_version: PythonVersion) -> Iterator[str]: + """ + Yields Python versions in descending order. + + After the latest version, the major-only version will be yielded, and then + all previous versions of that major version. + """ + if len(py_version) > 1: + yield f"py{_version_nodot(py_version[:2])}" + yield f"py{py_version[0]}" + if len(py_version) > 1: + for minor in range(py_version[1] - 1, -1, -1): + yield f"py{_version_nodot((py_version[0], minor))}" + + +def compatible_tags( + python_version: PythonVersion | None = None, + interpreter: str | None = None, + platforms: Iterable[str] | None = None, +) -> Iterator[Tag]: + """ + Yields the sequence of tags that are compatible with a specific version of Python. 
+ + The tags consist of: + - py*-none- + - -none-any # ... if `interpreter` is provided. + - py*-none-any + """ + if not python_version: + python_version = sys.version_info[:2] + platforms = list(platforms or platform_tags()) + for version in _py_interpreter_range(python_version): + for platform_ in platforms: + yield Tag(version, "none", platform_) + if interpreter: + yield Tag(interpreter, "none", "any") + for version in _py_interpreter_range(python_version): + yield Tag(version, "none", "any") + + +def _mac_arch(arch: str, is_32bit: bool = _32_BIT_INTERPRETER) -> str: + if not is_32bit: + return arch + + if arch.startswith("ppc"): + return "ppc" + + return "i386" + + +def _mac_binary_formats(version: AppleVersion, cpu_arch: str) -> list[str]: + formats = [cpu_arch] + if cpu_arch == "x86_64": + if version < (10, 4): + return [] + formats.extend(["intel", "fat64", "fat32"]) + + elif cpu_arch == "i386": + if version < (10, 4): + return [] + formats.extend(["intel", "fat32", "fat"]) + + elif cpu_arch == "ppc64": + # TODO: Need to care about 32-bit PPC for ppc64 through 10.2? + if version > (10, 5) or version < (10, 4): + return [] + formats.append("fat64") + + elif cpu_arch == "ppc": + if version > (10, 6): + return [] + formats.extend(["fat32", "fat"]) + + if cpu_arch in {"arm64", "x86_64"}: + formats.append("universal2") + + if cpu_arch in {"x86_64", "i386", "ppc64", "ppc", "intel"}: + formats.append("universal") + + return formats + + +def mac_platforms( + version: AppleVersion | None = None, arch: str | None = None +) -> Iterator[str]: + """ + Yields the platform tags for a macOS system. + + The `version` parameter is a two-item tuple specifying the macOS version to + generate platform tags for. The `arch` parameter is the CPU architecture to + generate platform tags for. Both parameters default to the appropriate value + for the current system. 
+ """ + version_str, _, cpu_arch = platform.mac_ver() + if version is None: + version = cast("AppleVersion", tuple(map(int, version_str.split(".")[:2]))) + if version == (10, 16): + # When built against an older macOS SDK, Python will report macOS 10.16 + # instead of the real version. + version_str = subprocess.run( + [ + sys.executable, + "-sS", + "-c", + "import platform; print(platform.mac_ver()[0])", + ], + check=True, + env={"SYSTEM_VERSION_COMPAT": "0"}, + stdout=subprocess.PIPE, + text=True, + ).stdout + version = cast("AppleVersion", tuple(map(int, version_str.split(".")[:2]))) + else: + version = version + if arch is None: + arch = _mac_arch(cpu_arch) + else: + arch = arch + + if (10, 0) <= version and version < (11, 0): + # Prior to Mac OS 11, each yearly release of Mac OS bumped the + # "minor" version number. The major version was always 10. + major_version = 10 + for minor_version in range(version[1], -1, -1): + compat_version = major_version, minor_version + binary_formats = _mac_binary_formats(compat_version, arch) + for binary_format in binary_formats: + yield f"macosx_{major_version}_{minor_version}_{binary_format}" + + if version >= (11, 0): + # Starting with Mac OS 11, each yearly release bumps the major version + # number. The minor versions are now the midyear updates. + minor_version = 0 + for major_version in range(version[0], 10, -1): + compat_version = major_version, minor_version + binary_formats = _mac_binary_formats(compat_version, arch) + for binary_format in binary_formats: + yield f"macosx_{major_version}_{minor_version}_{binary_format}" + + if version >= (11, 0): + # Mac OS 11 on x86_64 is compatible with binaries from previous releases. + # Arm64 support was introduced in 11.0, so no Arm binaries from previous + # releases exist. + # + # However, the "universal2" binary format can have a + # macOS version earlier than 11.0 when the x86_64 part of the binary supports + # that version of macOS. 
+ major_version = 10 + if arch == "x86_64": + for minor_version in range(16, 3, -1): + compat_version = major_version, minor_version + binary_formats = _mac_binary_formats(compat_version, arch) + for binary_format in binary_formats: + yield f"macosx_{major_version}_{minor_version}_{binary_format}" + else: + for minor_version in range(16, 3, -1): + compat_version = major_version, minor_version + binary_format = "universal2" + yield f"macosx_{major_version}_{minor_version}_{binary_format}" + + +def ios_platforms( + version: AppleVersion | None = None, multiarch: str | None = None +) -> Iterator[str]: + """ + Yields the platform tags for an iOS system. + + :param version: A two-item tuple specifying the iOS version to generate + platform tags for. Defaults to the current iOS version. + :param multiarch: The CPU architecture+ABI to generate platform tags for - + (the value used by `sys.implementation._multiarch` e.g., + `arm64_iphoneos` or `x84_64_iphonesimulator`). Defaults to the current + multiarch value. + """ + if version is None: + # if iOS is the current platform, ios_ver *must* be defined. However, + # it won't exist for CPython versions before 3.13, which causes a mypy + # error. + _, release, _, _ = platform.ios_ver() # type: ignore[attr-defined, unused-ignore] + version = cast("AppleVersion", tuple(map(int, release.split(".")[:2]))) + + if multiarch is None: + multiarch = sys.implementation._multiarch + multiarch = multiarch.replace("-", "_") + + ios_platform_template = "ios_{major}_{minor}_{multiarch}" + + # Consider any iOS major.minor version from the version requested, down to + # 12.0. 12.0 is the first iOS version that is known to have enough features + # to support CPython. Consider every possible minor release up to X.9. 
There + # highest the minor has ever gone is 8 (14.8 and 15.8) but having some extra + # candidates that won't ever match doesn't really hurt, and it saves us from + # having to keep an explicit list of known iOS versions in the code. Return + # the results descending order of version number. + + # If the requested major version is less than 12, there won't be any matches. + if version[0] < 12: + return + + # Consider the actual X.Y version that was requested. + yield ios_platform_template.format( + major=version[0], minor=version[1], multiarch=multiarch + ) + + # Consider every minor version from X.0 to the minor version prior to the + # version requested by the platform. + for minor in range(version[1] - 1, -1, -1): + yield ios_platform_template.format( + major=version[0], minor=minor, multiarch=multiarch + ) + + for major in range(version[0] - 1, 11, -1): + for minor in range(9, -1, -1): + yield ios_platform_template.format( + major=major, minor=minor, multiarch=multiarch + ) + + +def _linux_platforms(is_32bit: bool = _32_BIT_INTERPRETER) -> Iterator[str]: + linux = _normalize_string(sysconfig.get_platform()) + if not linux.startswith("linux_"): + # we should never be here, just yield the sysconfig one and return + yield linux + return + if is_32bit: + if linux == "linux_x86_64": + linux = "linux_i686" + elif linux == "linux_aarch64": + linux = "linux_armv8l" + _, arch = linux.split("_", 1) + archs = {"armv8l": ["armv8l", "armv7l"]}.get(arch, [arch]) + yield from _manylinux.platform_tags(archs) + yield from _musllinux.platform_tags(archs) + for arch in archs: + yield f"linux_{arch}" + + +def _generic_platforms() -> Iterator[str]: + yield _normalize_string(sysconfig.get_platform()) + + +def platform_tags() -> Iterator[str]: + """ + Provides the platform tags for this installation. 
+ """ + if platform.system() == "Darwin": + return mac_platforms() + elif platform.system() == "iOS": + return ios_platforms() + elif platform.system() == "Linux": + return _linux_platforms() + else: + return _generic_platforms() + + +def interpreter_name() -> str: + """ + Returns the name of the running interpreter. + + Some implementations have a reserved, two-letter abbreviation which will + be returned when appropriate. + """ + name = sys.implementation.name + return INTERPRETER_SHORT_NAMES.get(name) or name + + +def interpreter_version(*, warn: bool = False) -> str: + """ + Returns the version of the running interpreter. + """ + version = _get_config_var("py_version_nodot", warn=warn) + if version: + version = str(version) + else: + version = _version_nodot(sys.version_info[:2]) + return version + + +def _version_nodot(version: PythonVersion) -> str: + return "".join(map(str, version)) + + +def sys_tags(*, warn: bool = False) -> Iterator[Tag]: + """ + Returns the sequence of tag triples for the running interpreter. + + The order of the sequence corresponds to priority order for the + interpreter, from most to least important. + """ + + interp_name = interpreter_name() + if interp_name == "cp": + yield from cpython_tags(warn=warn) + else: + yield from generic_tags() + + if interp_name == "pp": + interp = "pp3" + elif interp_name == "cp": + interp = "cp" + interpreter_version(warn=warn) + else: + interp = None + yield from compatible_tags(interpreter=interp) diff --git a/vllm/lib/python3.10/site-packages/packaging/utils.py b/vllm/lib/python3.10/site-packages/packaging/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..23450953df74eccd9c13cd2a955ce09d1f968565 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/packaging/utils.py @@ -0,0 +1,163 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. 
+ +from __future__ import annotations + +import functools +import re +from typing import NewType, Tuple, Union, cast + +from .tags import Tag, parse_tag +from .version import InvalidVersion, Version, _TrimmedRelease + +BuildTag = Union[Tuple[()], Tuple[int, str]] +NormalizedName = NewType("NormalizedName", str) + + +class InvalidName(ValueError): + """ + An invalid distribution name; users should refer to the packaging user guide. + """ + + +class InvalidWheelFilename(ValueError): + """ + An invalid wheel filename was found, users should refer to PEP 427. + """ + + +class InvalidSdistFilename(ValueError): + """ + An invalid sdist filename was found, users should refer to the packaging user guide. + """ + + +# Core metadata spec for `Name` +_validate_regex = re.compile( + r"^([A-Z0-9]|[A-Z0-9][A-Z0-9._-]*[A-Z0-9])$", re.IGNORECASE +) +_canonicalize_regex = re.compile(r"[-_.]+") +_normalized_regex = re.compile(r"^([a-z0-9]|[a-z0-9]([a-z0-9-](?!--))*[a-z0-9])$") +# PEP 427: The build number must start with a digit. +_build_tag_regex = re.compile(r"(\d+)(.*)") + + +def canonicalize_name(name: str, *, validate: bool = False) -> NormalizedName: + if validate and not _validate_regex.match(name): + raise InvalidName(f"name is invalid: {name!r}") + # This is taken from PEP 503. + value = _canonicalize_regex.sub("-", name).lower() + return cast(NormalizedName, value) + + +def is_normalized_name(name: str) -> bool: + return _normalized_regex.match(name) is not None + + +@functools.singledispatch +def canonicalize_version( + version: Version | str, *, strip_trailing_zero: bool = True +) -> str: + """ + Return a canonical form of a version as a string. + + >>> canonicalize_version('1.0.1') + '1.0.1' + + Per PEP 625, versions may have multiple canonical forms, differing + only by trailing zeros. + + >>> canonicalize_version('1.0.0') + '1' + >>> canonicalize_version('1.0.0', strip_trailing_zero=False) + '1.0.0' + + Invalid versions are returned unaltered. 
+ + >>> canonicalize_version('foo bar baz') + 'foo bar baz' + """ + return str(_TrimmedRelease(str(version)) if strip_trailing_zero else version) + + +@canonicalize_version.register +def _(version: str, *, strip_trailing_zero: bool = True) -> str: + try: + parsed = Version(version) + except InvalidVersion: + # Legacy versions cannot be normalized + return version + return canonicalize_version(parsed, strip_trailing_zero=strip_trailing_zero) + + +def parse_wheel_filename( + filename: str, +) -> tuple[NormalizedName, Version, BuildTag, frozenset[Tag]]: + if not filename.endswith(".whl"): + raise InvalidWheelFilename( + f"Invalid wheel filename (extension must be '.whl'): {filename!r}" + ) + + filename = filename[:-4] + dashes = filename.count("-") + if dashes not in (4, 5): + raise InvalidWheelFilename( + f"Invalid wheel filename (wrong number of parts): {filename!r}" + ) + + parts = filename.split("-", dashes - 2) + name_part = parts[0] + # See PEP 427 for the rules on escaping the project name. 
+ if "__" in name_part or re.match(r"^[\w\d._]*$", name_part, re.UNICODE) is None: + raise InvalidWheelFilename(f"Invalid project name: {filename!r}") + name = canonicalize_name(name_part) + + try: + version = Version(parts[1]) + except InvalidVersion as e: + raise InvalidWheelFilename( + f"Invalid wheel filename (invalid version): {filename!r}" + ) from e + + if dashes == 5: + build_part = parts[2] + build_match = _build_tag_regex.match(build_part) + if build_match is None: + raise InvalidWheelFilename( + f"Invalid build number: {build_part} in {filename!r}" + ) + build = cast(BuildTag, (int(build_match.group(1)), build_match.group(2))) + else: + build = () + tags = parse_tag(parts[-1]) + return (name, version, build, tags) + + +def parse_sdist_filename(filename: str) -> tuple[NormalizedName, Version]: + if filename.endswith(".tar.gz"): + file_stem = filename[: -len(".tar.gz")] + elif filename.endswith(".zip"): + file_stem = filename[: -len(".zip")] + else: + raise InvalidSdistFilename( + f"Invalid sdist filename (extension must be '.tar.gz' or '.zip'):" + f" {filename!r}" + ) + + # We are requiring a PEP 440 version, which cannot contain dashes, + # so we split on the last dash. 
+ name_part, sep, version_part = file_stem.rpartition("-") + if not sep: + raise InvalidSdistFilename(f"Invalid sdist filename: {filename!r}") + + name = canonicalize_name(name_part) + + try: + version = Version(version_part) + except InvalidVersion as e: + raise InvalidSdistFilename( + f"Invalid sdist filename (invalid version): {filename!r}" + ) from e + + return (name, version) diff --git a/vllm/lib/python3.10/site-packages/packaging/version.py b/vllm/lib/python3.10/site-packages/packaging/version.py new file mode 100644 index 0000000000000000000000000000000000000000..c9bbda20e463b8d9389ecd65f74af33810a02bdd --- /dev/null +++ b/vllm/lib/python3.10/site-packages/packaging/version.py @@ -0,0 +1,582 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. +""" +.. testsetup:: + + from packaging.version import parse, Version +""" + +from __future__ import annotations + +import itertools +import re +from typing import Any, Callable, NamedTuple, SupportsInt, Tuple, Union + +from ._structures import Infinity, InfinityType, NegativeInfinity, NegativeInfinityType + +__all__ = ["VERSION_PATTERN", "InvalidVersion", "Version", "parse"] + +LocalType = Tuple[Union[int, str], ...] + +CmpPrePostDevType = Union[InfinityType, NegativeInfinityType, Tuple[str, int]] +CmpLocalType = Union[ + NegativeInfinityType, + Tuple[Union[Tuple[int, str], Tuple[NegativeInfinityType, Union[int, str]]], ...], +] +CmpKey = Tuple[ + int, + Tuple[int, ...], + CmpPrePostDevType, + CmpPrePostDevType, + CmpPrePostDevType, + CmpLocalType, +] +VersionComparisonMethod = Callable[[CmpKey, CmpKey], bool] + + +class _Version(NamedTuple): + epoch: int + release: tuple[int, ...] + dev: tuple[str, int] | None + pre: tuple[str, int] | None + post: tuple[str, int] | None + local: LocalType | None + + +def parse(version: str) -> Version: + """Parse the given version string. 
+ + >>> parse('1.0.dev1') + + + :param version: The version string to parse. + :raises InvalidVersion: When the version string is not a valid version. + """ + return Version(version) + + +class InvalidVersion(ValueError): + """Raised when a version string is not a valid version. + + >>> Version("invalid") + Traceback (most recent call last): + ... + packaging.version.InvalidVersion: Invalid version: 'invalid' + """ + + +class _BaseVersion: + _key: tuple[Any, ...] + + def __hash__(self) -> int: + return hash(self._key) + + # Please keep the duplicated `isinstance` check + # in the six comparisons hereunder + # unless you find a way to avoid adding overhead function calls. + def __lt__(self, other: _BaseVersion) -> bool: + if not isinstance(other, _BaseVersion): + return NotImplemented + + return self._key < other._key + + def __le__(self, other: _BaseVersion) -> bool: + if not isinstance(other, _BaseVersion): + return NotImplemented + + return self._key <= other._key + + def __eq__(self, other: object) -> bool: + if not isinstance(other, _BaseVersion): + return NotImplemented + + return self._key == other._key + + def __ge__(self, other: _BaseVersion) -> bool: + if not isinstance(other, _BaseVersion): + return NotImplemented + + return self._key >= other._key + + def __gt__(self, other: _BaseVersion) -> bool: + if not isinstance(other, _BaseVersion): + return NotImplemented + + return self._key > other._key + + def __ne__(self, other: object) -> bool: + if not isinstance(other, _BaseVersion): + return NotImplemented + + return self._key != other._key + + +# Deliberately not anchored to the start and end of the string, to make it +# easier for 3rd party code to reuse +_VERSION_PATTERN = r""" + v? + (?: + (?:(?P[0-9]+)!)? # epoch + (?P[0-9]+(?:\.[0-9]+)*) # release segment + (?P
                                          # pre-release
+            [-_\.]?
+            (?Palpha|a|beta|b|preview|pre|c|rc)
+            [-_\.]?
+            (?P[0-9]+)?
+        )?
+        (?P                                         # post release
+            (?:-(?P[0-9]+))
+            |
+            (?:
+                [-_\.]?
+                (?Ppost|rev|r)
+                [-_\.]?
+                (?P[0-9]+)?
+            )
+        )?
+        (?P                                          # dev release
+            [-_\.]?
+            (?Pdev)
+            [-_\.]?
+            (?P[0-9]+)?
+        )?
+    )
+    (?:\+(?P[a-z0-9]+(?:[-_\.][a-z0-9]+)*))?       # local version
+"""
+
+VERSION_PATTERN = _VERSION_PATTERN
+"""
+A string containing the regular expression used to match a valid version.
+
+The pattern is not anchored at either end, and is intended for embedding in larger
+expressions (for example, matching a version number as part of a file name). The
+regular expression should be compiled with the ``re.VERBOSE`` and ``re.IGNORECASE``
+flags set.
+
+:meta hide-value:
+"""
+
+
+class Version(_BaseVersion):
+    """This class abstracts handling of a project's versions.
+
+    A :class:`Version` instance is comparison aware and can be compared and
+    sorted using the standard Python interfaces.
+
+    >>> v1 = Version("1.0a5")
+    >>> v2 = Version("1.0")
+    >>> v1
+    
+    >>> v2
+    
+    >>> v1 < v2
+    True
+    >>> v1 == v2
+    False
+    >>> v1 > v2
+    False
+    >>> v1 >= v2
+    False
+    >>> v1 <= v2
+    True
+    """
+
+    _regex = re.compile(r"^\s*" + VERSION_PATTERN + r"\s*$", re.VERBOSE | re.IGNORECASE)
+    _key: CmpKey
+
+    def __init__(self, version: str) -> None:
+        """Initialize a Version object.
+
+        :param version:
+            The string representation of a version which will be parsed and normalized
+            before use.
+        :raises InvalidVersion:
+            If the ``version`` does not conform to PEP 440 in any way then this
+            exception will be raised.
+        """
+
+        # Validate the version and parse it into pieces
+        match = self._regex.search(version)
+        if not match:
+            raise InvalidVersion(f"Invalid version: {version!r}")
+
+        # Store the parsed out pieces of the version
+        self._version = _Version(
+            epoch=int(match.group("epoch")) if match.group("epoch") else 0,
+            release=tuple(int(i) for i in match.group("release").split(".")),
+            pre=_parse_letter_version(match.group("pre_l"), match.group("pre_n")),
+            post=_parse_letter_version(
+                match.group("post_l"), match.group("post_n1") or match.group("post_n2")
+            ),
+            dev=_parse_letter_version(match.group("dev_l"), match.group("dev_n")),
+            local=_parse_local_version(match.group("local")),
+        )
+
+        # Generate a key which will be used for sorting
+        self._key = _cmpkey(
+            self._version.epoch,
+            self._version.release,
+            self._version.pre,
+            self._version.post,
+            self._version.dev,
+            self._version.local,
+        )
+
+    def __repr__(self) -> str:
+        """A representation of the Version that shows all internal state.
+
+        >>> Version('1.0.0')
+        
+        """
+        return f""
+
+    def __str__(self) -> str:
+        """A string representation of the version that can be round-tripped.
+
+        >>> str(Version("1.0a5"))
+        '1.0a5'
+        """
+        parts = []
+
+        # Epoch
+        if self.epoch != 0:
+            parts.append(f"{self.epoch}!")
+
+        # Release segment
+        parts.append(".".join(str(x) for x in self.release))
+
+        # Pre-release
+        if self.pre is not None:
+            parts.append("".join(str(x) for x in self.pre))
+
+        # Post-release
+        if self.post is not None:
+            parts.append(f".post{self.post}")
+
+        # Development release
+        if self.dev is not None:
+            parts.append(f".dev{self.dev}")
+
+        # Local version segment
+        if self.local is not None:
+            parts.append(f"+{self.local}")
+
+        return "".join(parts)
+
+    @property
+    def epoch(self) -> int:
+        """The epoch of the version.
+
+        >>> Version("2.0.0").epoch
+        0
+        >>> Version("1!2.0.0").epoch
+        1
+        """
+        return self._version.epoch
+
+    @property
+    def release(self) -> tuple[int, ...]:
+        """The components of the "release" segment of the version.
+
+        >>> Version("1.2.3").release
+        (1, 2, 3)
+        >>> Version("2.0.0").release
+        (2, 0, 0)
+        >>> Version("1!2.0.0.post0").release
+        (2, 0, 0)
+
+        Includes trailing zeroes but not the epoch or any pre-release / development /
+        post-release suffixes.
+        """
+        return self._version.release
+
+    @property
+    def pre(self) -> tuple[str, int] | None:
+        """The pre-release segment of the version.
+
+        >>> print(Version("1.2.3").pre)
+        None
+        >>> Version("1.2.3a1").pre
+        ('a', 1)
+        >>> Version("1.2.3b1").pre
+        ('b', 1)
+        >>> Version("1.2.3rc1").pre
+        ('rc', 1)
+        """
+        return self._version.pre
+
+    @property
+    def post(self) -> int | None:
+        """The post-release number of the version.
+
+        >>> print(Version("1.2.3").post)
+        None
+        >>> Version("1.2.3.post1").post
+        1
+        """
+        return self._version.post[1] if self._version.post else None
+
+    @property
+    def dev(self) -> int | None:
+        """The development number of the version.
+
+        >>> print(Version("1.2.3").dev)
+        None
+        >>> Version("1.2.3.dev1").dev
+        1
+        """
+        return self._version.dev[1] if self._version.dev else None
+
+    @property
+    def local(self) -> str | None:
+        """The local version segment of the version.
+
+        >>> print(Version("1.2.3").local)
+        None
+        >>> Version("1.2.3+abc").local
+        'abc'
+        """
+        if self._version.local:
+            return ".".join(str(x) for x in self._version.local)
+        else:
+            return None
+
+    @property
+    def public(self) -> str:
+        """The public portion of the version.
+
+        >>> Version("1.2.3").public
+        '1.2.3'
+        >>> Version("1.2.3+abc").public
+        '1.2.3'
+        >>> Version("1!1.2.3dev1+abc").public
+        '1!1.2.3.dev1'
+        """
+        return str(self).split("+", 1)[0]
+
+    @property
+    def base_version(self) -> str:
+        """The "base version" of the version.
+
+        >>> Version("1.2.3").base_version
+        '1.2.3'
+        >>> Version("1.2.3+abc").base_version
+        '1.2.3'
+        >>> Version("1!1.2.3dev1+abc").base_version
+        '1!1.2.3'
+
+        The "base version" is the public version of the project without any pre or post
+        release markers.
+        """
+        parts = []
+
+        # Epoch
+        if self.epoch != 0:
+            parts.append(f"{self.epoch}!")
+
+        # Release segment
+        parts.append(".".join(str(x) for x in self.release))
+
+        return "".join(parts)
+
+    @property
+    def is_prerelease(self) -> bool:
+        """Whether this version is a pre-release.
+
+        >>> Version("1.2.3").is_prerelease
+        False
+        >>> Version("1.2.3a1").is_prerelease
+        True
+        >>> Version("1.2.3b1").is_prerelease
+        True
+        >>> Version("1.2.3rc1").is_prerelease
+        True
+        >>> Version("1.2.3dev1").is_prerelease
+        True
+        """
+        return self.dev is not None or self.pre is not None
+
+    @property
+    def is_postrelease(self) -> bool:
+        """Whether this version is a post-release.
+
+        >>> Version("1.2.3").is_postrelease
+        False
+        >>> Version("1.2.3.post1").is_postrelease
+        True
+        """
+        return self.post is not None
+
+    @property
+    def is_devrelease(self) -> bool:
+        """Whether this version is a development release.
+
+        >>> Version("1.2.3").is_devrelease
+        False
+        >>> Version("1.2.3.dev1").is_devrelease
+        True
+        """
+        return self.dev is not None
+
+    @property
+    def major(self) -> int:
+        """The first item of :attr:`release` or ``0`` if unavailable.
+
+        >>> Version("1.2.3").major
+        1
+        """
+        return self.release[0] if len(self.release) >= 1 else 0
+
+    @property
+    def minor(self) -> int:
+        """The second item of :attr:`release` or ``0`` if unavailable.
+
+        >>> Version("1.2.3").minor
+        2
+        >>> Version("1").minor
+        0
+        """
+        return self.release[1] if len(self.release) >= 2 else 0
+
+    @property
+    def micro(self) -> int:
+        """The third item of :attr:`release` or ``0`` if unavailable.
+
+        >>> Version("1.2.3").micro
+        3
+        >>> Version("1").micro
+        0
+        """
+        return self.release[2] if len(self.release) >= 3 else 0
+
+
+class _TrimmedRelease(Version):
+    @property
+    def release(self) -> tuple[int, ...]:
+        """
+        Release segment without any trailing zeros.
+
+        >>> _TrimmedRelease('1.0.0').release
+        (1,)
+        >>> _TrimmedRelease('0.0').release
+        (0,)
+        """
+        rel = super().release
+        nonzeros = (index for index, val in enumerate(rel) if val)
+        last_nonzero = max(nonzeros, default=0)
+        return rel[: last_nonzero + 1]
+
+
+def _parse_letter_version(
+    letter: str | None, number: str | bytes | SupportsInt | None
+) -> tuple[str, int] | None:
+    if letter:
+        # We consider there to be an implicit 0 in a pre-release if there is
+        # not a numeral associated with it.
+        if number is None:
+            number = 0
+
+        # We normalize any letters to their lower case form
+        letter = letter.lower()
+
+        # We consider some words to be alternate spellings of other words and
+        # in those cases we want to normalize the spellings to our preferred
+        # spelling.
+        if letter == "alpha":
+            letter = "a"
+        elif letter == "beta":
+            letter = "b"
+        elif letter in ["c", "pre", "preview"]:
+            letter = "rc"
+        elif letter in ["rev", "r"]:
+            letter = "post"
+
+        return letter, int(number)
+
+    assert not letter
+    if number:
+        # We assume if we are given a number, but we are not given a letter
+        # then this is using the implicit post release syntax (e.g. 1.0-1)
+        letter = "post"
+
+        return letter, int(number)
+
+    return None
+
+
+_local_version_separators = re.compile(r"[\._-]")
+
+
+def _parse_local_version(local: str | None) -> LocalType | None:
+    """
+    Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
+    """
+    if local is not None:
+        return tuple(
+            part.lower() if not part.isdigit() else int(part)
+            for part in _local_version_separators.split(local)
+        )
+    return None
+
+
+def _cmpkey(
+    epoch: int,
+    release: tuple[int, ...],
+    pre: tuple[str, int] | None,
+    post: tuple[str, int] | None,
+    dev: tuple[str, int] | None,
+    local: LocalType | None,
+) -> CmpKey:
+    # When we compare a release version, we want to compare it with all of the
+    # trailing zeros removed. So we'll use a reverse the list, drop all the now
+    # leading zeros until we come to something non zero, then take the rest
+    # re-reverse it back into the correct order and make it a tuple and use
+    # that for our sorting key.
+    _release = tuple(
+        reversed(list(itertools.dropwhile(lambda x: x == 0, reversed(release))))
+    )
+
+    # We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0.
+    # We'll do this by abusing the pre segment, but we _only_ want to do this
+    # if there is not a pre or a post segment. If we have one of those then
+    # the normal sorting rules will handle this case correctly.
+    if pre is None and post is None and dev is not None:
+        _pre: CmpPrePostDevType = NegativeInfinity
+    # Versions without a pre-release (except as noted above) should sort after
+    # those with one.
+    elif pre is None:
+        _pre = Infinity
+    else:
+        _pre = pre
+
+    # Versions without a post segment should sort before those with one.
+    if post is None:
+        _post: CmpPrePostDevType = NegativeInfinity
+
+    else:
+        _post = post
+
+    # Versions without a development segment should sort after those with one.
+    if dev is None:
+        _dev: CmpPrePostDevType = Infinity
+
+    else:
+        _dev = dev
+
+    if local is None:
+        # Versions without a local segment should sort before those with one.
+        _local: CmpLocalType = NegativeInfinity
+    else:
+        # Versions with a local segment need that segment parsed to implement
+        # the sorting rules in PEP440.
+        # - Alpha numeric segments sort before numeric segments
+        # - Alpha numeric segments sort lexicographically
+        # - Numeric segments sort numerically
+        # - Shorter versions sort before longer versions when the prefixes
+        #   match exactly
+        _local = tuple(
+            (i, "") if isinstance(i, int) else (NegativeInfinity, i) for i in local
+        )
+
+    return epoch, _release, _pre, _post, _dev, _local
diff --git a/vllm/lib/python3.10/site-packages/pyzmq.libs/libsodium-1b1f72d5.so.26.1.0 b/vllm/lib/python3.10/site-packages/pyzmq.libs/libsodium-1b1f72d5.so.26.1.0
new file mode 100644
index 0000000000000000000000000000000000000000..ed71804c40ee71e38a4d19b8f4791e342d9cfdfe
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/pyzmq.libs/libsodium-1b1f72d5.so.26.1.0
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ede08800310af6ce9b4f4e34147b80e92547ae37c7e7ec571ad9509c6afda500
+size 1246721
diff --git a/vllm/lib/python3.10/site-packages/pyzmq.libs/libzmq-a430b4ce.so.5.2.5 b/vllm/lib/python3.10/site-packages/pyzmq.libs/libzmq-a430b4ce.so.5.2.5
new file mode 100644
index 0000000000000000000000000000000000000000..773de77bc152f22136c24f1b2a978542c6c82446
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/pyzmq.libs/libzmq-a430b4ce.so.5.2.5
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:febf5058ff086ef5213a78b4b585e4c0e6c93ae8fcf411eac616c7de70744ac7
+size 970897
diff --git a/vllm/lib/python3.10/site-packages/sentry_sdk-2.19.2.dist-info/INSTALLER b/vllm/lib/python3.10/site-packages/sentry_sdk-2.19.2.dist-info/INSTALLER
new file mode 100644
index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/sentry_sdk-2.19.2.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/vllm/lib/python3.10/site-packages/sentry_sdk-2.19.2.dist-info/LICENSE b/vllm/lib/python3.10/site-packages/sentry_sdk-2.19.2.dist-info/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..c4c8162f132e10878b53699fe0cae796ed0f0c03
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/sentry_sdk-2.19.2.dist-info/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2018-2024 Functional Software, Inc. dba Sentry
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vllm/lib/python3.10/site-packages/sentry_sdk-2.19.2.dist-info/METADATA b/vllm/lib/python3.10/site-packages/sentry_sdk-2.19.2.dist-info/METADATA
new file mode 100644
index 0000000000000000000000000000000000000000..0ff35552ef57e7989ef9154c336c04d8871abc0f
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/sentry_sdk-2.19.2.dist-info/METADATA
@@ -0,0 +1,230 @@
+Metadata-Version: 2.1
+Name: sentry-sdk
+Version: 2.19.2
+Summary: Python client for Sentry (https://sentry.io)
+Home-page: https://github.com/getsentry/sentry-python
+Author: Sentry Team and Contributors
+Author-email: hello@sentry.io
+License: MIT
+Project-URL: Documentation, https://docs.sentry.io/platforms/python/
+Project-URL: Changelog, https://github.com/getsentry/sentry-python/blob/master/CHANGELOG.md
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Environment :: Web Environment
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: 3.12
+Classifier: Programming Language :: Python :: 3.13
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+Requires-Python: >=3.6
+Description-Content-Type: text/markdown
+License-File: LICENSE
+Requires-Dist: urllib3>=1.26.11
+Requires-Dist: certifi
+Provides-Extra: aiohttp
+Requires-Dist: aiohttp>=3.5; extra == "aiohttp"
+Provides-Extra: anthropic
+Requires-Dist: anthropic>=0.16; extra == "anthropic"
+Provides-Extra: arq
+Requires-Dist: arq>=0.23; extra == "arq"
+Provides-Extra: asyncpg
+Requires-Dist: asyncpg>=0.23; extra == "asyncpg"
+Provides-Extra: beam
+Requires-Dist: apache-beam>=2.12; extra == "beam"
+Provides-Extra: bottle
+Requires-Dist: bottle>=0.12.13; extra == "bottle"
+Provides-Extra: celery
+Requires-Dist: celery>=3; extra == "celery"
+Provides-Extra: celery-redbeat
+Requires-Dist: celery-redbeat>=2; extra == "celery-redbeat"
+Provides-Extra: chalice
+Requires-Dist: chalice>=1.16.0; extra == "chalice"
+Provides-Extra: clickhouse-driver
+Requires-Dist: clickhouse-driver>=0.2.0; extra == "clickhouse-driver"
+Provides-Extra: django
+Requires-Dist: django>=1.8; extra == "django"
+Provides-Extra: falcon
+Requires-Dist: falcon>=1.4; extra == "falcon"
+Provides-Extra: fastapi
+Requires-Dist: fastapi>=0.79.0; extra == "fastapi"
+Provides-Extra: flask
+Requires-Dist: flask>=0.11; extra == "flask"
+Requires-Dist: blinker>=1.1; extra == "flask"
+Requires-Dist: markupsafe; extra == "flask"
+Provides-Extra: grpcio
+Requires-Dist: grpcio>=1.21.1; extra == "grpcio"
+Requires-Dist: protobuf>=3.8.0; extra == "grpcio"
+Provides-Extra: http2
+Requires-Dist: httpcore[http2]==1.*; extra == "http2"
+Provides-Extra: httpx
+Requires-Dist: httpx>=0.16.0; extra == "httpx"
+Provides-Extra: huey
+Requires-Dist: huey>=2; extra == "huey"
+Provides-Extra: huggingface-hub
+Requires-Dist: huggingface_hub>=0.22; extra == "huggingface-hub"
+Provides-Extra: langchain
+Requires-Dist: langchain>=0.0.210; extra == "langchain"
+Provides-Extra: launchdarkly
+Requires-Dist: launchdarkly-server-sdk>=9.8.0; extra == "launchdarkly"
+Provides-Extra: litestar
+Requires-Dist: litestar>=2.0.0; extra == "litestar"
+Provides-Extra: loguru
+Requires-Dist: loguru>=0.5; extra == "loguru"
+Provides-Extra: openai
+Requires-Dist: openai>=1.0.0; extra == "openai"
+Requires-Dist: tiktoken>=0.3.0; extra == "openai"
+Provides-Extra: openfeature
+Requires-Dist: openfeature-sdk>=0.7.1; extra == "openfeature"
+Provides-Extra: opentelemetry
+Requires-Dist: opentelemetry-distro>=0.35b0; extra == "opentelemetry"
+Provides-Extra: opentelemetry-experimental
+Requires-Dist: opentelemetry-distro; extra == "opentelemetry-experimental"
+Provides-Extra: pure-eval
+Requires-Dist: pure_eval; extra == "pure-eval"
+Requires-Dist: executing; extra == "pure-eval"
+Requires-Dist: asttokens; extra == "pure-eval"
+Provides-Extra: pymongo
+Requires-Dist: pymongo>=3.1; extra == "pymongo"
+Provides-Extra: pyspark
+Requires-Dist: pyspark>=2.4.4; extra == "pyspark"
+Provides-Extra: quart
+Requires-Dist: quart>=0.16.1; extra == "quart"
+Requires-Dist: blinker>=1.1; extra == "quart"
+Provides-Extra: rq
+Requires-Dist: rq>=0.6; extra == "rq"
+Provides-Extra: sanic
+Requires-Dist: sanic>=0.8; extra == "sanic"
+Provides-Extra: sqlalchemy
+Requires-Dist: sqlalchemy>=1.2; extra == "sqlalchemy"
+Provides-Extra: starlette
+Requires-Dist: starlette>=0.19.1; extra == "starlette"
+Provides-Extra: starlite
+Requires-Dist: starlite>=1.48; extra == "starlite"
+Provides-Extra: tornado
+Requires-Dist: tornado>=6; extra == "tornado"
+
+
+  Sentry for Python
+
+
+
+_Bad software is everywhere, and we're tired of it. Sentry is on a mission to help developers write better software faster, so we can get back to enjoying technology. If you want to join us, [**check out our open positions**](https://sentry.io/careers/)_.
+
+# Official Sentry SDK for Python
+
+[![Build Status](https://github.com/getsentry/sentry-python/actions/workflows/ci.yml/badge.svg)](https://github.com/getsentry/sentry-python/actions/workflows/ci.yml)
+[![PyPi page link -- version](https://img.shields.io/pypi/v/sentry-sdk.svg)](https://pypi.python.org/pypi/sentry-sdk)
+[![Discord](https://img.shields.io/discord/621778831602221064)](https://discord.gg/cWnMQeA)
+
+Welcome to the official Python SDK for **[Sentry](http://sentry.io/)**!
+
+## Getting Started
+
+### Installation
+
+Getting Sentry into your project is straightforward. Just run this command in your terminal:
+
+```bash
+pip install --upgrade sentry-sdk
+```
+
+### Basic Configuration
+
+Here’s a quick configuration example to get Sentry up and running:
+
+```python
+import sentry_sdk
+
+sentry_sdk.init(
+    "https://12927b5f211046b575ee51fd8b1ac34f@o1.ingest.sentry.io/1",  # Your DSN here
+
+    # Set traces_sample_rate to 1.0 to capture 100%
+    # of transactions for performance monitoring.
+    traces_sample_rate=1.0,
+)
+```
+
+With this configuration, Sentry will monitor for exceptions and performance issues.
+
+### Quick Usage Example
+
+To generate some events that will show up in Sentry, you can log messages or capture errors:
+
+```python
+from sentry_sdk import capture_message
+capture_message("Hello Sentry!")  # You'll see this in your Sentry dashboard.
+
+raise ValueError("Oops, something went wrong!")  # This will create an error event in Sentry.
+```
+
+#### Explore the Docs
+
+For more details on advanced usage, integrations, and customization, check out the full documentation:
+
+- [Official SDK Docs](https://docs.sentry.io/platforms/python/)
+- [API Reference](https://getsentry.github.io/sentry-python/)
+
+## Integrations
+
+Sentry integrates with many popular Python libraries and frameworks, including:
+
+- [Django](https://docs.sentry.io/platforms/python/integrations/django/)
+- [Flask](https://docs.sentry.io/platforms/python/integrations/flask/)
+- [FastAPI](https://docs.sentry.io/platforms/python/integrations/fastapi/)
+- [Celery](https://docs.sentry.io/platforms/python/integrations/celery/)
+- [AWS Lambda](https://docs.sentry.io/platforms/python/integrations/aws-lambda/)
+
+Want more? [Check out the full list of integrations](https://docs.sentry.io/platforms/python/integrations/).
+
+### Rolling Your Own Integration?
+
+If you want to create a new integration or improve an existing one, we’d welcome your contributions! Please read our [contributing guide](https://github.com/getsentry/sentry-python/blob/master/CONTRIBUTING.md) before starting.
+
+## Migrating Between Versions?
+
+### From `1.x` to `2.x`
+
+If you're using the older `1.x` version of the SDK, now's the time to upgrade to `2.x`. It includes significant upgrades and new features. Check our [migration guide](https://docs.sentry.io/platforms/python/migration/1.x-to-2.x) for assistance.
+
+### From `raven-python`
+
+Using the legacy `raven-python` client? It's now in maintenance mode, and we recommend migrating to the new SDK for an improved experience. Get all the details in our [migration guide](https://docs.sentry.io/platforms/python/migration/raven-to-sentry-sdk/).
+
+## Want to Contribute?
+
+We’d love your help in improving the Sentry SDK! Whether it’s fixing bugs, adding features, or enhancing documentation, every contribution is valuable.
+
+For details on how to contribute, please check out [CONTRIBUTING.md](CONTRIBUTING.md) and explore the [open issues](https://github.com/getsentry/sentry-python/issues).
+
+## Need Help?
+
+If you encounter issues or need help setting up or configuring the SDK, don’t hesitate to reach out to the [Sentry Community on Discord](https://discord.com/invite/Ww9hbqr). There are plenty of great people there ready to help!
+
+## Resources
+
+Here are additional resources to help you make the most of Sentry:
+
+- [![Documentation](https://img.shields.io/badge/documentation-sentry.io-green.svg)](https://docs.sentry.io/quickstart/) – Official documentation to get started.
+- [![Discord](https://img.shields.io/discord/621778831602221064)](https://discord.gg/Ww9hbqr) – Join our Discord community.
+- [![Twitter Follow](https://img.shields.io/twitter/follow/getsentry?label=getsentry&style=social)](https://twitter.com/intent/follow?screen_name=getsentry) – Follow us on X (Twitter) for updates.
+- [![Stack Overflow](https://img.shields.io/badge/stack%20overflow-sentry-green.svg)](http://stackoverflow.com/questions/tagged/sentry) – Questions and answers related to Sentry.
+
+## License
+
+The SDK is open-source and available under the MIT license. Check out the [LICENSE](LICENSE) file for more information.
+
+---
+
+Thanks to everyone who has helped improve the SDK!
+
+
+  
+
diff --git a/vllm/lib/python3.10/site-packages/sentry_sdk-2.19.2.dist-info/RECORD b/vllm/lib/python3.10/site-packages/sentry_sdk-2.19.2.dist-info/RECORD
new file mode 100644
index 0000000000000000000000000000000000000000..05da07607f561faab4311a6e6bc285f68ca711be
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/sentry_sdk-2.19.2.dist-info/RECORD
@@ -0,0 +1,289 @@
+sentry_sdk-2.19.2.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+sentry_sdk-2.19.2.dist-info/LICENSE,sha256=bX7GLcIOi5LLiLifs7Ve6FdlBfPhr6V5UlSPbupdAI4,1098
+sentry_sdk-2.19.2.dist-info/METADATA,sha256=2ZEq9KHzzUmIn1Q_fVA1Sn2s87N8aFphUomS6iP3OeU,9852
+sentry_sdk-2.19.2.dist-info/RECORD,,
+sentry_sdk-2.19.2.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+sentry_sdk-2.19.2.dist-info/WHEEL,sha256=pxeNX5JdtCe58PUSYP9upmc7jdRPgvT0Gm9kb1SHlVw,109
+sentry_sdk-2.19.2.dist-info/entry_points.txt,sha256=qacZEz40UspQZD1IukCXykx0JtImqGDOctS5KfOLTko,91
+sentry_sdk-2.19.2.dist-info/top_level.txt,sha256=XrQz30XE9FKXSY_yGLrd9bsv2Rk390GTDJOSujYaMxI,11
+sentry_sdk/__init__.py,sha256=ywM5WQA3Qy4500dumhgHDSNWwVmMikmOIdhIvmAaTMg,1179
+sentry_sdk/__pycache__/__init__.cpython-310.pyc,,
+sentry_sdk/__pycache__/_compat.cpython-310.pyc,,
+sentry_sdk/__pycache__/_init_implementation.cpython-310.pyc,,
+sentry_sdk/__pycache__/_lru_cache.cpython-310.pyc,,
+sentry_sdk/__pycache__/_queue.cpython-310.pyc,,
+sentry_sdk/__pycache__/_types.cpython-310.pyc,,
+sentry_sdk/__pycache__/_werkzeug.cpython-310.pyc,,
+sentry_sdk/__pycache__/api.cpython-310.pyc,,
+sentry_sdk/__pycache__/attachments.cpython-310.pyc,,
+sentry_sdk/__pycache__/client.cpython-310.pyc,,
+sentry_sdk/__pycache__/consts.cpython-310.pyc,,
+sentry_sdk/__pycache__/debug.cpython-310.pyc,,
+sentry_sdk/__pycache__/envelope.cpython-310.pyc,,
+sentry_sdk/__pycache__/flag_utils.cpython-310.pyc,,
+sentry_sdk/__pycache__/hub.cpython-310.pyc,,
+sentry_sdk/__pycache__/metrics.cpython-310.pyc,,
+sentry_sdk/__pycache__/monitor.cpython-310.pyc,,
+sentry_sdk/__pycache__/scope.cpython-310.pyc,,
+sentry_sdk/__pycache__/scrubber.cpython-310.pyc,,
+sentry_sdk/__pycache__/serializer.cpython-310.pyc,,
+sentry_sdk/__pycache__/session.cpython-310.pyc,,
+sentry_sdk/__pycache__/sessions.cpython-310.pyc,,
+sentry_sdk/__pycache__/spotlight.cpython-310.pyc,,
+sentry_sdk/__pycache__/tracing.cpython-310.pyc,,
+sentry_sdk/__pycache__/tracing_utils.cpython-310.pyc,,
+sentry_sdk/__pycache__/transport.cpython-310.pyc,,
+sentry_sdk/__pycache__/types.cpython-310.pyc,,
+sentry_sdk/__pycache__/utils.cpython-310.pyc,,
+sentry_sdk/__pycache__/worker.cpython-310.pyc,,
+sentry_sdk/_compat.py,sha256=Pxcg6cUYPiOoXIFfLI_H3ATb7SfrcXOeZdzpeWv3umI,3116
+sentry_sdk/_init_implementation.py,sha256=WL54d8nggjRunBm3XlG-sWSx4yS5lpYYggd7YBWpuVk,2559
+sentry_sdk/_lru_cache.py,sha256=kaL04-sM81nyFlVJRKFoFymLOptgNh-RgCB_HulXLQk,6097
+sentry_sdk/_queue.py,sha256=8oUHpMgSzS40rxfHmjRYlKAfvKtrPvK1FL56RbnJ1iY,11248
+sentry_sdk/_types.py,sha256=iTcUMwzxiIJ8u038tyYDK_WpvDsAA3h5QoUONqsMc8M,6806
+sentry_sdk/_werkzeug.py,sha256=m3GPf-jHd8v3eVOfBHaKw5f0uHoLkXrSO1EcY-8EisY,3734
+sentry_sdk/ai/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+sentry_sdk/ai/__pycache__/__init__.cpython-310.pyc,,
+sentry_sdk/ai/__pycache__/monitoring.cpython-310.pyc,,
+sentry_sdk/ai/__pycache__/utils.cpython-310.pyc,,
+sentry_sdk/ai/monitoring.py,sha256=eUW7j7AFG4sKuTIiAVJFnQ747roJJkVr_uyRla9hinE,4446
+sentry_sdk/ai/utils.py,sha256=QCwhHoptrdXyYroJqzCKxqi0cmrlD9IDDWUcBk6yWZc,950
+sentry_sdk/api.py,sha256=dYwjjLVtFuUT5VYUvc0xuSWZ7wFS9GYpZp6wRD2CX4M,11298
+sentry_sdk/attachments.py,sha256=0Dylhm065O6hNFjB40fWCd5Hg4qWSXndmi1TPWglZkI,3109
+sentry_sdk/client.py,sha256=kFnpnlm1qGvF-tqy1awSg8P5spOB27QFMvvNhcrgDFA,32850
+sentry_sdk/consts.py,sha256=Kg_FRBtGQ9zoQyEQQXyZBm_cgsL2ZThaPIn1qSVDFQM,18757
+sentry_sdk/crons/__init__.py,sha256=3Zt6g1-pZZ12uRKKsC8QLm3XgJ4K1VYxgVpNNUygOZY,221
+sentry_sdk/crons/__pycache__/__init__.cpython-310.pyc,,
+sentry_sdk/crons/__pycache__/api.cpython-310.pyc,,
+sentry_sdk/crons/__pycache__/consts.cpython-310.pyc,,
+sentry_sdk/crons/__pycache__/decorator.cpython-310.pyc,,
+sentry_sdk/crons/api.py,sha256=s3x6SG-jqIdWS-Kj0sAxJv0nz2A3stdGE1UCtQyRUy4,1559
+sentry_sdk/crons/consts.py,sha256=dXqJk5meBSu5rjlGpqAOlkpACnuUi7svQnAFoy1ZNUU,87
+sentry_sdk/crons/decorator.py,sha256=UrjeIqBCbvsuKrfjGkKJbbLBvjw2TQvDWcTO7WwAmrI,3913
+sentry_sdk/debug.py,sha256=ddBehQlAuQC1sg1XO-N4N3diZ0x0iT5RWJwFdrtcsjw,1019
+sentry_sdk/envelope.py,sha256=wN3vs-BTDRn5LeYYMSmNotxOF8DVtpQo3OLuJ8NAzT0,10178
+sentry_sdk/flag_utils.py,sha256=Onh2gjLnsE3fsRm4Co0nTycY0OT3jaaqVIxwgZavsN8,1224
+sentry_sdk/hub.py,sha256=2QLvEtIYSYV04r8h7VBmQjookILaiBZxZBGTtQKNAWg,25675
+sentry_sdk/integrations/__init__.py,sha256=sRe3GRhZ0MS1yJBvrwCdKXfpYuKFGJux3EEBcQUgNPo,8648
+sentry_sdk/integrations/__pycache__/__init__.cpython-310.pyc,,
+sentry_sdk/integrations/__pycache__/_asgi_common.cpython-310.pyc,,
+sentry_sdk/integrations/__pycache__/_wsgi_common.cpython-310.pyc,,
+sentry_sdk/integrations/__pycache__/aiohttp.cpython-310.pyc,,
+sentry_sdk/integrations/__pycache__/anthropic.cpython-310.pyc,,
+sentry_sdk/integrations/__pycache__/argv.cpython-310.pyc,,
+sentry_sdk/integrations/__pycache__/ariadne.cpython-310.pyc,,
+sentry_sdk/integrations/__pycache__/arq.cpython-310.pyc,,
+sentry_sdk/integrations/__pycache__/asgi.cpython-310.pyc,,
+sentry_sdk/integrations/__pycache__/asyncio.cpython-310.pyc,,
+sentry_sdk/integrations/__pycache__/asyncpg.cpython-310.pyc,,
+sentry_sdk/integrations/__pycache__/atexit.cpython-310.pyc,,
+sentry_sdk/integrations/__pycache__/aws_lambda.cpython-310.pyc,,
+sentry_sdk/integrations/__pycache__/beam.cpython-310.pyc,,
+sentry_sdk/integrations/__pycache__/boto3.cpython-310.pyc,,
+sentry_sdk/integrations/__pycache__/bottle.cpython-310.pyc,,
+sentry_sdk/integrations/__pycache__/chalice.cpython-310.pyc,,
+sentry_sdk/integrations/__pycache__/clickhouse_driver.cpython-310.pyc,,
+sentry_sdk/integrations/__pycache__/cloud_resource_context.cpython-310.pyc,,
+sentry_sdk/integrations/__pycache__/cohere.cpython-310.pyc,,
+sentry_sdk/integrations/__pycache__/dedupe.cpython-310.pyc,,
+sentry_sdk/integrations/__pycache__/dramatiq.cpython-310.pyc,,
+sentry_sdk/integrations/__pycache__/excepthook.cpython-310.pyc,,
+sentry_sdk/integrations/__pycache__/executing.cpython-310.pyc,,
+sentry_sdk/integrations/__pycache__/falcon.cpython-310.pyc,,
+sentry_sdk/integrations/__pycache__/fastapi.cpython-310.pyc,,
+sentry_sdk/integrations/__pycache__/flask.cpython-310.pyc,,
+sentry_sdk/integrations/__pycache__/gcp.cpython-310.pyc,,
+sentry_sdk/integrations/__pycache__/gnu_backtrace.cpython-310.pyc,,
+sentry_sdk/integrations/__pycache__/gql.cpython-310.pyc,,
+sentry_sdk/integrations/__pycache__/graphene.cpython-310.pyc,,
+sentry_sdk/integrations/__pycache__/httpx.cpython-310.pyc,,
+sentry_sdk/integrations/__pycache__/huey.cpython-310.pyc,,
+sentry_sdk/integrations/__pycache__/huggingface_hub.cpython-310.pyc,,
+sentry_sdk/integrations/__pycache__/langchain.cpython-310.pyc,,
+sentry_sdk/integrations/__pycache__/launchdarkly.cpython-310.pyc,,
+sentry_sdk/integrations/__pycache__/litestar.cpython-310.pyc,,
+sentry_sdk/integrations/__pycache__/logging.cpython-310.pyc,,
+sentry_sdk/integrations/__pycache__/loguru.cpython-310.pyc,,
+sentry_sdk/integrations/__pycache__/modules.cpython-310.pyc,,
+sentry_sdk/integrations/__pycache__/openai.cpython-310.pyc,,
+sentry_sdk/integrations/__pycache__/openfeature.cpython-310.pyc,,
+sentry_sdk/integrations/__pycache__/pure_eval.cpython-310.pyc,,
+sentry_sdk/integrations/__pycache__/pymongo.cpython-310.pyc,,
+sentry_sdk/integrations/__pycache__/pyramid.cpython-310.pyc,,
+sentry_sdk/integrations/__pycache__/quart.cpython-310.pyc,,
+sentry_sdk/integrations/__pycache__/ray.cpython-310.pyc,,
+sentry_sdk/integrations/__pycache__/rq.cpython-310.pyc,,
+sentry_sdk/integrations/__pycache__/rust_tracing.cpython-310.pyc,,
+sentry_sdk/integrations/__pycache__/sanic.cpython-310.pyc,,
+sentry_sdk/integrations/__pycache__/serverless.cpython-310.pyc,,
+sentry_sdk/integrations/__pycache__/socket.cpython-310.pyc,,
+sentry_sdk/integrations/__pycache__/sqlalchemy.cpython-310.pyc,,
+sentry_sdk/integrations/__pycache__/starlette.cpython-310.pyc,,
+sentry_sdk/integrations/__pycache__/starlite.cpython-310.pyc,,
+sentry_sdk/integrations/__pycache__/stdlib.cpython-310.pyc,,
+sentry_sdk/integrations/__pycache__/strawberry.cpython-310.pyc,,
+sentry_sdk/integrations/__pycache__/sys_exit.cpython-310.pyc,,
+sentry_sdk/integrations/__pycache__/threading.cpython-310.pyc,,
+sentry_sdk/integrations/__pycache__/tornado.cpython-310.pyc,,
+sentry_sdk/integrations/__pycache__/trytond.cpython-310.pyc,,
+sentry_sdk/integrations/__pycache__/wsgi.cpython-310.pyc,,
+sentry_sdk/integrations/_asgi_common.py,sha256=Ypg7IctB3iPPY60ebVlzChzgT8GeGpZ0YH8VvJNDlEY,3187
+sentry_sdk/integrations/_wsgi_common.py,sha256=6qgplAbnCCClrOa5gLYvug6c6CgelyAxOY_6DvClzt8,7422
+sentry_sdk/integrations/aiohttp.py,sha256=WyNjD36JmIrYHjUMo_HGolcLU1cz5R-nkEQvo5hvFPI,13028
+sentry_sdk/integrations/anthropic.py,sha256=dL7BYqwLJCKL6jCDCW8T1nOKIU9BQowg0pwzHdw7H0A,9519
+sentry_sdk/integrations/argv.py,sha256=GIY7TBFETF8Z0fDzqTXEJldt5XXCDdFNZxpGxP7EPaU,911
+sentry_sdk/integrations/ariadne.py,sha256=lqv3il5cuvhE5-BRkv2cljJXx9wCuwZO7N0SKvRwxg0,5954
+sentry_sdk/integrations/arq.py,sha256=lvzh1aIYWx942qnmD9--MFLCSNDOcSg4O5oHkPeXKFI,7816
+sentry_sdk/integrations/asgi.py,sha256=vA5tE9eN4pFambuPj1EVO7wmVAMJuVcSX-of5XqKEoM,12688
+sentry_sdk/integrations/asyncio.py,sha256=nAjrQzGb1v9oM5I8d6FEN6XOU7yBhyu_8v-n87ztLX4,3199
+sentry_sdk/integrations/asyncpg.py,sha256=n1reO054KJrSSqHQOwUp4Z7oYK1H6sQeM1e3-TYD2A8,6532
+sentry_sdk/integrations/atexit.py,sha256=sY46N2hEvtGuT1DBQhirUXHbjgXjXAm7R_sgiectVKw,1652
+sentry_sdk/integrations/aws_lambda.py,sha256=AoJCM-EHXjvOvm3EI26TM8Cm_uhkWc1FwX1KAbnJNAY,17834
+sentry_sdk/integrations/beam.py,sha256=qt35UmkA0ng4VNzmwqH9oz7SESU-is9IjFbTJ21ad4U,5182
+sentry_sdk/integrations/boto3.py,sha256=PT2CT7UoE_ebXiPW9O277v24YcXvALxXvfPkwSSQAWg,4569
+sentry_sdk/integrations/bottle.py,sha256=--ENQfB95C3D189yXfLuqxJ8vq2n6pMhaFUspUH73LU,6598
+sentry_sdk/integrations/celery/__init__.py,sha256=P35MEWnUDYzxuGF7SAvTiWaHAvjB9RsHmBBvNob1Tmk,18665
+sentry_sdk/integrations/celery/__pycache__/__init__.cpython-310.pyc,,
+sentry_sdk/integrations/celery/__pycache__/beat.cpython-310.pyc,,
+sentry_sdk/integrations/celery/__pycache__/utils.cpython-310.pyc,,
+sentry_sdk/integrations/celery/beat.py,sha256=wIRqiY8lsi-PwAnMwsqVDoAbkMlGDacOE5_xaYgRj_Q,8947
+sentry_sdk/integrations/celery/utils.py,sha256=CMWQOpg9yniEkm3WlXe7YakJfVnLwaY0-jyeo2GX-ZI,1208
+sentry_sdk/integrations/chalice.py,sha256=q3uWOA91JGQCTNG9m0EyGyoB1Z9aWQkdPRu3TPOXr3A,4711
+sentry_sdk/integrations/clickhouse_driver.py,sha256=zmUHPt_s6Cj67H08Mh771wDP8rbHw_Cy2nB1cPzN6zE,5240
+sentry_sdk/integrations/cloud_resource_context.py,sha256=pswdnRDnm_jeFprQ_qM56AIVnEK1ZKVj7tpIzlKNgWY,6744
+sentry_sdk/integrations/cohere.py,sha256=H2z6b92fMKvbqP4OMePWwOEHVXBxXneTdt02XbjDZI0,9266
+sentry_sdk/integrations/dedupe.py,sha256=VczYIzHmpm9xfMwZG4c-xGYWON7T4DqByDtyRkFIuPs,1171
+sentry_sdk/integrations/django/__init__.py,sha256=vj14KsyuV61BnorUGrwSEO4UeSbvGijDKsStUz2cmHQ,24991
+sentry_sdk/integrations/django/__pycache__/__init__.cpython-310.pyc,,
+sentry_sdk/integrations/django/__pycache__/asgi.cpython-310.pyc,,
+sentry_sdk/integrations/django/__pycache__/caching.cpython-310.pyc,,
+sentry_sdk/integrations/django/__pycache__/middleware.cpython-310.pyc,,
+sentry_sdk/integrations/django/__pycache__/signals_handlers.cpython-310.pyc,,
+sentry_sdk/integrations/django/__pycache__/templates.cpython-310.pyc,,
+sentry_sdk/integrations/django/__pycache__/transactions.cpython-310.pyc,,
+sentry_sdk/integrations/django/__pycache__/views.cpython-310.pyc,,
+sentry_sdk/integrations/django/asgi.py,sha256=FNqxqR5JI7ugPu_GhUvcII-eGaT6vLXHb62RazqeT58,8301
+sentry_sdk/integrations/django/caching.py,sha256=UvYaiI7xrN08Se59vMgJWrSO2BuowOyx3jmXmZoxQJo,6427
+sentry_sdk/integrations/django/middleware.py,sha256=UVKq134w_TyOVPV7WwBW0QjHY-ziDipcZBIDQmjqceE,6009
+sentry_sdk/integrations/django/signals_handlers.py,sha256=iudWetTlzNr5-kx_ew1YwW_vZ0yDChoonwPZB7AYGPo,3098
+sentry_sdk/integrations/django/templates.py,sha256=k3PQrNICGS4wqmFxK3o8KwOlqip7rSIryyc4oa1Wexc,5725
+sentry_sdk/integrations/django/transactions.py,sha256=Axyh3l4UvM96R3go2anVhew3JbrEZ4FSYd1r3UXEcw4,4951
+sentry_sdk/integrations/django/views.py,sha256=bjHwt6TVfYY7yfGUa2Rat9yowkUbQ2bYCcJaXJxP2Ik,3137
+sentry_sdk/integrations/dramatiq.py,sha256=q2v9qTQvveyW6OVjviPosM6REk2fl1PglxTqxTaw_c4,5575
+sentry_sdk/integrations/excepthook.py,sha256=tfwpSQuo1b_OmJbNKPPRh90EUjD_OSE4DqqgYY9PVQI,2408
+sentry_sdk/integrations/executing.py,sha256=5lxBAxO5FypY-zTV03AHncGmolmaHd327-3Vrjzskcc,1994
+sentry_sdk/integrations/falcon.py,sha256=0lCS2jy2MTwrmgyq7gQvaix_hracvrsoMbIpZeEPEQg,9628
+sentry_sdk/integrations/fastapi.py,sha256=E4Uj-aby7TebpIUClLXoIwnjgJ6zLFcOSdl_dP0zj9Q,4726
+sentry_sdk/integrations/flask.py,sha256=_GMTOUyxI9IPMdD0ZVfQqJv1X770s8uL6UgFd1or6uU,8394
+sentry_sdk/integrations/gcp.py,sha256=0HwjtHUFLNzBITzSM3jpPgom8J5dY6udmzayZL4muJs,8286
+sentry_sdk/integrations/gnu_backtrace.py,sha256=cVY7t6gjVjeRf4PdnmZrATFqMOZ7-qJu-84xIXOD5R4,2894
+sentry_sdk/integrations/gql.py,sha256=cQhH-GrScNCk5sH3GVKA2uDDg1qQ90w37S7LI1uzrzQ,4373
+sentry_sdk/integrations/graphene.py,sha256=QKiivVeLuytzCoEANnje8DWBYgglJibZcEh-zhGA368,5177
+sentry_sdk/integrations/grpc/__init__.py,sha256=HIrP4x4rfDrf-JW2IpJezOVo_MZ-Iy-E6Q2ZWwi4o0w,4950
+sentry_sdk/integrations/grpc/__pycache__/__init__.cpython-310.pyc,,
+sentry_sdk/integrations/grpc/__pycache__/client.cpython-310.pyc,,
+sentry_sdk/integrations/grpc/__pycache__/consts.cpython-310.pyc,,
+sentry_sdk/integrations/grpc/__pycache__/server.cpython-310.pyc,,
+sentry_sdk/integrations/grpc/aio/__init__.py,sha256=2rgrliowpPfLLw40_2YU6ixSzIu_3f8NN3TRplzc8S8,141
+sentry_sdk/integrations/grpc/aio/__pycache__/__init__.cpython-310.pyc,,
+sentry_sdk/integrations/grpc/aio/__pycache__/client.cpython-310.pyc,,
+sentry_sdk/integrations/grpc/aio/__pycache__/server.cpython-310.pyc,,
+sentry_sdk/integrations/grpc/aio/client.py,sha256=csOwlJb7fg9fBnzeNHxr-qpZEmU97I_jnqkCq6ZLFAs,3322
+sentry_sdk/integrations/grpc/aio/server.py,sha256=M-Xx5yczfAmwb3RH0gBGVsKbGghWVVfjseSq1YYEaLY,4028
+sentry_sdk/integrations/grpc/client.py,sha256=rOPwbU0IO6Ve99atvvwhdVZA8nqBy7_wbH2frb0kIJ0,3382
+sentry_sdk/integrations/grpc/consts.py,sha256=NpsN5gKWDmtGtVK_L5HscgFZBHqjOpmLJLGKyh8GZBA,31
+sentry_sdk/integrations/grpc/server.py,sha256=4Z_66z4Ejujbp5Wv_BrmmRsV2HW8VVTMZ1Rie_ft1bY,2483
+sentry_sdk/integrations/httpx.py,sha256=WwUulqzBLoGGqWUUdQg_MThwQUKzBXnA-m3g_1GOpCE,5866
+sentry_sdk/integrations/huey.py,sha256=sdLLitNJS13AkdZWy60fECl5TY1q9OJcpMV_W1mvfSM,5450
+sentry_sdk/integrations/huggingface_hub.py,sha256=A6uUwGmoGCis5yyb1W55-nSCiylSIKa8v0OvoIES0YI,6537
+sentry_sdk/integrations/langchain.py,sha256=_k34XP9H-5S-mDyF2tiJd-CjiiTDUWKZsmxsfJH5wzQ,17718
+sentry_sdk/integrations/launchdarkly.py,sha256=qLc2OBbwQ2tsMGPeDiHeOx3eP4bBw9PnHiG9JfNQZno,2093
+sentry_sdk/integrations/litestar.py,sha256=mnTRJuR8zNxCysKEYzR5yodADkvpSCol_Utpi_6GmBg,10932
+sentry_sdk/integrations/logging.py,sha256=ifj0Ex0975alGPCiKq-ExVSuKkPm8UeOjEWpKMLiTk8,9597
+sentry_sdk/integrations/loguru.py,sha256=Gzs2ACyMFQZWe7lscsAKbuCVCSSy1OpTPhBrkes2qfA,3001
+sentry_sdk/integrations/modules.py,sha256=vzLx3Erg77Vl4mnUvAgTg_3teAuWy7zylFpAidBI9I0,820
+sentry_sdk/integrations/openai.py,sha256=UI_-Y1NPbCIi0AG2q6ME44vhSDx0SuAXleaB4v_X23Q,15557
+sentry_sdk/integrations/openfeature.py,sha256=1AqqSS7qGAD38VWfLzTK652T5dI9MF0xBTSiXCjfJJM,1458
+sentry_sdk/integrations/opentelemetry/__init__.py,sha256=emNL5aAq_NhK0PZmfX_g4GIdvBS6nHqGrjrIgrdC5m8,229
+sentry_sdk/integrations/opentelemetry/__pycache__/__init__.cpython-310.pyc,,
+sentry_sdk/integrations/opentelemetry/__pycache__/consts.cpython-310.pyc,,
+sentry_sdk/integrations/opentelemetry/__pycache__/integration.cpython-310.pyc,,
+sentry_sdk/integrations/opentelemetry/__pycache__/propagator.cpython-310.pyc,,
+sentry_sdk/integrations/opentelemetry/__pycache__/span_processor.cpython-310.pyc,,
+sentry_sdk/integrations/opentelemetry/consts.py,sha256=fYL6FIAEfnGZGBhFn5X7aRyHxihSPqAKKqMLhf5Gniw,143
+sentry_sdk/integrations/opentelemetry/integration.py,sha256=CWp6hFFMqoR7wcuwTRbRO-1iVch4A6oOB3RuHWeX9GQ,1791
+sentry_sdk/integrations/opentelemetry/propagator.py,sha256=NpCgv2Ibq1LUrv8-URayZaPGSzz0f1tJsf7aaxAZ5pc,3720
+sentry_sdk/integrations/opentelemetry/span_processor.py,sha256=IBF75ld9zJLNF1-4EYnNBoAS00_XTXjPio86zPX9DLQ,13276
+sentry_sdk/integrations/pure_eval.py,sha256=OvT76XvllQ_J6ABu3jVNU6KD2QAxnXMtTZ7hqhXNhpY,4581
+sentry_sdk/integrations/pymongo.py,sha256=cPpMGEbXHlV6HTHgmIDL1F-x3w7ZMROXVb4eUhLs3bw,6380
+sentry_sdk/integrations/pyramid.py,sha256=IDonzoZvLrH18JL-i_Qpbztc4T3iZNQhWFFv6SAXac8,7364
+sentry_sdk/integrations/quart.py,sha256=pPFB-MVYGj_nfmZK9BRKxJHiqmBVulUnW0nAxI7FDOc,7437
+sentry_sdk/integrations/ray.py,sha256=pzq6azW8BQm8xpx7p_v8FRP9CRkMcRY4CS-ARglUZ_c,4290
+sentry_sdk/integrations/redis/__init__.py,sha256=As5XhbOue-9Sy9d8Vr8cZagbO_Bc0uG8n2G3YNMP7TU,1332
+sentry_sdk/integrations/redis/__pycache__/__init__.cpython-310.pyc,,
+sentry_sdk/integrations/redis/__pycache__/_async_common.cpython-310.pyc,,
+sentry_sdk/integrations/redis/__pycache__/_sync_common.cpython-310.pyc,,
+sentry_sdk/integrations/redis/__pycache__/consts.cpython-310.pyc,,
+sentry_sdk/integrations/redis/__pycache__/rb.cpython-310.pyc,,
+sentry_sdk/integrations/redis/__pycache__/redis.cpython-310.pyc,,
+sentry_sdk/integrations/redis/__pycache__/redis_cluster.cpython-310.pyc,,
+sentry_sdk/integrations/redis/__pycache__/redis_py_cluster_legacy.cpython-310.pyc,,
+sentry_sdk/integrations/redis/__pycache__/utils.cpython-310.pyc,,
+sentry_sdk/integrations/redis/_async_common.py,sha256=Ay-0XOzDaiFD4pNjq_hO8wU8w2K-ZajFVrypuCYCN5E,3791
+sentry_sdk/integrations/redis/_sync_common.py,sha256=FxWQaPPHNIRcBRBv3unV-vB9Zvs75PdoUmDOaJcYTqk,3581
+sentry_sdk/integrations/redis/consts.py,sha256=jYhloX935YQ1AR9c8giCVo1FpIuGXkGR_Tfn4LOulNU,480
+sentry_sdk/integrations/redis/modules/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+sentry_sdk/integrations/redis/modules/__pycache__/__init__.cpython-310.pyc,,
+sentry_sdk/integrations/redis/modules/__pycache__/caches.cpython-310.pyc,,
+sentry_sdk/integrations/redis/modules/__pycache__/queries.cpython-310.pyc,,
+sentry_sdk/integrations/redis/modules/caches.py,sha256=eY8XY4Nk3QsMM0T26OOYdcNr4bN0Sp9325HkH-hO8cg,4063
+sentry_sdk/integrations/redis/modules/queries.py,sha256=0GxZ98wyjqcc4CwPG3xJ4bSGIGW8wPXChSk5Fxm6kYg,2035
+sentry_sdk/integrations/redis/rb.py,sha256=paykO7EE_DAdiZzCpIqW1MqtBE7mE5UG0JnauFejuzE,806
+sentry_sdk/integrations/redis/redis.py,sha256=1K6seuP6ttEdscKLFtEYEu9vkDRuANCsxWVeDISsGsg,1702
+sentry_sdk/integrations/redis/redis_cluster.py,sha256=D-b_wnX4sgxW4qxJP2kKe8ArJRvEtqrLQNYyStl5D6s,3333
+sentry_sdk/integrations/redis/redis_py_cluster_legacy.py,sha256=pz5pg0AxdHPZWt0jMQRDPH_9jdh0i3KoDPbNUyavIro,1585
+sentry_sdk/integrations/redis/utils.py,sha256=EeUdhTU6rTsNUtqRW5kWZTWYF8Ct1wTvIRKXI6y63-8,3956
+sentry_sdk/integrations/rq.py,sha256=v7dDc_wnWoza40xqhZmlWLfA1yL_fjfoWFvY-gA6a5w,5437
+sentry_sdk/integrations/rust_tracing.py,sha256=klroQxV-CW2iq5I2iPA2N-sgv5DJIKQaabkUqJEKghg,9103
+sentry_sdk/integrations/sanic.py,sha256=F01o0Tdo7aaC_4EYw73Hn3R3oPOb1luc-YK7b5ogFZs,13101
+sentry_sdk/integrations/serverless.py,sha256=npiKJuIy_sEkWT_x0Eu2xSEMiMh_aySqGYlnvIROsYk,1804
+sentry_sdk/integrations/socket.py,sha256=UqY4MPZ77x9hAi4vMoXV1AkdZNpVGFqRxP8F3O25Gsw,3036
+sentry_sdk/integrations/spark/__init__.py,sha256=oOewMErnZk2rzNvIlZO6URxQexu9bUJuSLM2m_zECy8,208
+sentry_sdk/integrations/spark/__pycache__/__init__.cpython-310.pyc,,
+sentry_sdk/integrations/spark/__pycache__/spark_driver.cpython-310.pyc,,
+sentry_sdk/integrations/spark/__pycache__/spark_worker.cpython-310.pyc,,
+sentry_sdk/integrations/spark/spark_driver.py,sha256=hHP5SPy1dOKcqUl0T4QHkkId0dJWDei0ox_GgqwD8po,8340
+sentry_sdk/integrations/spark/spark_worker.py,sha256=FGT4yRU2X_iQCC46aasMmvJfYOKmBip8KbDF_wnhvEY,3706
+sentry_sdk/integrations/sqlalchemy.py,sha256=upu6uhy-SVKMcH9B9xy5OBiozhBUy0gaM0l-HuV5PCI,4538
+sentry_sdk/integrations/starlette.py,sha256=SA9HHZYt5zzL1w4NDO7AQq_kpJHNYodbc6ANK91kKqg,26158
+sentry_sdk/integrations/starlite.py,sha256=0Wk0rv4U9RR5Z_woDlLK9fvI98PJAC6jyv81oh3DhP8,10628
+sentry_sdk/integrations/stdlib.py,sha256=vgB9weDGh455vBwmUSgcQRgzViKstu3O0syOthCn_H0,8831
+sentry_sdk/integrations/strawberry.py,sha256=2qbDZ7R6KcWa57Ufw8S_Im8DbKZGQROjJassVePhHIU,15573
+sentry_sdk/integrations/sys_exit.py,sha256=AwShgGBWPdiY25aOWDLRAs2RBUKm5T3CrL-Q-zAk0l4,2493
+sentry_sdk/integrations/threading.py,sha256=AAQuxDfp9_HuL_1vAwzwSRftsCLsttuQdNiqU1569JU,4011
+sentry_sdk/integrations/tornado.py,sha256=dRNuiRM5x3TAFUsz3IF_r0wTzE7lITu0mAHnJW2hvw4,7259
+sentry_sdk/integrations/trytond.py,sha256=BaLCNqQeRWDbHHDEelS5tmj-p_CrbmtGEHIn6JfzEFE,1651
+sentry_sdk/integrations/wsgi.py,sha256=nv8DMawFPLUSoF74Sm-Wo3K6E6p2djEAPH3jWP_bVC8,10755
+sentry_sdk/metrics.py,sha256=-hfIxEOLcRo2KG1qSuD5F2IgSGdx9eAHGH65oJWCjZ4,30025
+sentry_sdk/monitor.py,sha256=7LydPMKjVRR5eFY9rxgvJv0idExA3sSnrZk-1mHu6G4,3710
+sentry_sdk/profiler/__init__.py,sha256=b3z-s_lUtah4G9rBL38Od5CSs5RbAOCXzgSVkhUCJDo,1063
+sentry_sdk/profiler/__pycache__/__init__.cpython-310.pyc,,
+sentry_sdk/profiler/__pycache__/continuous_profiler.cpython-310.pyc,,
+sentry_sdk/profiler/__pycache__/transaction_profiler.cpython-310.pyc,,
+sentry_sdk/profiler/__pycache__/utils.cpython-310.pyc,,
+sentry_sdk/profiler/continuous_profiler.py,sha256=Y_rXVs1v2S1eCxzQZqZb7hp9af5B3SfvMNmrgu_MGxc,17155
+sentry_sdk/profiler/transaction_profiler.py,sha256=ZRqfytls9bVu8OECniyLQwOu2ha_PbOKl7Gy50ymSl0,27876
+sentry_sdk/profiler/utils.py,sha256=G5s4tYai9ATJqcHrQ3bOIxlK6jIaHzELrDtU5k3N4HI,6556
+sentry_sdk/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+sentry_sdk/scope.py,sha256=xLNcCRjwd5TJ7O9ECaKvtxc5Gf76RfkNMTQgTmceLPM,59805
+sentry_sdk/scrubber.py,sha256=F3yIqTD-V6KdPFYsNBqbgmOvd50RZthUi0TluvMJQ8o,5926
+sentry_sdk/serializer.py,sha256=iXiRwTuRj0gcKyHRO0GNTZB1Hmk0LMDiBt6Be7RpGt8,13087
+sentry_sdk/session.py,sha256=TqDVmRKKHUDSmZb4jQR-s8wDt7Fwb6QaG21hawUGWEs,5571
+sentry_sdk/sessions.py,sha256=DKgZh4xq-ccOmTqzX98fp-dZn0b6WwbLCbfMOp8x27o,9181
+sentry_sdk/spotlight.py,sha256=-nnaNvJv_oKJohBNaf3INdsL3DvqFQUydw4mF2hOcjk,7911
+sentry_sdk/tracing.py,sha256=9dNPHCa3dAtYhwFelqyRICU4QQOap5LXRajtIorGM1Y,45879
+sentry_sdk/tracing_utils.py,sha256=AIjE_xVLrHevglKkk5Fr5a6jtiHt5-0Nmpbjn61y2BQ,22386
+sentry_sdk/transport.py,sha256=KAtbh4R45lPyFRP5FAEg2Orf-sLXaZ7IG_IGKY6ClRU,32184
+sentry_sdk/types.py,sha256=S4sOblXMzr39F3BpEECp5M4FsA7uSd5mWiKRFSyeZpY,800
+sentry_sdk/utils.py,sha256=J3ydgt9pNW6YQfFjlLIqk0kAbcL9lFypv5_IsiyrkfM,59792
+sentry_sdk/worker.py,sha256=VSMaigRMbInVyupSFpBC42bft2oIViea-0C_d9ThnIo,4464
diff --git a/vllm/lib/python3.10/site-packages/sentry_sdk-2.19.2.dist-info/REQUESTED b/vllm/lib/python3.10/site-packages/sentry_sdk-2.19.2.dist-info/REQUESTED
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/vllm/lib/python3.10/site-packages/sentry_sdk-2.19.2.dist-info/WHEEL b/vllm/lib/python3.10/site-packages/sentry_sdk-2.19.2.dist-info/WHEEL
new file mode 100644
index 0000000000000000000000000000000000000000..104f3874635f24f0d2918dfeaf6a59652274460c
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/sentry_sdk-2.19.2.dist-info/WHEEL
@@ -0,0 +1,6 @@
+Wheel-Version: 1.0
+Generator: setuptools (75.6.0)
+Root-Is-Purelib: true
+Tag: py2-none-any
+Tag: py3-none-any
+
diff --git a/vllm/lib/python3.10/site-packages/sentry_sdk-2.19.2.dist-info/entry_points.txt b/vllm/lib/python3.10/site-packages/sentry_sdk-2.19.2.dist-info/entry_points.txt
new file mode 100644
index 0000000000000000000000000000000000000000..4b128a843cbaf61581876070bb4bbe709ce1c0b5
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/sentry_sdk-2.19.2.dist-info/entry_points.txt
@@ -0,0 +1,2 @@
+[opentelemetry_propagator]
+sentry = sentry_sdk.integrations.opentelemetry:SentryPropagator
diff --git a/vllm/lib/python3.10/site-packages/sentry_sdk-2.19.2.dist-info/top_level.txt b/vllm/lib/python3.10/site-packages/sentry_sdk-2.19.2.dist-info/top_level.txt
new file mode 100644
index 0000000000000000000000000000000000000000..5051901ecb46b7905ec9fcb8dfa5c2711d494b8d
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/sentry_sdk-2.19.2.dist-info/top_level.txt
@@ -0,0 +1 @@
+sentry_sdk